# mirror of https://github.com/BerriAI/litellm.git
# synced 2025-04-24 18:24:20 +00:00
#
# Last commit: build(README.md): initial commit adding a separate folder for additional
# proxy files, meant to reduce size of core package; adds the litellm-proxy-extras pip
# package for storing prisma migration files (Closes https://github.com/BerriAI/litellm/issues/9558),
# bump 0.1.0 -> 0.1.1, publish script, liccheck/pre-commit/CI updates, and docs for the new env var.
# LITELLM PROXY DEPENDENCIES #
anyio==4.5.0 # openai + http req.
httpx==0.27.0 # Pin Httpx dependency
openai==1.68.2 # openai req.
fastapi==0.115.5 # server dep
backoff==2.2.1 # server dep
pyyaml==6.0.2 # server dep
uvicorn==0.29.0 # server dep
gunicorn==23.0.0 # server dep
uvloop==0.21.0 # uvicorn dep, gives us much better performance under load
boto3==1.34.34 # aws bedrock/sagemaker calls
redis==5.2.1 # redis caching
redisvl==0.4.1 # semantic caching
prisma==0.11.0 # for db
mangum==0.17.0 # for aws lambda functions
pynacl==1.5.0 # for encrypting keys
google-cloud-aiplatform==1.47.0 # for vertex ai calls
anthropic[vertex]==0.21.3
mcp==1.5.0 # for MCP server
google-generativeai==0.5.0 # for vertex ai calls
async_generator==1.10.0 # for async ollama calls
langfuse==2.45.0 # for langfuse self-hosted logging
prometheus_client==0.20.0 # for /metrics endpoint on proxy
ddtrace==2.19.0 # for advanced DD tracing / profiling
orjson==3.10.12 # fast /embedding responses
apscheduler==3.10.4 # for resetting budget in background
fastapi-sso==0.16.0 # admin UI, SSO
pyjwt[crypto]==2.9.0
python-multipart==0.0.18 # admin UI
Pillow==11.0.0
azure-ai-contentsafety==1.0.0 # for azure content safety
azure-identity==1.16.1 # for azure content safety
azure-storage-file-datalake==12.15.0 # for azure bucket storage logging
opentelemetry-api==1.25.0
opentelemetry-sdk==1.25.0
opentelemetry-exporter-otlp==1.25.0
sentry_sdk==2.21.0 # for sentry error handling
detect-secrets==1.5.0 # Enterprise - secret detection / masking in LLM requests
cryptography==43.0.1
tzdata==2025.1 # IANA time zone database
litellm-proxy-extras==0.1.1 # for proxy extras - e.g. prisma migrations

### LITELLM PACKAGE DEPENDENCIES
python-dotenv==1.0.0 # for env
tiktoken==0.8.0 # for calculating usage
importlib-metadata==6.8.0 # for random utils
tokenizers==0.20.2 # for calculating usage
click==8.1.7 # for proxy cli
jinja2==3.1.6 # for prompt templates
aiohttp==3.10.2 # for network calls
aioboto3==12.3.0 # for async sagemaker calls
tenacity==8.2.3 # for retrying requests, when litellm.num_retries set
pydantic==2.10.2 # proxy + openai req.
jsonschema==4.22.0 # validating json schema
websockets==13.1.0 # for realtime API
####