# litellm/requirements.txt
# LITELLM PROXY DEPENDENCIES #
anyio==4.2.0 # openai + http req.
openai==1.14.3 # openai req.
fastapi==0.100.0 # server dep
backoff==2.2.1 # server dep
pyyaml==6.0.0 # server dep
uvicorn==0.29.0 # server dep
gunicorn==21.2.0 # server dep
boto3==1.34.34 # aws bedrock/sagemaker calls
redis==5.0.0 # caching
numpy==1.24.3 # semantic caching
pandas==2.1.1 # for viewing clickhouse spend analytics
prisma==0.11.0 # for db
mangum==0.17.0 # for aws lambda functions
pynacl==1.5.0 # for encrypting keys
google-cloud-aiplatform==1.47.0 # for vertex ai calls
anthropic[vertex]==0.21.3 # for anthropic on vertex ai calls
google-generativeai==0.5.0 # for vertex ai calls
async_generator==1.10.0 # for async ollama calls
langfuse==2.7.3 # for langfuse self-hosted logging
datadog-api-client==2.23.0 # for datadog logging
prometheus_client==0.20.0 # for /metrics endpoint on proxy
orjson==3.9.15 # fast /embedding responses
apscheduler==3.10.4 # for resetting budget in background
fastapi-sso==0.10.0 # admin UI, SSO
pyjwt[crypto]==2.8.0 # for jwt auth
python-multipart==0.0.9 # admin UI
Pillow==10.3.0 # for image processing
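# Example (a sketch, not part of the pinned deps): the uvicorn/gunicorn server deps
# above are typically used to serve the proxy app; module path assumed to be
# litellm.proxy.proxy_server:app --
#   gunicorn litellm.proxy.proxy_server:app --workers 4 --worker-class uvicorn.workers.UvicornWorker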
### LITELLM PACKAGE DEPENDENCIES
python-dotenv==1.0.0 # for env
tiktoken==0.6.0 # for calculating usage
importlib-metadata==6.8.0 # for random utils
tokenizers==0.14.0 # for calculating usage
click==8.1.7 # for proxy cli
jinja2==3.1.3 # for prompt templates
certifi==2023.7.22 # [TODO] clean up
aiohttp==3.9.0 # for network calls
aioboto3==12.3.0 # for async sagemaker calls
tenacity==8.2.3 # for retrying requests, when litellm.num_retries set
pydantic==2.7.1 # openai req.
####
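# Example usage (a sketch; exact CLI flags may vary by litellm version):
#   pip install -r requirements.txt
#   litellm --config /path/to/config.yaml --port 4000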