forked from phoenix/litellm-mirror

build(requirements.txt): pin all dependency versions

This commit is contained in:
parent 8d2e411df6
commit df60e475e8

2 changed files with 12 additions and 9 deletions
@@ -1,4 +1,7 @@
 model_list:
   - model_name: gpt-3.5-turbo
     litellm_params:
       model: gpt-3.5-turbo
+  - model_name: fake-openai-endpoint
+    litellm_params:
+      model: openai/my-fake-model
@@ -1,10 +1,9 @@
 # LITELLM PROXY DEPENDENCIES #
 anyio==4.2.0 # openai + http req.
-openai>=1.0.0 # openai req.
-fastapi>=0.109.1 # server dep
-pydantic>=2.5 # openai req.
+openai==1.14.3 # openai req.
+fastapi==0.100.0 # server dep
 backoff==2.2.1 # server dep
-pyyaml>=6.0.1 # server dep
+pyyaml==6.0.0 # server dep
 uvicorn==0.29.0 # server dep
 gunicorn==21.2.0 # server dep
 boto3==1.34.34 # aws bedrock/sagemaker calls
@@ -18,7 +17,7 @@ google-cloud-aiplatform==1.47.0 # for vertex ai calls
 anthropic[vertex]==0.21.3
 google-generativeai==0.5.0 # for vertex ai calls
 async_generator==1.10.0 # for async ollama calls
-langfuse>=2.7.3 # for langfuse self-hosted logging
+langfuse==2.7.3 # for langfuse self-hosted logging
 datadog-api-client==2.23.0 # for datadog logging
 prometheus_client==0.20.0 # for /metrics endpoint on proxy
 orjson==3.9.15 # fast /embedding responses
@@ -29,14 +28,15 @@ python-multipart==0.0.9 # admin UI
 Pillow==10.3.0

 ### LITELLM PACKAGE DEPENDENCIES
-python-dotenv>=0.2.0 # for env
-tiktoken>=0.4.0 # for calculating usage
-importlib-metadata>=6.8.0 # for random utils
+python-dotenv==1.0.0 # for env
+tiktoken==0.6.0 # for calculating usage
+importlib-metadata==6.8.0 # for random utils
 tokenizers==0.14.0 # for calculating usage
 click==8.1.7 # for proxy cli
 jinja2==3.1.3 # for prompt templates
-certifi>=2023.7.22 # [TODO] clean up
+certifi==2023.7.22 # [TODO] clean up
 aiohttp==3.9.0 # for network calls
 aioboto3==12.3.0 # for async sagemaker calls
 tenacity==8.2.3 # for retrying requests, when litellm.num_retries set
+pydantic==2.7.1 # openai req.
 ####
Loading…
Add table
Add a link
Reference in a new issue