From df60e475e8dbfac4ffc0474f88faa48abeac900d Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 23 Apr 2024 19:18:14 -0700
Subject: [PATCH] build(requirements.txt): pin all dependency versions

---
 litellm/proxy/_new_secret_config.yaml |  3 +++
 requirements.txt                      | 18 +++++++++---------
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index 431311811..4f13fa00e 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -1,4 +1,7 @@
 model_list:
+- model_name: gpt-3.5-turbo
+  litellm_params:
+    model: gpt-3.5-turbo
 - model_name: fake-openai-endpoint
   litellm_params:
     model: openai/my-fake-model
diff --git a/requirements.txt b/requirements.txt
index 4b1660f19..3c5d142c8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,9 @@
 # LITELLM PROXY DEPENDENCIES #
 anyio==4.2.0 # openai + http req.
-openai>=1.0.0 # openai req.
-fastapi>=0.109.1 # server dep
-pydantic>=2.5 # openai req.
+openai==1.14.3 # openai req.
+fastapi==0.100.0 # server dep
 backoff==2.2.1 # server dep
-pyyaml>=6.0.1 # server dep
+pyyaml==6.0.0 # server dep
 uvicorn==0.29.0 # server dep
 gunicorn==21.2.0 # server dep
 boto3==1.34.34 # aws bedrock/sagemaker calls
@@ -18,7 +17,7 @@ google-cloud-aiplatform==1.47.0 # for vertex ai calls
 anthropic[vertex]==0.21.3
 google-generativeai==0.5.0 # for vertex ai calls
 async_generator==1.10.0 # for async ollama calls
-langfuse>=2.7.3 # for langfuse self-hosted logging
+langfuse==2.7.3 # for langfuse self-hosted logging
 datadog-api-client==2.23.0 # for datadog logging
 prometheus_client==0.20.0 # for /metrics endpoint on proxy
 orjson==3.9.15 # fast /embedding responses
@@ -29,14 +28,15 @@ python-multipart==0.0.9 # admin UI
 Pillow==10.3.0
 
 ### LITELLM PACKAGE DEPENDENCIES
-python-dotenv>=0.2.0 # for env
-tiktoken>=0.4.0 # for calculating usage
-importlib-metadata>=6.8.0 # for random utils
+python-dotenv==1.0.0 # for env
+tiktoken==0.6.0 # for calculating usage
+importlib-metadata==6.8.0 # for random utils
 tokenizers==0.14.0 # for calculating usage
 click==8.1.7 # for proxy cli
 jinja2==3.1.3 # for prompt templates
-certifi>=2023.7.22 # [TODO] clean up
+certifi==2023.7.22 # [TODO] clean up
 aiohttp==3.9.0 # for network calls
 aioboto3==12.3.0 # for async sagemaker calls
 tenacity==8.2.3 # for retrying requests, when litellm.num_retries set
+pydantic==2.7.1 # openai req.
 ####
\ No newline at end of file
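
Not part of the patch: a minimal sketch of how the new gpt-3.5-turbo entry in _new_secret_config.yaml might be exercised against the pinned openai==1.14.3 client, assuming the proxy is launched with this config (e.g. `litellm --config litellm/proxy/_new_secret_config.yaml`) and serves its OpenAI-compatible API at http://0.0.0.0:4000; the base_url and api_key below are placeholders to adjust for your deployment.

# Sketch only: send one chat completion through the proxy's new
# gpt-3.5-turbo model_list entry using the OpenAI 1.x client.
from openai import OpenAI

# Placeholder credentials/address; point these at your running proxy.
client = OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)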