Merge pull request #2721 from BerriAI/litellm_remove_background_tasks
(fix) Proxy - remove background tasks
Commit e81eb1bd11
3 changed files with 9 additions and 13 deletions
@@ -28,8 +28,9 @@ jobs:
     pip install "pytest==7.3.1"
     pip install "pytest-asyncio==0.21.1"
     pip install mypy
-    pip install "google-generativeai>=0.3.2"
-    pip install "google-cloud-aiplatform>=1.38.0"
+    pip install "google-generativeai==0.3.2"
+    pip install "google-cloud-aiplatform==1.43.0"
+    pip install pyarrow
     pip install "boto3>=1.28.57"
     pip install "aioboto3>=12.3.0"
     pip install langchain

@@ -152,8 +153,9 @@ jobs:
     pip install "pytest-mock==3.12.0"
     pip install "pytest-asyncio==0.21.1"
     pip install mypy
-    pip install "google-generativeai>=0.3.2"
-    pip install "google-cloud-aiplatform>=1.38.0"
+    pip install "google-generativeai==0.3.2"
+    pip install "google-cloud-aiplatform==1.43.0"
+    pip install pyarrow
     pip install "boto3>=1.28.57"
     pip install "aioboto3>=12.3.0"
     pip install langchain

@@ -10,5 +10,5 @@ anthropic
 boto3
 orjson
 pydantic
-google-cloud-aiplatform
+google-cloud-aiplatform==1.43.0
 redisvl==0.0.7 # semantic caching

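Both CI hunks and the requirements hunk make the same move: floating ">=" specifiers for the Google SDKs become exact pins (google-generativeai==0.3.2, google-cloud-aiplatform==1.43.0), and pyarrow is added to the CI installs. As a minimal sketch that is not part of this PR, a follow-up CI step could assert that the pins actually resolved; the PINS mapping below is taken from the diff, the check itself is hypothetical:

from importlib.metadata import PackageNotFoundError, version

# Versions taken from the pinned lines above; this check is an
# illustrative add-on, not code from the PR.
PINS = {
    "google-generativeai": "0.3.2",
    "google-cloud-aiplatform": "1.43.0",
}

for package, expected in PINS.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        raise SystemExit(f"{package} is not installed")
    if installed != expected:
        raise SystemExit(f"{package}: installed {installed}, pinned {expected}")
print("all pins satisfied")

Exact pins trade automatic upgrades for reproducible CI runs, which is usually the point of a change like this.
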
@@ -130,7 +130,6 @@ from fastapi import (
     HTTPException,
     status,
     Depends,
-    BackgroundTasks,
     Header,
     Response,
     Form,

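This hunk drops BackgroundTasks from the fastapi import, and every endpoint hunk below drops the matching background_tasks parameter. For reference, the usual FastAPI pattern declares the parameter without a default, so the framework injects a fresh, request-scoped instance and runs queued tasks after the response is sent. A minimal sketch, with app, write_log, and the /send route all illustrative rather than proxy code:

from fastapi import BackgroundTasks, FastAPI

app = FastAPI()

def write_log(message: str) -> None:
    # Illustrative task body; FastAPI runs it after the response is sent.
    with open("log.txt", "a") as f:
        f.write(message + "\n")

@app.post("/send")
async def send(background_tasks: BackgroundTasks):
    # No default value: FastAPI injects a per-request instance here.
    background_tasks.add_task(write_log, "notification sent")
    return {"status": "queued"}
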
@@ -2896,7 +2895,6 @@ async def completion(
     fastapi_response: Response,
     model: Optional[str] = None,
     user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
-    background_tasks: BackgroundTasks = BackgroundTasks(),
 ):
     global user_temperature, user_request_timeout, user_max_tokens, user_api_base
     try:

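The removed parameter in each endpoint carried an explicit default, background_tasks: BackgroundTasks = BackgroundTasks(). Whatever the PR's motivation, that shape is the classic mutable-default-argument hazard: the default is evaluated once at definition time, so any plain-Python call (tests, internal reuse) shares a single instance. A minimal sketch of the pitfall; handler is illustrative, not a proxy endpoint:

from fastapi import BackgroundTasks

def handler(background_tasks: BackgroundTasks = BackgroundTasks()) -> int:
    # The default object is created once, when the function is defined.
    background_tasks.add_task(print, "hello")
    return len(background_tasks.tasks)

print(handler())  # 1
print(handler())  # 2 -- same BackgroundTasks object, tasks accumulate

(FastAPI itself injects a fresh instance per request, so the hazard bites only when the function is called directly.)
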
@@ -3062,7 +3060,6 @@ async def chat_completion(
     fastapi_response: Response,
     model: Optional[str] = None,
     user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
-    background_tasks: BackgroundTasks = BackgroundTasks(),
 ):
     global general_settings, user_debug, proxy_logging_obj, llm_model_list
     try:

@@ -3299,7 +3296,6 @@ async def embeddings(
     request: Request,
     model: Optional[str] = None,
     user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
-    background_tasks: BackgroundTasks = BackgroundTasks(),
 ):
     global proxy_logging_obj
     try:

@@ -3475,7 +3471,6 @@ async def embeddings(
 async def image_generation(
     request: Request,
     user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
-    background_tasks: BackgroundTasks = BackgroundTasks(),
 ):
     global proxy_logging_obj
     try:

@@ -6158,7 +6153,7 @@ async def block_team(
         raise Exception("No DB Connected.")

     record = await prisma_client.db.litellm_teamtable.update(
-        where={"team_id": data.team_id}, data={"blocked": True}
+        where={"team_id": data.team_id}, data={"blocked": True} # type: ignore
     )

     return record

@@ -6180,7 +6175,7 @@ async def unblock_team(
         raise Exception("No DB Connected.")

     record = await prisma_client.db.litellm_teamtable.update(
-        where={"team_id": data.team_id}, data={"blocked": False}
+        where={"team_id": data.team_id}, data={"blocked": False} # type: ignore
     )

     return record

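Apart from the parameter removals, the two team-blocking hunks above are the diff's only in-place edits: an inline # type: ignore on each Prisma update call, presumably because the literal dicts are wider than the input types the generated client expects. The pattern is generic; here is a minimal, self-contained sketch in which TeamUpdate and update are hypothetical stand-ins, not the generated Prisma client or litellm code:

from typing import TypedDict

class TeamUpdate(TypedDict):
    # Hypothetical stand-in for a generated, typed input model.
    blocked: bool

def update(data: TeamUpdate) -> TeamUpdate:
    return data

payload: dict = {"blocked": True}
# dict is wider than TeamUpdate, so mypy flags the call; the inline ignore
# (the same device the diff uses) suppresses only this line.
record = update(payload)  # type: ignore[arg-type]
print(record)
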
@@ -6783,7 +6778,6 @@ async def async_queue_request(
     request: Request,
     model: Optional[str] = None,
     user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
-    background_tasks: BackgroundTasks = BackgroundTasks(),
 ):
     global general_settings, user_debug, proxy_logging_obj
     """