Mirror of https://github.com/BerriAI/litellm.git
fix: fix linting error
parent b56122b164
commit b59e54d835
4 changed files with 59 additions and 36 deletions
@@ -159,7 +159,7 @@ class DBSpendUpdateWriter:
                 )

             verbose_proxy_logger.debug("Runs spend update on all tables")
-        except Exception as e:
+        except Exception:
             verbose_proxy_logger.debug(
                 f"Error updating Prisma database: {traceback.format_exc()}"
             )
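This hunk is the linting fix named in the commit title: `e` was bound but never read, which Flake8/Ruff report as F841 (local variable assigned but never used), and the handler already recovers the failure details via `traceback.format_exc()`. A minimal, self-contained sketch of the pattern, with illustrative names rather than litellm's:

    import logging
    import traceback

    logger = logging.getLogger(__name__)

    def update_spend_tables() -> None:  # hypothetical stand-in for the real handler
        try:
            raise RuntimeError("simulated Prisma failure")
        except Exception:  # no "as e": the binding was unused
            # traceback.format_exc() yields the full stack trace without needing `e`.
            logger.debug("Error updating Prisma database: %s", traceback.format_exc())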
@@ -5,7 +5,6 @@ import asyncio
 import base64
 import uuid
 from abc import ABC, abstractmethod
-from datetime import datetime
 from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Union, cast

 from litellm import Router, verbose_logger
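This import removal pairs with the next hunk: once `filename` is no longer derived from `datetime.now()`, nothing in the module uses `datetime`, and an unused import is itself a lint failure (F401 in Flake8/Ruff).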
@@ -301,7 +300,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
             purpose=cast(OpenAIFilesPurpose, purpose),
             created_at=file_objects[0].created_at,
             bytes=file_objects[0].bytes,
-            filename=str(datetime.now().timestamp()),
+            filename=file_objects[0].filename,
             status="uploaded",
         )
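The change is behavioral as well as lint-driven: the merged file object now keeps the uploaded file's real name instead of stamping it with the current epoch time, which is what made the `datetime` import above removable. A hedged before/after sketch, using a stand-in dataclass since the real OpenAIFileObject carries more fields:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class FileStub:  # illustrative stand-in for OpenAIFileObject
        filename: str
        created_at: int
        bytes: int

    def merged_file(file_objects: List[FileStub]) -> FileStub:
        first = file_objects[0]
        # Before: filename=str(datetime.now().timestamp()) produced a synthetic
        # name and was the module's only use of datetime.
        return FileStub(
            filename=first.filename,  # after: echo the original upload's name
            created_at=first.created_at,
            bytes=first.bytes,
        )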
@@ -159,6 +159,51 @@ async def create_file_for_each_model(
     return response


+async def route_create_file(
+    llm_router: Optional[Router],
+    _create_file_request: CreateFileRequest,
+    purpose: OpenAIFilesPurpose,
+    proxy_logging_obj: ProxyLogging,
+    user_api_key_dict: UserAPIKeyAuth,
+    target_model_names_list: List[str],
+    is_router_model: bool,
+    router_model: Optional[str],
+    custom_llm_provider: str,
+) -> OpenAIFileObject:
+    if (
+        litellm.enable_loadbalancing_on_batch_endpoints is True
+        and is_router_model
+        and router_model is not None
+    ):
+        response = await _deprecated_loadbalanced_create_file(
+            llm_router=llm_router,
+            router_model=router_model,
+            _create_file_request=_create_file_request,
+        )
+    elif target_model_names_list:
+        response = await create_file_for_each_model(
+            llm_router=llm_router,
+            _create_file_request=_create_file_request,
+            target_model_names_list=target_model_names_list,
+            purpose=purpose,
+            proxy_logging_obj=proxy_logging_obj,
+            user_api_key_dict=user_api_key_dict,
+        )
+    else:
+        # get configs for custom_llm_provider
+        llm_provider_config = get_files_provider_config(
+            custom_llm_provider=custom_llm_provider
+        )
+        if llm_provider_config is not None:
+            # add llm_provider_config to data
+            _create_file_request.update(llm_provider_config)
+        _create_file_request.pop("custom_llm_provider", None)  # type: ignore
+        # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
+        response = await litellm.acreate_file(**_create_file_request, custom_llm_provider=custom_llm_provider)  # type: ignore
+
+    return response
+
+
 @router.post(
     "/{provider}/v1/files",
     dependencies=[Depends(user_api_key_auth)],
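The new `route_create_file` helper centralizes three previously inlined paths, tried in order: the deprecated load-balanced path (only when `enable_loadbalancing_on_batch_endpoints` is set and a router model resolved), fan-out to every model in `target_model_names_list`, and a direct `litellm.acreate_file` call with any provider config merged into the request. A standalone sketch of that dispatch order, with illustrative names and strings standing in for the real async calls:

    from typing import List, Optional

    async def route_create_file_sketch(
        use_loadbalancing: bool,
        is_router_model: bool,
        router_model: Optional[str],
        target_model_names_list: List[str],
    ) -> str:
        # 1) Legacy load-balanced path: needs the flag AND a resolved router model.
        if use_loadbalancing and is_router_model and router_model is not None:
            return f"loadbalanced:{router_model}"
        # 2) Explicit targets: create the file once per listed model.
        if target_model_names_list:
            return "fanout:" + ",".join(target_model_names_list)
        # 3) Fallback: hand the request to the provider directly.
        return "direct"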
@@ -267,37 +312,17 @@ async def create_file(
         file=file_data, purpose=cast(CREATE_FILE_REQUESTS_PURPOSE, purpose), **data
     )

-    response: Optional[OpenAIFileObject] = None
-    if (
-        litellm.enable_loadbalancing_on_batch_endpoints is True
-        and is_router_model
-        and router_model is not None
-    ):
-        response = await _deprecated_loadbalanced_create_file(
-            llm_router=llm_router,
-            router_model=router_model,
-            _create_file_request=_create_file_request,
-        )
-    elif target_model_names_list:
-        response = await create_file_for_each_model(
-            llm_router=llm_router,
-            _create_file_request=_create_file_request,
-            target_model_names_list=target_model_names_list,
-            purpose=purpose,
-            proxy_logging_obj=proxy_logging_obj,
-            user_api_key_dict=user_api_key_dict,
-        )
-    else:
-        # get configs for custom_llm_provider
-        llm_provider_config = get_files_provider_config(
-            custom_llm_provider=custom_llm_provider
-        )
-        if llm_provider_config is not None:
-            # add llm_provider_config to data
-            _create_file_request.update(llm_provider_config)
-        _create_file_request.pop("custom_llm_provider", None)  # type: ignore
-        # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
-        response = await litellm.acreate_file(**_create_file_request, custom_llm_provider=custom_llm_provider)  # type: ignore
+    response = await route_create_file(
+        llm_router=llm_router,
+        _create_file_request=_create_file_request,
+        purpose=purpose,
+        proxy_logging_obj=proxy_logging_obj,
+        user_api_key_dict=user_api_key_dict,
+        target_model_names_list=target_model_names_list,
+        is_router_model=is_router_model,
+        router_model=router_model,
+        custom_llm_provider=custom_llm_provider,
+    )

     if response is None:
         raise HTTPException(
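This hunk is the consuming side of the refactor: the branching that `route_create_file` now owns is deleted from `create_file`, which shrinks to a single keyword-only call, and the `response: Optional[OpenAIFileObject] = None` sentinel goes away because the helper always returns an object or raises. The `if response is None` check below remains as a defensive guard.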
@@ -278,6 +278,7 @@ class ProxyLogging:
         self.premium_user = premium_user
         self.service_logging_obj = ServiceLogging()
         self.db_spend_update_writer = DBSpendUpdateWriter()
+        self.proxy_hook_mapping: Dict[str, CustomLogger] = {}

     def startup_event(
         self,
@@ -353,8 +354,6 @@
         self.db_spend_update_writer.redis_update_buffer.redis_cache = redis_cache
         self.db_spend_update_writer.pod_lock_manager.redis_cache = redis_cache

-        self.proxy_hook_mapping: Dict[str, CustomLogger] = {}
-
     def _add_proxy_hooks(self, llm_router: Optional[Router] = None):
         """
         Add proxy hooks to litellm.callbacks
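The final two hunks move the `proxy_hook_mapping` initialization out of the redis-wiring path and into `__init__`, so the attribute exists from construction onward; first assigning an attribute outside `__init__` is a common lint complaint (e.g. pylint's attribute-defined-outside-init). A minimal sketch of the shape of the fix, with hypothetical names:

    from typing import Dict, Optional

    class LoggingServiceSketch:  # illustrative, not litellm's ProxyLogging
        def __init__(self) -> None:
            self.redis_cache: Optional[object] = None
            # Moved here from the redis-wiring method: the mapping exists
            # before any hook-registration code can touch it.
            self.proxy_hook_mapping: Dict[str, object] = {}

        def attach_redis(self, redis_cache: object) -> None:
            # Previously proxy_hook_mapping was first assigned here, so reading
            # it before this ran would raise AttributeError.
            self.redis_cache = redis_cache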