Litellm add managed files db (#9930)

* fix(openai.py): ensure openai file object shows up on logs

* fix(managed_files.py): return unified file id as b64 str

allows file retrieval by id to work as expected

* fix(managed_files.py): apply decoded file id transformation

* fix: add unit test for file id + decode logic
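
A minimal sketch of the encode/decode round-trip the bullets above describe, assuming the unified file id is an arbitrary string payload; the helper names and payload format are illustrative, not the actual litellm implementation:

```python
import base64


def encode_unified_file_id(unified_file_id: str) -> str:
    # Return the unified id as a URL-safe base64 string so clients can pass it
    # back as an opaque, OpenAI-style file id.
    return base64.urlsafe_b64encode(unified_file_id.encode("utf-8")).decode("utf-8")


def decode_unified_file_id(b64_file_id: str) -> str:
    # Reverse the transformation before routing the request to the backing provider.
    return base64.urlsafe_b64decode(b64_file_id.encode("utf-8")).decode("utf-8")


original = "litellm_proxy;target_models=gpt-4o;file-abc123"  # hypothetical payload
assert decode_unified_file_id(encode_unified_file_id(original)) == original
```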

* fix: initial commit for litellm_proxy support with CRUD endpoints

* fix(managed_files.py): support retrieve file operation

* fix(managed_files.py): support for DELETE endpoint for files

* fix(managed_files.py): retrieve file content support

supports OpenAI's retrieve file content API
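
Taken together, the bullets above give the proxy OpenAI-compatible files CRUD. A rough client-side usage sketch via the OpenAI SDK follows; the proxy URL, API key, and the `target_model_names` extra_body parameter are assumptions for illustration (the diff below routes on a `target_model_names_list`):

```python
from openai import OpenAI

# Point the OpenAI SDK at the LiteLLM proxy (URL and key are placeholders).
client = OpenAI(base_url="http://localhost:4000", api_key="sk-1234")

# Create: the proxy fans the upload out to each target deployment and returns
# a single unified (base64) file id.
created = client.files.create(
    file=open("batch_input.jsonl", "rb"),
    purpose="batch",
    extra_body={"target_model_names": "gpt-4o-mini, azure-gpt-4o"},
)

# Retrieve metadata, fetch content, and delete, all against the unified id.
retrieved = client.files.retrieve(created.id)
content = client.files.content(created.id)
deleted = client.files.delete(created.id)
```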

* fix: fix linting error

* test: update tests

* fix: fix linting error

* feat(managed_files.py): support reading / writing files in DB

* feat(managed_files.py): support deleting file from DB on delete
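
A rough sketch of the kind of record the DB read/write/delete bullets imply: a mapping from the unified file id to the provider-specific file ids created per target model. Class and field names here are hypothetical, not the actual Prisma schema added by this PR:

```python
from dataclasses import dataclass, field
from typing import Dict


@dataclass
class ManagedFileRecord:
    unified_file_id: str  # the base64 id handed back to the client
    purpose: str
    # target model name -> provider-specific file id returned by that deployment
    model_mappings: Dict[str, str] = field(default_factory=dict)

    def provider_file_id(self, model: str) -> str:
        # On retrieve/delete, the decoded unified id is used to look up which
        # provider file id to operate on for the chosen deployment.
        return self.model_mappings[model]
```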

* test: update testing

* fix(spend_tracking_utils.py): ensure each file create request is logged correctly

* fix(managed_files.py): fix storing / returning managed file object from cache

* fix(files/main.py): pass litellm params to azure route

* test: fix test

* build: add new prisma migration

* build: bump requirements

* test: add more testing

* refactor: cleanup post merge w/ main

* fix: fix code qa errors
Author: Krish Dholakia, 2025-04-12 08:24:46 -07:00 (committed by GitHub)
Parent: 93037ea4d3
Commit: 421e0a3004
19 changed files with 286 additions and 158 deletions


@@ -128,37 +128,6 @@ async def _deprecated_loadbalanced_create_file(
     return response
 
 
-async def create_file_for_each_model(
-    llm_router: Optional[Router],
-    _create_file_request: CreateFileRequest,
-    target_model_names_list: List[str],
-    purpose: OpenAIFilesPurpose,
-    proxy_logging_obj: ProxyLogging,
-    user_api_key_dict: UserAPIKeyAuth,
-) -> OpenAIFileObject:
-    if llm_router is None:
-        raise HTTPException(
-            status_code=500,
-            detail={
-                "error": "LLM Router not initialized. Ensure models added to proxy."
-            },
-        )
-    responses = []
-    for model in target_model_names_list:
-        individual_response = await llm_router.acreate_file(
-            model=model, **_create_file_request
-        )
-        responses.append(individual_response)
-    response = await _PROXY_LiteLLMManagedFiles.return_unified_file_id(
-        file_objects=responses,
-        create_file_request=_create_file_request,
-        purpose=purpose,
-        internal_usage_cache=proxy_logging_obj.internal_usage_cache,
-        litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
-    )
-    return response
-
-
 async def route_create_file(
     llm_router: Optional[Router],
     _create_file_request: CreateFileRequest,
@@ -181,13 +150,29 @@ async def route_create_file(
             _create_file_request=_create_file_request,
         )
     elif target_model_names_list:
-        response = await create_file_for_each_model(
+        managed_files_obj = cast(
+            Optional[_PROXY_LiteLLMManagedFiles],
+            proxy_logging_obj.get_proxy_hook("managed_files"),
+        )
+        if managed_files_obj is None:
+            raise ProxyException(
+                message="Managed files hook not found",
+                type="None",
+                param="None",
+                code=500,
+            )
+        if llm_router is None:
+            raise ProxyException(
+                message="LLM Router not found",
+                type="None",
+                param="None",
+                code=500,
+            )
+        response = await managed_files_obj.acreate_file(
             llm_router=llm_router,
-            _create_file_request=_create_file_request,
+            create_file_request=_create_file_request,
             target_model_names_list=target_model_names_list,
-            purpose=purpose,
-            proxy_logging_obj=proxy_logging_obj,
-            user_api_key_dict=user_api_key_dict,
+            litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
         )
     else:
         # get configs for custom_llm_provider