######################################################################
#                         /v1/files Endpoints
# Equivalent of https://platform.openai.com/docs/api-reference/files
######################################################################

import asyncio
import traceback
from datetime import datetime, timedelta, timezone
from typing import List, Optional

import fastapi
import httpx
from fastapi import (
    APIRouter,
    Depends,
    File,
    Form,
    Header,
    HTTPException,
    Request,
    Response,
    UploadFile,
    status,
)

import litellm
from litellm import CreateFileRequest, FileContentRequest
from litellm._logging import verbose_proxy_logger
from litellm.batches.main import FileObject
from litellm.proxy._types import *
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth

router = APIRouter()


@router.post(
    "/v1/files",
    dependencies=[Depends(user_api_key_auth)],
    tags=["files"],
)
@router.post(
    "/files",
    dependencies=[Depends(user_api_key_auth)],
    tags=["files"],
)
async def create_file(
    request: Request,
    fastapi_response: Response,
    purpose: str = Form(...),
    file: UploadFile = File(...),
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Upload a file that can be used across the Assistants API and Batch API.

    This is the equivalent of POST https://api.openai.com/v1/files

    Supports Identical Params as: https://platform.openai.com/docs/api-reference/files/create

    Example Curl
    ```
    curl http://localhost:4000/v1/files \
        -H "Authorization: Bearer sk-1234" \
        -F purpose="batch" \
        -F file="@mydata.jsonl"
    ```
    """
    from litellm.proxy.proxy_server import (
        add_litellm_data_to_request,
        general_settings,
        get_custom_headers,
        proxy_config,
        proxy_logging_obj,
        version,
    )

    data: Dict = {}
    try:
        # Read the uploaded file content
        file_content = await file.read()

        # Prepare the data for forwarding
        data = {"purpose": purpose}

        # Include original request and headers in the data
        data = await add_litellm_data_to_request(
            data=data,
            request=request,
            general_settings=general_settings,
            user_api_key_dict=user_api_key_dict,
            version=version,
            proxy_config=proxy_config,
        )

        # Prepare the file data according to FileTypes
        file_data = (file.filename, file_content, file.content_type)

        _create_file_request = CreateFileRequest(file=file_data, **data)

        # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_file
        response = await litellm.acreate_file(
            custom_llm_provider="openai", **_create_file_request
        )

        ### ALERTING ###
        asyncio.create_task(
            proxy_logging_obj.update_request_status(
                litellm_call_id=data.get("litellm_call_id", ""), status="success"
            )
        )

        ### RESPONSE HEADERS ###
        hidden_params = getattr(response, "_hidden_params", {}) or {}
        model_id = hidden_params.get("model_id", None) or ""
        cache_key = hidden_params.get("cache_key", None) or ""
        api_base = hidden_params.get("api_base", None) or ""

        fastapi_response.headers.update(
            get_custom_headers(
                user_api_key_dict=user_api_key_dict,
                model_id=model_id,
                cache_key=cache_key,
                api_base=api_base,
                version=version,
                model_region=getattr(user_api_key_dict, "allowed_model_region", ""),
            )
        )

        return response
    except Exception as e:
        await proxy_logging_obj.post_call_failure_hook(
            user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
        )
        verbose_proxy_logger.error(
            "litellm.proxy.proxy_server.create_file(): Exception occurred - {}".format(
                str(e)
            )
        )
        verbose_proxy_logger.debug(traceback.format_exc())
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "message", str(e.detail)),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
            )
        else:
            error_msg = f"{str(e)}"
            raise ProxyException(
                message=getattr(e, "message", error_msg),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", 500),
            )
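
# Client-side usage sketch for the upload endpoint above. Hypothetical values:
# it assumes the proxy is reachable at http://localhost:4000 with a virtual key
# "sk-1234" and that the `openai` Python SDK (v1+) is installed; none of these
# values come from this module.
#
#   from openai import OpenAI
#
#   client = OpenAI(api_key="sk-1234", base_url="http://localhost:4000/v1")
#   uploaded = client.files.create(file=open("mydata.jsonl", "rb"), purpose="batch")
#   print(uploaded.id)  # e.g. "file-abc123", usable with /v1/files/{file_id}
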
@router.get(
    "/v1/files/{file_id:path}",
    dependencies=[Depends(user_api_key_auth)],
    tags=["files"],
)
@router.get(
    "/files/{file_id:path}",
    dependencies=[Depends(user_api_key_auth)],
    tags=["files"],
)
async def get_file(
    request: Request,
    fastapi_response: Response,
    file_id: str,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Returns information about a specific file that can be used across the Assistants API and Batch API.

    This is the equivalent of GET https://api.openai.com/v1/files/{file_id}

    Supports Identical Params as: https://platform.openai.com/docs/api-reference/files/retrieve

    Example Curl
    ```
    curl http://localhost:4000/v1/files/file-abc123 \
        -H "Authorization: Bearer sk-1234"
    ```
    """
    from litellm.proxy.proxy_server import (
        add_litellm_data_to_request,
        general_settings,
        get_custom_headers,
        proxy_config,
        proxy_logging_obj,
        version,
    )

    data: Dict = {}
    try:
        # Include original request and headers in the data
        data = await add_litellm_data_to_request(
            data=data,
            request=request,
            general_settings=general_settings,
            user_api_key_dict=user_api_key_dict,
            version=version,
            proxy_config=proxy_config,
        )

        # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for afile_retrieve
        response = await litellm.afile_retrieve(
            custom_llm_provider="openai", file_id=file_id, **data
        )

        ### ALERTING ###
        asyncio.create_task(
            proxy_logging_obj.update_request_status(
                litellm_call_id=data.get("litellm_call_id", ""), status="success"
            )
        )

        ### RESPONSE HEADERS ###
        hidden_params = getattr(response, "_hidden_params", {}) or {}
        model_id = hidden_params.get("model_id", None) or ""
        cache_key = hidden_params.get("cache_key", None) or ""
        api_base = hidden_params.get("api_base", None) or ""

        fastapi_response.headers.update(
            get_custom_headers(
                user_api_key_dict=user_api_key_dict,
                model_id=model_id,
                cache_key=cache_key,
                api_base=api_base,
                version=version,
                model_region=getattr(user_api_key_dict, "allowed_model_region", ""),
            )
        )

        return response
    except Exception as e:
        await proxy_logging_obj.post_call_failure_hook(
            user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
        )
        verbose_proxy_logger.error(
            "litellm.proxy.proxy_server.retrieve_file(): Exception occurred - {}".format(
                str(e)
            )
        )
        verbose_proxy_logger.debug(traceback.format_exc())
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "message", str(e.detail)),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
            )
        else:
            error_msg = f"{str(e)}"
            raise ProxyException(
                message=getattr(e, "message", error_msg),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", 500),
            )
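
# Client-side usage sketch for the retrieve endpoint above (same hypothetical
# assumptions as the upload sketch: proxy at http://localhost:4000, key
# "sk-1234", `openai` SDK v1+).
#
#   from openai import OpenAI
#
#   client = OpenAI(api_key="sk-1234", base_url="http://localhost:4000/v1")
#   file_object = client.files.retrieve("file-abc123")
#   print(file_object.filename, file_object.purpose, file_object.bytes)
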
@router.get(
    "/v1/files",
    dependencies=[Depends(user_api_key_auth)],
    tags=["files"],
)
@router.get(
    "/files",
    dependencies=[Depends(user_api_key_auth)],
    tags=["files"],
)
async def list_files(
    request: Request,
    fastapi_response: Response,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
    purpose: Optional[str] = None,
):
    """
    Returns a list of files that can be used across the Assistants API and Batch API.

    This is the equivalent of GET https://api.openai.com/v1/files

    Supports Identical Params as: https://platform.openai.com/docs/api-reference/files/list

    Example Curl
    ```
    curl http://localhost:4000/v1/files \
        -H "Authorization: Bearer sk-1234"
    ```
    """
    from litellm.proxy.proxy_server import (
        add_litellm_data_to_request,
        general_settings,
        get_custom_headers,
        proxy_config,
        proxy_logging_obj,
        version,
    )

    data: Dict = {}
    try:
        # Include original request and headers in the data
        data = await add_litellm_data_to_request(
            data=data,
            request=request,
            general_settings=general_settings,
            user_api_key_dict=user_api_key_dict,
            version=version,
            proxy_config=proxy_config,
        )

        # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for afile_list
        response = await litellm.afile_list(
            custom_llm_provider="openai", purpose=purpose, **data
        )

        ### ALERTING ###
        asyncio.create_task(
            proxy_logging_obj.update_request_status(
                litellm_call_id=data.get("litellm_call_id", ""), status="success"
            )
        )

        ### RESPONSE HEADERS ###
        hidden_params = getattr(response, "_hidden_params", {}) or {}
        model_id = hidden_params.get("model_id", None) or ""
        cache_key = hidden_params.get("cache_key", None) or ""
        api_base = hidden_params.get("api_base", None) or ""

        fastapi_response.headers.update(
            get_custom_headers(
                user_api_key_dict=user_api_key_dict,
                model_id=model_id,
                cache_key=cache_key,
                api_base=api_base,
                version=version,
                model_region=getattr(user_api_key_dict, "allowed_model_region", ""),
            )
        )

        return response
    except Exception as e:
        await proxy_logging_obj.post_call_failure_hook(
            user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
        )
        verbose_proxy_logger.error(
            "litellm.proxy.proxy_server.list_files(): Exception occurred - {}".format(
                str(e)
            )
        )
        verbose_proxy_logger.debug(traceback.format_exc())
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "message", str(e.detail)),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
            )
        else:
            error_msg = f"{str(e)}"
            raise ProxyException(
                message=getattr(e, "message", error_msg),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", 500),
            )
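
# Client-side usage sketch for the list endpoint above (same hypothetical
# assumptions: proxy at http://localhost:4000, key "sk-1234", `openai` SDK v1+).
#
#   from openai import OpenAI
#
#   client = OpenAI(api_key="sk-1234", base_url="http://localhost:4000/v1")
#   for f in client.files.list(purpose="batch"):
#       print(f.id, f.filename, f.purpose)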