(fix) LiteLLM Proxy fix GET /files/{file_id:path}/content endpoint (#7342)
* fix order of get_file_content
* update e2e files tests
* add e2e batches endpoint testing
* update config.yml
* write content to file
* use correct oai_misc_config
* fixes for openai batches endpoint testing
* remove extra out file
* fix input.jsonl
This commit is contained in:
parent c78893723f
commit b90b98b88f

10 changed files with 414 additions and 135 deletions
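The first item, "fix order of get_file_content", is the heart of the change: Starlette (FastAPI's router) matches routes in registration order, and a `{file_id:path}` parameter greedily matches slashes, so a bare retrieve route can swallow requests meant for the `/content` route. A minimal sketch of the collision, using hypothetical handlers rather than the proxy's real ones:

```python
# Minimal sketch (hypothetical handlers, not the proxy's real ones) of why
# registration order matters: "{file_id:path}" also matches slashes, so the
# bare retrieve route would capture "/files/abc/content" if it came first.
from fastapi import FastAPI

app = FastAPI()

@app.get("/files/{file_id:path}/content")  # must be registered first
async def get_file_content(file_id: str):
    return {"matched": "content", "file_id": file_id}

@app.get("/files/{file_id:path}")  # greedy catch-all, registered after
async def retrieve_file(file_id: str):
    return {"matched": "retrieve", "file_id": file_id}
```

With the order flipped, GET /files/file-abc123/content resolves to retrieve_file with file_id set to "file-abc123/content", which is the failure mode the diff below removes by moving get_file_content above the plain retrieve endpoint.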
@@ -265,6 +265,131 @@ async def create_file(
         )


+@router.get(
+    "/{provider}/v1/files/{file_id:path}/content",
+    dependencies=[Depends(user_api_key_auth)],
+    tags=["files"],
+)
+@router.get(
+    "/v1/files/{file_id:path}/content",
+    dependencies=[Depends(user_api_key_auth)],
+    tags=["files"],
+)
+@router.get(
+    "/files/{file_id:path}/content",
+    dependencies=[Depends(user_api_key_auth)],
+    tags=["files"],
+)
+async def get_file_content(
+    request: Request,
+    fastapi_response: Response,
+    file_id: str,
+    provider: Optional[str] = None,
+    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
+):
+    """
+    Returns the contents of a specific file. Can be used across the Assistants API and Batch API.
+
+    This is the equivalent of GET https://api.openai.com/v1/files/{file_id}/content
+
+    Supports identical params as: https://platform.openai.com/docs/api-reference/files/retrieve-contents
+
+    Example curl:
+    ```
+    curl http://localhost:4000/v1/files/file-abc123/content \
+        -H "Authorization: Bearer sk-1234"
+    ```
+    """
+    from litellm.proxy.proxy_server import (
+        add_litellm_data_to_request,
+        general_settings,
+        get_custom_headers,
+        proxy_config,
+        proxy_logging_obj,
+        version,
+    )
+
+    data: Dict = {}
+    try:
+        # Include original request and headers in the data
+        data = await add_litellm_data_to_request(
+            data=data,
+            request=request,
+            general_settings=general_settings,
+            user_api_key_dict=user_api_key_dict,
+            version=version,
+            proxy_config=proxy_config,
+        )
+
+        if provider is None:
+            provider = "openai"
+        response = await litellm.afile_content(
+            custom_llm_provider=provider, file_id=file_id, **data  # type: ignore
+        )
+
+        ### ALERTING ###
+        asyncio.create_task(
+            proxy_logging_obj.update_request_status(
+                litellm_call_id=data.get("litellm_call_id", ""), status="success"
+            )
+        )
+
+        ### RESPONSE HEADERS ###
+        hidden_params = getattr(response, "_hidden_params", {}) or {}
+        model_id = hidden_params.get("model_id", None) or ""
+        cache_key = hidden_params.get("cache_key", None) or ""
+        api_base = hidden_params.get("api_base", None) or ""
+
+        fastapi_response.headers.update(
+            get_custom_headers(
+                user_api_key_dict=user_api_key_dict,
+                model_id=model_id,
+                cache_key=cache_key,
+                api_base=api_base,
+                version=version,
+                model_region=getattr(user_api_key_dict, "allowed_model_region", ""),
+            )
+        )
+        httpx_response: Optional[httpx.Response] = getattr(response, "response", None)
+        if httpx_response is None:
+            raise ValueError(
+                f"Invalid response - response.response is None - got {response}"
+            )
+
+        return Response(
+            content=httpx_response.content,
+            status_code=httpx_response.status_code,
+            headers=httpx_response.headers,
+        )
+
+    except Exception as e:
+        await proxy_logging_obj.post_call_failure_hook(
+            user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
+        )
+        verbose_proxy_logger.error(
+            "litellm.proxy.proxy_server.retrieve_file_content(): Exception occurred - {}".format(
+                str(e)
+            )
+        )
+        verbose_proxy_logger.debug(traceback.format_exc())
+        if isinstance(e, HTTPException):
+            raise ProxyException(
+                message=getattr(e, "message", str(e.detail)),
+                type=getattr(e, "type", "None"),
+                param=getattr(e, "param", "None"),
+                code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
+            )
+        else:
+            error_msg = f"{str(e)}"
+            raise ProxyException(
+                message=getattr(e, "message", error_msg),
+                type=getattr(e, "type", "None"),
+                param=getattr(e, "param", "None"),
+                code=getattr(e, "status_code", 500),
+            )
+
+
 @router.get(
     "/{provider}/v1/files/{file_id:path}",
     dependencies=[Depends(user_api_key_auth)],
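The e2e files tests in the commit message come down to a round trip through the proxy: upload a file, fetch it back via this endpoint, and compare the bytes on disk. A minimal sketch with the OpenAI SDK, assuming a proxy at localhost:4000 and the sk-1234 key from the docstring's curl example; input.jsonl and out.jsonl are illustrative names:

```python
# Minimal round-trip sketch; the proxy URL, key, and file names are
# illustrative assumptions, not values taken from the commit's tests.
from openai import OpenAI

client = OpenAI(api_key="sk-1234", base_url="http://localhost:4000/v1")

# Upload a batch input file through the proxy (POST /v1/files).
with open("input.jsonl", "rb") as f:
    uploaded = client.files.create(file=f, purpose="batch")

# Fetch it back via GET /v1/files/{file_id}/content and write it to disk.
content = client.files.content(uploaded.id)
content.write_to_file("out.jsonl")

# The round trip should preserve the bytes exactly.
with open("input.jsonl", "rb") as a, open("out.jsonl", "rb") as b:
    assert a.read() == b.read()
```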
@@ -609,127 +734,3 @@ async def list_files(
                 param=getattr(e, "param", "None"),
                 code=getattr(e, "status_code", 500),
             )
(The rest of this hunk deletes the identical get_file_content implementation from its old position below list_files; see the added block above.)
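The diff also registers a provider-scoped variant, /{provider}/v1/files/{file_id:path}/content, where the leading path segment is bound to the handler's provider parameter and forwarded as custom_llm_provider. A quick sketch of calling it directly, under the same assumed proxy and key as above:

```python
# Sketch of the provider-scoped route; proxy address, key, and file id are
# the same illustrative assumptions used in the round-trip example.
import httpx

resp = httpx.get(
    "http://localhost:4000/openai/v1/files/file-abc123/content",
    headers={"Authorization": "Bearer sk-1234"},
)
resp.raise_for_status()
print(resp.content[:80])  # raw file bytes, relayed from the upstream provider
```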