Merge pull request #3430 from BerriAI/litellm_return_api_base

feat(proxy_server.py): return api base in response headers
Krish Dholakia 2024-05-03 16:25:21 -07:00 committed by GitHub
commit 1b35a75245
4 changed files with 67 additions and 11 deletions

proxy_server.py

@@ -3658,6 +3658,7 @@ async def chat_completion(
 hidden_params = getattr(response, "_hidden_params", {}) or {}
 model_id = hidden_params.get("model_id", None) or ""
 cache_key = hidden_params.get("cache_key", None) or ""
+api_base = hidden_params.get("api_base", None) or ""
 # Post Call Processing
 if llm_router is not None:
@@ -3670,6 +3671,7 @@
 custom_headers = {
     "x-litellm-model-id": model_id,
     "x-litellm-cache-key": cache_key,
+    "x-litellm-model-api-base": api_base,
 }
 selected_data_generator = select_data_generator(
     response=response, user_api_key_dict=user_api_key_dict
@@ -3682,6 +3684,7 @@
 fastapi_response.headers["x-litellm-model-id"] = model_id
 fastapi_response.headers["x-litellm-cache-key"] = cache_key
+fastapi_response.headers["x-litellm-model-api-base"] = api_base
 ### CALL HOOKS ### - modify outgoing data
 response = await proxy_logging_obj.post_call_success_hook(
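
For illustration only (not part of this commit): a minimal client-side sketch showing how a caller could read the new header from a /chat/completions response. The proxy URL, API key, and model name below are placeholder assumptions for a local LiteLLM proxy deployment; adjust them to your setup.

import requests

# Placeholder endpoint and key -- assumptions, not values from this PR.
PROXY_BASE = "http://localhost:4000"
API_KEY = "sk-1234"

resp = requests.post(
    f"{PROXY_BASE}/chat/completions",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "hello"}],
    },
)

# Headers set by the proxy on the response:
print(resp.headers.get("x-litellm-model-id"))
print(resp.headers.get("x-litellm-cache-key"))
print(resp.headers.get("x-litellm-model-api-base"))  # added by this PR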