use correct get_custom_headers

Ishaan Jaff 2025-03-12 17:16:51 -07:00
parent 584338fb82
commit 4aa588d203
5 changed files with 22 additions and 34 deletions
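
Across all five files the change is the same: `get_custom_headers` is no longer imported from `litellm.proxy.proxy_server` but invoked through `ProxyBaseLLMRequestProcessing` from `litellm.proxy.common_request_processing`. A minimal before/after sketch of the call-site pattern (only the keyword arguments visible in the hunks below are shown; the remaining arguments are elided):

```python
# Before: module-level helper imported from proxy_server
from litellm.proxy.proxy_server import get_custom_headers

fastapi_response.headers.update(
    get_custom_headers(
        user_api_key_dict=user_api_key_dict,
        model_id=model_id,
        cache_key=cache_key,
    )
)

# After: the same helper, resolved via ProxyBaseLLMRequestProcessing
from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing

fastapi_response.headers.update(
    ProxyBaseLLMRequestProcessing.get_custom_headers(
        user_api_key_dict=user_api_key_dict,
        model_id=model_id,
        cache_key=cache_key,
    )
)
```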

View file

@@ -18,6 +18,7 @@ from litellm.batches.main import (
 )
 from litellm.proxy._types import *
 from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
 from litellm.proxy.common_utils.http_parsing_utils import _read_request_body
 from litellm.proxy.common_utils.openai_endpoint_utils import (
     get_custom_llm_provider_from_request_body,
@@ -69,7 +70,6 @@ async def create_batch(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         llm_router,
         proxy_config,
         proxy_logging_obj,
@@ -137,7 +137,7 @@ async def create_batch(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -201,7 +201,6 @@ async def retrieve_batch(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         llm_router,
         proxy_config,
         proxy_logging_obj,
@@ -266,7 +265,7 @@ async def retrieve_batch(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -326,11 +325,7 @@ async def list_batches(
     ```
     """
-    from litellm.proxy.proxy_server import (
-        get_custom_headers,
-        proxy_logging_obj,
-        version,
-    )
+    from litellm.proxy.proxy_server import proxy_logging_obj, version

     verbose_proxy_logger.debug("GET /v1/batches after={} limit={}".format(after, limit))
     try:
@@ -352,7 +347,7 @@ async def list_batches(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -417,7 +412,6 @@ async def cancel_batch(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         proxy_config,
         proxy_logging_obj,
         version,
@@ -463,7 +457,7 @@ async def cancel_batch(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,

View file

@@ -15,6 +15,7 @@ import litellm
 from litellm._logging import verbose_proxy_logger
 from litellm.proxy._types import *
 from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
 from litellm.proxy.utils import handle_exception_on_proxy

 router = APIRouter()
@@ -97,7 +98,6 @@ async def create_fine_tuning_job(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         premium_user,
         proxy_config,
         proxy_logging_obj,
@@ -151,7 +151,7 @@ async def create_fine_tuning_job(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -205,7 +205,6 @@ async def retrieve_fine_tuning_job(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         premium_user,
         proxy_config,
         proxy_logging_obj,
@@ -248,7 +247,7 @@ async def retrieve_fine_tuning_job(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -305,7 +304,6 @@ async def list_fine_tuning_jobs(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         premium_user,
         proxy_config,
         proxy_logging_obj,
@@ -349,7 +347,7 @@ async def list_fine_tuning_jobs(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -404,7 +402,6 @@ async def cancel_fine_tuning_job(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         premium_user,
         proxy_config,
         proxy_logging_obj,
@@ -451,7 +448,7 @@ async def cancel_fine_tuning_job(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,

View file

@@ -27,6 +27,7 @@ from litellm import CreateFileRequest, get_secret_str
 from litellm._logging import verbose_proxy_logger
 from litellm.proxy._types import *
 from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
 from litellm.proxy.common_utils.openai_endpoint_utils import (
     get_custom_llm_provider_from_request_body,
 )
@@ -145,7 +146,6 @@ async def create_file(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         llm_router,
         proxy_config,
         proxy_logging_obj,
@@ -234,7 +234,7 @@ async def create_file(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -309,7 +309,6 @@ async def get_file_content(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         proxy_config,
         proxy_logging_obj,
         version,
@@ -351,7 +350,7 @@ async def get_file_content(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -437,7 +436,6 @@ async def get_file(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         proxy_config,
         proxy_logging_obj,
         version,
@@ -477,7 +475,7 @@ async def get_file(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -554,7 +552,6 @@ async def delete_file(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         proxy_config,
         proxy_logging_obj,
         version,
@@ -595,7 +592,7 @@ async def delete_file(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
@@ -671,7 +668,6 @@ async def list_files(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         proxy_config,
         proxy_logging_obj,
         version,
@@ -712,7 +708,7 @@ async def list_files(
         api_base = hidden_params.get("api_base", None) or ""
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,

View file

@@ -23,6 +23,7 @@ from litellm.proxy._types import (
     UserAPIKeyAuth,
 )
 from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
 from litellm.proxy.common_utils.http_parsing_utils import _read_request_body
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.custom_http import httpxSpecialProvider
@@ -106,7 +107,6 @@ async def chat_completion_pass_through_endpoint(  # noqa: PLR0915
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         llm_router,
         proxy_config,
         proxy_logging_obj,
@@ -231,7 +231,7 @@ async def chat_completion_pass_through_endpoint(  # noqa: PLR0915
         verbose_proxy_logger.debug("final response: %s", response)

         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,

View file

@@ -7,10 +7,12 @@ from fastapi.responses import ORJSONResponse
 from litellm._logging import verbose_proxy_logger
 from litellm.proxy._types import *
 from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing

 router = APIRouter()
 import asyncio

 @router.post(
     "/v2/rerank",
     dependencies=[Depends(user_api_key_auth)],
@@ -37,7 +39,6 @@ async def rerank(
     from litellm.proxy.proxy_server import (
         add_litellm_data_to_request,
         general_settings,
-        get_custom_headers,
         llm_router,
         proxy_config,
         proxy_logging_obj,
@@ -89,7 +90,7 @@ async def rerank(
         api_base = hidden_params.get("api_base", None) or ""
         additional_headers = hidden_params.get("additional_headers", None) or {}
         fastapi_response.headers.update(
-            get_custom_headers(
+            ProxyBaseLLMRequestProcessing.get_custom_headers(
                 user_api_key_dict=user_api_key_dict,
                 model_id=model_id,
                 cache_key=cache_key,
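
Calling `get_custom_headers` unqualified on the class implies it is exposed as a `@staticmethod` (or `@classmethod`) of `ProxyBaseLLMRequestProcessing`. Below is a rough sketch of the shape the call sites above assume; everything beyond the keyword arguments visible in this diff (the extra parameters and the header names) is an assumption for illustration, not the actual implementation in `litellm.proxy.common_request_processing`:

```python
from typing import Optional

from litellm.proxy._types import UserAPIKeyAuth


class ProxyBaseLLMRequestProcessing:
    @staticmethod
    def get_custom_headers(
        *,
        user_api_key_dict: UserAPIKeyAuth,
        model_id: Optional[str] = None,
        cache_key: Optional[str] = None,
        api_base: Optional[str] = None,  # assumed: api_base is computed just before each call site
        **kwargs,
    ) -> dict:
        # Hypothetical header names, for illustration only.
        headers = {
            "x-litellm-model-id": model_id,
            "x-litellm-cache-key": cache_key,
            "x-litellm-api-base": api_base,
        }
        # Only emit headers that actually carry a value.
        return {k: v for k, v in headers.items() if v}
```

Because the method takes no instance state, each endpoint can call it directly on the class, which is why dropping the `get_custom_headers` import from `litellm.proxy.proxy_server` is the only other change needed at every call site.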