Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
docs(pass_through.md): add doc on creating custom chat endpoints on proxy
Allows developers to call the proxy with the Anthropic SDK, boto3, etc.
parent: d2a0977af7
commit: c8a2782df8
5 changed files with 419 additions and 10 deletions
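The pass-through route added here lets the proxy present a provider-native API surface, so existing SDKs can point at it unchanged. As a rough sketch of the developer-side call, assuming a proxy running locally on port 4000 that exposes an Anthropic-style /v1/messages pass-through route (the base URL, port, and virtual key below are placeholders):

import anthropic

# Sketch only: base_url and api_key are placeholders for a locally running
# LiteLLM proxy exposing an Anthropic-style /v1/messages pass-through route.
client = anthropic.Anthropic(
    base_url="http://0.0.0.0:4000",
    api_key="sk-1234",  # LiteLLM virtual key, not a real Anthropic key
)

response = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello from the proxy"}],
)
print(response.content)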
@@ -1,4 +1,6 @@
 import ast
+import asyncio
 import json
+import traceback
 from base64 import b64encode
 
@@ -16,7 +18,8 @@ from fastapi.responses import StreamingResponse
 
 import litellm
 from litellm._logging import verbose_proxy_logger
-from litellm.proxy._types import ProxyException
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.proxy._types import ProxyException, UserAPIKeyAuth
+from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
 
 async_client = httpx.AsyncClient()
 
@@ -24,7 +27,7 @@ async_client = httpx.AsyncClient()
 
 async def set_env_variables_in_header(custom_headers: dict):
     """
-    checks if nay headers on config.yaml are defined as os.environ/COHERE_API_KEY etc
+    checks if any headers on config.yaml are defined as os.environ/COHERE_API_KEY etc
 
     only runs for headers defined on config.yaml
 
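For context on the helper whose docstring is fixed above: header values written as os.environ/<VAR> in config.yaml are meant to be resolved to the actual environment variable before the request is forwarded. A minimal usage sketch, with an illustrative header name and variable:

import asyncio
import os

# Illustrative only: the header name and environment variable are placeholders.
# Values of the form "os.environ/<VAR>" are expected to be swapped for the
# variable's value by set_env_variables_in_header (defined in this file).
os.environ["COHERE_API_KEY"] = "my-secret-key"
headers = asyncio.run(
    set_env_variables_in_header(
        custom_headers={"Authorization": "os.environ/COHERE_API_KEY"}
    )
)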
@@ -72,6 +75,171 @@ async def set_env_variables_in_header(custom_headers: dict):
     return headers
 
 
+async def chat_completion_pass_through_endpoint(
+    fastapi_response: Response,
+    request: Request,
+    adapter_id: str,
+    user_api_key_dict: UserAPIKeyAuth,
+):
+    from litellm.proxy.proxy_server import (
+        add_litellm_data_to_request,
+        general_settings,
+        get_custom_headers,
+        llm_router,
+        proxy_config,
+        proxy_logging_obj,
+        user_api_base,
+        user_max_tokens,
+        user_model,
+        user_request_timeout,
+        user_temperature,
+        version,
+    )
+
+    data = {}
+    try:
+        body = await request.body()
+        body_str = body.decode()
+        try:
+            data = ast.literal_eval(body_str)
+        except Exception:
+            data = json.loads(body_str)
+
+        data["adapter_id"] = adapter_id
+
+        verbose_proxy_logger.debug(
+            "Request received by LiteLLM:\n{}".format(json.dumps(data, indent=4)),
+        )
+        data["model"] = (
+            general_settings.get("completion_model", None)  # server default
+            or user_model  # model name passed via cli args
+            or data["model"]  # default passed in http request
+        )
+        if user_model:
+            data["model"] = user_model
+
+        data = await add_litellm_data_to_request(
+            data=data,  # type: ignore
+            request=request,
+            general_settings=general_settings,
+            user_api_key_dict=user_api_key_dict,
+            version=version,
+            proxy_config=proxy_config,
+        )
+
+        # override with user settings, these are params passed via cli
+        if user_temperature:
+            data["temperature"] = user_temperature
+        if user_request_timeout:
+            data["request_timeout"] = user_request_timeout
+        if user_max_tokens:
+            data["max_tokens"] = user_max_tokens
+        if user_api_base:
+            data["api_base"] = user_api_base
+
+        ### MODEL ALIAS MAPPING ###
+        # check if model name in model alias map
+        # get the actual model name
+        if data["model"] in litellm.model_alias_map:
+            data["model"] = litellm.model_alias_map[data["model"]]
+
+        ### CALL HOOKS ### - modify incoming data before calling the model
+        data = await proxy_logging_obj.pre_call_hook(  # type: ignore
+            user_api_key_dict=user_api_key_dict, data=data, call_type="text_completion"
+        )
+
+        ### ROUTE THE REQUESTs ###
+        router_model_names = llm_router.model_names if llm_router is not None else []
+        # skip router if user passed their key
+        if "api_key" in data:
+            llm_response = asyncio.create_task(litellm.aadapter_completion(**data))
+        elif (
+            llm_router is not None and data["model"] in router_model_names
+        ):  # model in router model list
+            llm_response = asyncio.create_task(llm_router.aadapter_completion(**data))
+        elif (
+            llm_router is not None
+            and llm_router.model_group_alias is not None
+            and data["model"] in llm_router.model_group_alias
+        ):  # model set in model_group_alias
+            llm_response = asyncio.create_task(llm_router.aadapter_completion(**data))
+        elif (
+            llm_router is not None and data["model"] in llm_router.deployment_names
+        ):  # model in router deployments, calling a specific deployment on the router
+            llm_response = asyncio.create_task(
+                llm_router.aadapter_completion(**data, specific_deployment=True)
+            )
+        elif (
+            llm_router is not None and data["model"] in llm_router.get_model_ids()
+        ):  # model in router model list
+            llm_response = asyncio.create_task(llm_router.aadapter_completion(**data))
+        elif (
+            llm_router is not None
+            and data["model"] not in router_model_names
+            and llm_router.default_deployment is not None
+        ):  # model in router deployments, calling a specific deployment on the router
+            llm_response = asyncio.create_task(llm_router.aadapter_completion(**data))
+        elif user_model is not None:  # `litellm --model <your-model-name>`
+            llm_response = asyncio.create_task(litellm.aadapter_completion(**data))
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST,
+                detail={
+                    "error": "completion: Invalid model name passed in model="
+                    + data.get("model", "")
+                },
+            )
+
+        # Await the llm_response task
+        response = await llm_response
+
+        hidden_params = getattr(response, "_hidden_params", {}) or {}
+        model_id = hidden_params.get("model_id", None) or ""
+        cache_key = hidden_params.get("cache_key", None) or ""
+        api_base = hidden_params.get("api_base", None) or ""
+        response_cost = hidden_params.get("response_cost", None) or ""
+
+        ### ALERTING ###
+        asyncio.create_task(
+            proxy_logging_obj.update_request_status(
+                litellm_call_id=data.get("litellm_call_id", ""), status="success"
+            )
+        )
+
+        verbose_proxy_logger.debug("final response: %s", response)
+
+        fastapi_response.headers.update(
+            get_custom_headers(
+                user_api_key_dict=user_api_key_dict,
+                model_id=model_id,
+                cache_key=cache_key,
+                api_base=api_base,
+                version=version,
+                response_cost=response_cost,
+            )
+        )
+
+        verbose_proxy_logger.info("\nResponse from Litellm:\n{}".format(response))
+        return response
+    except Exception as e:
+        await proxy_logging_obj.post_call_failure_hook(
+            user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
+        )
+        verbose_proxy_logger.error(
+            "litellm.proxy.proxy_server.completion(): Exception occured - {}\n{}".format(
+                str(e), traceback.format_exc()
+            )
+        )
+        verbose_proxy_logger.debug(traceback.format_exc())
+        error_msg = f"{str(e)}"
+        raise ProxyException(
+            message=getattr(e, "message", error_msg),
+            type=getattr(e, "type", "None"),
+            param=getattr(e, "param", "None"),
+            code=getattr(e, "status_code", 500),
+        )
+
+
 async def pass_through_request(request: Request, target: str, custom_headers: dict):
     try:
@@ -106,7 +274,7 @@ async def pass_through_request(request: Request, target: str, custom_headers: dict):
         )
     except Exception as e:
         verbose_proxy_logger.error(
-            "litellm.proxy.proxy_server.pass through endpoint(): Exception occured - {}".format(
+            "litellm.proxy.proxy_server.pass_through_endpoint(): Exception occured - {}".format(
                 str(e)
             )
         )
@@ -128,9 +296,37 @@ async def pass_through_request(request: Request, target: str, custom_headers: dict):
         )
 
 
-def create_pass_through_route(endpoint, target, custom_headers=None):
-    async def endpoint_func(request: Request):
-        return await pass_through_request(request, target, custom_headers)
+def create_pass_through_route(endpoint, target: str, custom_headers=None):
+    # check if target is an adapter.py or a url
+    import uuid
+
+    from litellm.proxy.utils import get_instance_fn
+
+    try:
+        if isinstance(target, CustomLogger):
+            adapter = target
+        else:
+            adapter = get_instance_fn(value=target)
+        adapter_id = str(uuid.uuid4())
+        litellm.adapters = [{"id": adapter_id, "adapter": adapter}]
+
+        async def endpoint_func(
+            request: Request,
+            fastapi_response: Response,
+            user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
+        ):
+            return await chat_completion_pass_through_endpoint(
+                fastapi_response=fastapi_response,
+                request=request,
+                adapter_id=adapter_id,
+                user_api_key_dict=user_api_key_dict,
+            )
+
+    except Exception:
+        verbose_proxy_logger.warning("Defaulting to target being a url.")
+
+        async def endpoint_func(request: Request):  # type: ignore
+            return await pass_through_request(request, target, custom_headers)
 
     return endpoint_func
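create_pass_through_route now returns a FastAPI-compatible endpoint_func whether the target resolves to an adapter instance or stays a plain URL. How the proxy mounts that function is outside this diff; a minimal sketch of the wiring under that assumption (the route path, dotted adapter path, and app setup below are illustrative, not the proxy's actual startup code):

from fastapi import FastAPI

app = FastAPI()

# "custom_callbacks.anthropic_adapter" stands in for a dotted path to a
# CustomLogger-based adapter instance that get_instance_fn can import.
# create_pass_through_route is the function modified in this file.
endpoint_func = create_pass_through_route(
    endpoint="/v1/messages",
    target="custom_callbacks.anthropic_adapter",
)
app.add_api_route(path="/v1/messages", endpoint=endpoint_func, methods=["POST"])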