forked from phoenix/litellm-mirror
use 1 file for AnthropicPassthroughLoggingHandler
This commit is contained in:
parent ddfe687b13
commit c977677c93
2 changed files with 113 additions and 68 deletions
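In short: the Anthropic-specific passthrough logging logic moves out of the PassThroughEndpointLogging class into a single dedicated file defining AnthropicPassthroughLoggingHandler, whose methods are static, so the success handler only dispatches to it. A minimal sketch of the resulting call shape, assuming the absolute import path implied by the diff's relative import:

# Sketch only: invoking the handler after this commit. The import path is
# inferred from the relative import in the diff and may differ in the repo.
from litellm.proxy.pass_through_endpoints.llm_provider_handlers.anthropic_passthrough_logging_handler import (
    AnthropicPassthroughLoggingHandler,
)

async def log_anthropic_passthrough(**call_args):
    # Before this commit the equivalent call was
    # `await self.anthropic_passthrough_handler(**call_args)` on a
    # PassThroughEndpointLogging instance; no instance is needed now.
    await AnthropicPassthroughLoggingHandler.anthropic_passthrough_handler(**call_args)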
litellm/proxy/pass_through_endpoints/llm_provider_handlers/anthropic_passthrough_logging_handler.py (new file)

@@ -0,0 +1,108 @@
+import json
+from datetime import datetime
+from typing import Union
+
+import httpx
+
+import litellm
+from litellm._logging import verbose_proxy_logger
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.litellm_core_utils.litellm_logging import (
+    get_standard_logging_object_payload,
+)
+from litellm.llms.anthropic.chat.transformation import AnthropicConfig
+
+
+class AnthropicPassthroughLoggingHandler:
+
+    @staticmethod
+    async def anthropic_passthrough_handler(
+        httpx_response: httpx.Response,
+        response_body: dict,
+        logging_obj: LiteLLMLoggingObj,
+        url_route: str,
+        result: str,
+        start_time: datetime,
+        end_time: datetime,
+        cache_hit: bool,
+        **kwargs,
+    ):
+        """
+        Transforms the Anthropic response to an OpenAI-format response and
+        generates a standard logging object so downstream logging can be handled.
+        """
+        model = response_body.get("model", "")
+        litellm_model_response: litellm.ModelResponse = (
+            AnthropicConfig._process_response(
+                response=httpx_response,
+                model_response=litellm.ModelResponse(),
+                model=model,
+                stream=False,
+                messages=[],
+                logging_obj=logging_obj,
+                optional_params={},
+                api_key="",
+                data={},
+                print_verbose=litellm.print_verbose,
+                encoding=None,
+                json_mode=False,
+            )
+        )
+
+        kwargs = AnthropicPassthroughLoggingHandler._create_anthropic_response_logging_payload(
+            litellm_model_response=litellm_model_response,
+            model=model,
+            kwargs=kwargs,
+            start_time=start_time,
+            end_time=end_time,
+            logging_obj=logging_obj,
+        )
+
+        await logging_obj.async_success_handler(
+            result=litellm_model_response,
+            start_time=start_time,
+            end_time=end_time,
+            cache_hit=cache_hit,
+            **kwargs,
+        )
+
+    @staticmethod
+    def _create_anthropic_response_logging_payload(
+        litellm_model_response: Union[
+            litellm.ModelResponse, litellm.TextCompletionResponse
+        ],
+        model: str,
+        kwargs: dict,
+        start_time: datetime,
+        end_time: datetime,
+        logging_obj: LiteLLMLoggingObj,
+    ):
+        """
+        Create the standard logging object for Anthropic passthrough.
+
+        Handles streaming and non-streaming responses.
+        """
+        response_cost = litellm.completion_cost(
+            completion_response=litellm_model_response,
+            model=model,
+        )
+        kwargs["response_cost"] = response_cost
+        kwargs["model"] = model
+
+        # Make the standard logging object for Anthropic
+        standard_logging_object = get_standard_logging_object_payload(
+            kwargs=kwargs,
+            init_response_obj=litellm_model_response,
+            start_time=start_time,
+            end_time=end_time,
+            logging_obj=logging_obj,
+            status="success",
+        )
+
+        # pretty-print the standard logging object at debug level
+        verbose_proxy_logger.debug(
+            "standard_logging_object= %s", json.dumps(standard_logging_object, indent=4)
+        )
+        kwargs["standard_logging_object"] = standard_logging_object
+        return kwargs
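To make the flow above concrete: anthropic_passthrough_handler first converts the raw Anthropic httpx response into a litellm.ModelResponse via AnthropicConfig._process_response, then _create_anthropic_response_logging_payload enriches the logging kwargs, and finally async_success_handler fans the result out to the configured loggers. A sketch of the enrichment step, assuming the class above is imported and a real LiteLLMLoggingObj is supplied by the caller:

from datetime import datetime, timezone

import litellm

def build_logging_payload(logging_obj, model_response: litellm.ModelResponse) -> dict:
    # Sketch: the returned kwargs carry "response_cost", "model", and
    # "standard_logging_object", the keys the handler adds before it
    # calls async_success_handler.
    now = datetime.now(timezone.utc)
    return AnthropicPassthroughLoggingHandler._create_anthropic_response_logging_payload(
        litellm_model_response=model_response,
        model=model_response.model or "",
        kwargs={},
        start_time=now,
        end_time=now,
        logging_obj=logging_obj,
    )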
litellm/proxy/pass_through_endpoints/success_handler.py

@@ -12,13 +12,16 @@ from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
 from litellm.litellm_core_utils.litellm_logging import (
     get_standard_logging_object_payload,
 )
 from litellm.llms.anthropic.chat.transformation import AnthropicConfig
 from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
 from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
 from litellm.types.utils import StandardPassThroughResponseObject
 
+from .llm_provider_handlers.anthropic_passthrough_logging_handler import (
+    AnthropicPassthroughLoggingHandler,
+)
 
 class PassThroughEndpointLogging:
     def __init__(self):
@@ -55,7 +58,7 @@ class PassThroughEndpointLogging:
                 **kwargs,
             )
         elif self.is_anthropic_route(url_route):
-            await self.anthropic_passthrough_handler(
+            await AnthropicPassthroughLoggingHandler.anthropic_passthrough_handler(
                 httpx_response=httpx_response,
                 response_body=response_body or {},
                 logging_obj=logging_obj,
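One detail in this hunk worth noting: the caller passes response_body or {}, so the handler's response_body.get("model", "") stays safe even when the proxy could not parse a JSON body. A tiny illustration of that guard (the model name is just an example):

from typing import Optional

def resolve_model(response_body: Optional[dict]) -> str:
    # Mirrors the guard in the diff: normalize None to {} before .get(),
    # falling back to an empty model name.
    return (response_body or {}).get("model", "")

assert resolve_model(None) == ""
assert resolve_model({"model": "claude-3-5-sonnet-20241022"}) == "claude-3-5-sonnet-20241022"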
@@ -110,72 +113,6 @@ class PassThroughEndpointLogging:
             return match.group(1)
         return "unknown"
 
-    async def anthropic_passthrough_handler(
-        self,
-        httpx_response: httpx.Response,
-        response_body: dict,
-        logging_obj: LiteLLMLoggingObj,
-        url_route: str,
-        result: str,
-        start_time: datetime,
-        end_time: datetime,
-        cache_hit: bool,
-        **kwargs,
-    ):
-        """
-        Transforms Anthropic response to OpenAI response, generates a standard logging object so downstream logging can be handled
-        """
-        model = response_body.get("model", "")
-        litellm_model_response: litellm.ModelResponse = (
-            AnthropicConfig._process_response(
-                response=httpx_response,
-                model_response=litellm.ModelResponse(),
-                model=model,
-                stream=False,
-                messages=[],
-                logging_obj=logging_obj,
-                optional_params={},
-                api_key="",
-                data={},
-                print_verbose=litellm.print_verbose,
-                encoding=None,
-                json_mode=False,
-            )
-        )
-
-        response_cost = litellm.completion_cost(
-            completion_response=litellm_model_response,
-            model=model,
-        )
-        kwargs["response_cost"] = response_cost
-        kwargs["model"] = model
-
-        # Make standard logging object for Vertex AI
-        standard_logging_object = get_standard_logging_object_payload(
-            kwargs=kwargs,
-            init_response_obj=litellm_model_response,
-            start_time=start_time,
-            end_time=end_time,
-            logging_obj=logging_obj,
-            status="success",
-        )
-
-        # pretty print standard logging object
-        verbose_proxy_logger.debug(
-            "standard_logging_object= %s", json.dumps(standard_logging_object, indent=4)
-        )
-        kwargs["standard_logging_object"] = standard_logging_object
-
-        await logging_obj.async_success_handler(
-            result=litellm_model_response,
-            start_time=start_time,
-            end_time=end_time,
-            cache_hit=cache_hit,
-            **kwargs,
-        )
-
-        pass
-
     async def vertex_passthrough_handler(
         self,
         httpx_response: httpx.Response,
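Net effect: the method body deleted above and the new static methods run the same sequence (process the response, price it, build the standard logging object, call async_success_handler), so behavior is preserved while the provider-specific code now lives in one file. The shared pricing step as a standalone sketch (the wrapper name is hypothetical; the completion_cost call matches the one in the diff):

import litellm

def price_passthrough_response(resp: litellm.ModelResponse) -> float:
    # completion_cost reads token usage off the converted response; passing
    # `model` explicitly, as the diff does, pins the name used for the
    # pricing lookup.
    return litellm.completion_cost(completion_response=resp, model=resp.model or "")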