forked from phoenix/litellm-mirror

use common _create_vertex_response_logging_payload_for_generate_content

parent 7422af75fd
commit 4972415372

2 changed files with 61 additions and 3 deletions
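This change extracts the Vertex response-logging payload construction, previously duplicated across the non-streaming and streaming success paths, into a single static helper, _create_vertex_response_logging_payload_for_generate_content, on VertexPassthroughLoggingHandler. It also fixes a copy-pasted comment in AnthropicPassthroughLoggingHandler, shown in the first hunk: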
@@ -100,7 +100,7 @@ class AnthropicPassthroughLoggingHandler:
         kwargs["response_cost"] = response_cost
         kwargs["model"] = model
 
-        # Make standard logging object for Vertex AI
+        # Make standard logging object for Anthropic
         standard_logging_object = get_standard_logging_object_payload(
             kwargs=kwargs,
             init_response_obj=litellm_model_response,
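The remaining hunks are in VertexPassthroughLoggingHandler. The non-streaming success path drops its inline model bookkeeping in favor of the shared helper, which now performs that bookkeeping itself (see the final hunk):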
@@ -56,8 +56,14 @@ class VertexPassthroughLoggingHandler:
                 encoding=None,
             )
         )
-        logging_obj.model = litellm_model_response.model or model
-        logging_obj.model_call_details["model"] = logging_obj.model
+        kwargs = VertexPassthroughLoggingHandler._create_vertex_response_logging_payload_for_generate_content(
+            litellm_model_response=litellm_model_response,
+            model=model,
+            kwargs=kwargs,
+            start_time=start_time,
+            end_time=end_time,
+            logging_obj=logging_obj,
+        )
 
         await logging_obj.async_success_handler(
             result=litellm_model_response,
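The streaming success path builds its payload the same way, passing the assembled complete_streaming_response and its litellm_logging_obj: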
@@ -147,6 +153,14 @@ class VertexPassthroughLoggingHandler:
                 "Unable to build complete streaming response for Vertex passthrough endpoint, not logging..."
             )
             return
+        kwargs = VertexPassthroughLoggingHandler._create_vertex_response_logging_payload_for_generate_content(
+            litellm_model_response=complete_streaming_response,
+            model=model,
+            kwargs=kwargs,
+            start_time=start_time,
+            end_time=end_time,
+            logging_obj=litellm_logging_obj,
+        )
         await litellm_logging_obj.async_success_handler(
             result=complete_streaming_response,
             start_time=start_time,
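Finally, the shared helper itself: it computes the response cost, builds the standard logging payload, stamps the litellm_call_id onto the response object, and records the resolved model name before returning the enriched kwargs: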
@@ -193,3 +207,47 @@ class VertexPassthroughLoggingHandler:
         if match:
             return match.group(1)
         return "unknown"
+
+    @staticmethod
+    def _create_vertex_response_logging_payload_for_generate_content(
+        litellm_model_response: Union[
+            litellm.ModelResponse, litellm.TextCompletionResponse
+        ],
+        model: str,
+        kwargs: dict,
+        start_time: datetime,
+        end_time: datetime,
+        logging_obj: LiteLLMLoggingObj,
+    ):
+        """
+        Create the standard logging object for Vertex passthrough generateContent (streaming and non-streaming)
+
+        """
+        response_cost = litellm.completion_cost(
+            completion_response=litellm_model_response,
+            model=model,
+        )
+        kwargs["response_cost"] = response_cost
+        kwargs["model"] = model
+
+        # Make standard logging object for Vertex AI
+        standard_logging_object = get_standard_logging_object_payload(
+            kwargs=kwargs,
+            init_response_obj=litellm_model_response,
+            start_time=start_time,
+            end_time=end_time,
+            logging_obj=logging_obj,
+            status="success",
+        )
+
+        # pretty print standard logging object
+        verbose_proxy_logger.debug(
+            "standard_logging_object= %s", json.dumps(standard_logging_object, indent=4)
+        )
+        kwargs["standard_logging_object"] = standard_logging_object
+
+        # set litellm_call_id to logging response object
+        litellm_model_response.id = logging_obj.litellm_call_id
+        logging_obj.model = litellm_model_response.model or model
+        logging_obj.model_call_details["model"] = logging_obj.model
+        return kwargs
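For context, the helper's core step, litellm.completion_cost, can be exercised on its own. A minimal sketch, assuming litellm is installed; the model name is illustrative (not from this commit) and must exist in litellm's cost map:

import litellm

# Price a prompt/completion pair: litellm counts the tokens in the two
# strings and looks the model's per-token rates up in its cost map.
cost = litellm.completion_cost(
    model="gemini-1.5-pro",  # illustrative; any model in litellm's cost map
    prompt="What is the capital of France?",
    completion="Paris.",
)
print(f"response_cost: {cost}")

The helper instead passes completion_response=litellm_model_response, so litellm reads token counts from the response's usage block rather than re-tokenizing raw text.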