fix(main.py): log hidden params for text completion calls

This commit is contained in:
Krrish Dholakia 2024-08-05 21:26:48 -07:00
parent 818a70838d
commit 7bf1b4d661
4 changed files with 23 additions and 24 deletions

View file

@@ -11,6 +11,7 @@ from typing import Literal, Optional
 import dotenv
 import httpx
+from pydantic import BaseModel
 import litellm
 from litellm import verbose_logger
@@ -280,11 +281,6 @@ class BraintrustLogger(CustomLogger):
         )  # if litellm_params['metadata'] == None
         metadata = self.add_metadata_from_header(litellm_params, metadata)
         clean_metadata = {}
-        try:
-            metadata = copy.deepcopy(
-                metadata
-            )  # Avoid modifying the original metadata
-        except:
         new_metadata = {}
         for key, value in metadata.items():
             if (
@@ -294,7 +290,10 @@ class BraintrustLogger(CustomLogger):
                 or isinstance(value, int)
                 or isinstance(value, float)
             ):
-                new_metadata[key] = copy.deepcopy(value)
+                new_metadata[key] = value
+            elif isinstance(value, BaseModel):
+                new_metadata[key] = value.model_dump_json()
         metadata = new_metadata
         tags = []

View file

@@ -371,6 +371,7 @@ class PredibaseChatCompletion(BaseLLM):
             response_headers["llm_provider-{}".format(k)] = v
         model_response._hidden_params["additional_headers"] = response_headers
         return model_response
     def completion(

View file

@@ -128,6 +128,7 @@ from .types.llms.openai import HttpxBinaryResponseContent
 from .types.utils import (
     AdapterCompletionStreamWrapper,
     ChatCompletionMessageToolCall,
+    HiddenParams,
     all_litellm_params,
 )
@@ -3709,6 +3710,9 @@ async def atext_completion(
         text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
         text_completion_response["choices"] = [text_choices]
         text_completion_response["usage"] = response.get("usage", None)
+        text_completion_response._hidden_params = HiddenParams(
+            **response._hidden_params
+        )
         return text_completion_response
     except Exception as e:
         custom_llm_provider = custom_llm_provider or "openai"
@@ -3980,6 +3984,7 @@ def text_completion(
     text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
     text_completion_response["choices"] = [text_choices]
     text_completion_response["usage"] = response.get("usage", None)
+    text_completion_response._hidden_params = HiddenParams(**response._hidden_params)
     return text_completion_response

View file

@@ -1,10 +1,4 @@
 model_list:
-  - model_name: "gpt-4"
+  - model_name: "*"
     litellm_params:
-      model: "gpt-4"
+      model: "*"
-  - model_name: "gpt-4"
-    litellm_params:
-      model: "gpt-4o"
-  - model_name: "gpt-4o-mini"
-    litellm_params:
-      model: "gpt-4o-mini"