forked from phoenix/litellm-mirror
fix(helicone.py): fix logging
This commit is contained in:
parent
9947b0db96
commit
31b19a420f
2 changed files with 6 additions and 1 deletion
|
@@ -2,6 +2,7 @@
|
|||
# On success, logs events to Helicone
|
||||
import dotenv, os
|
||||
import requests
|
||||
import litellm
|
||||
|
||||
dotenv.load_dotenv() # Loading env variables using dotenv
|
||||
import traceback
|
||||
|
@@ -56,6 +57,10 @@ class HeliconeLogger:
|
|||
else "gpt-3.5-turbo"
|
||||
)
|
||||
provider_request = {"model": model, "messages": messages}
|
||||
if isinstance(response_obj, litellm.EmbeddingResponse) or isinstance(
|
||||
response_obj, litellm.ModelResponse
|
||||
):
|
||||
response_obj = response_obj.json()
|
||||
|
||||
if "claude" in model:
|
||||
provider_request, response_obj = self.claude_mapping(
|
||||
|
|
|
@@ -1269,7 +1269,7 @@ class Logging:
|
|||
if callback == "helicone":
|
||||
print_verbose("reaches helicone for logging!")
|
||||
model = self.model
|
||||
messages = kwargs["messages"]
|
||||
messages = kwargs["input"]
|
||||
heliconeLogger.log_success(
|
||||
model=model,
|
||||
messages=messages,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue