mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
fix helicone-claude integration
This commit is contained in:
parent
c6cc88d1c7
commit
231a3e727b
4 changed files with 31 additions and 2 deletions
Binary file not shown.
Binary file not shown.
|
@ -2,6 +2,7 @@
|
|||
# On success, logs events to Helicone
|
||||
import dotenv, os
|
||||
import requests
|
||||
from anthropic import HUMAN_PROMPT, AI_PROMPT
|
||||
dotenv.load_dotenv() # Loading env variables using dotenv
|
||||
import traceback
|
||||
class HeliconeLogger:
|
||||
|
@ -11,6 +12,23 @@ class HeliconeLogger:
|
|||
# Instance variables
|
||||
self.provider_url = "https://api.openai.com/v1"
|
||||
self.key = os.getenv('HELICONE_API_KEY')
|
||||
|
||||
def claude_mapping(self, model, messages, response_obj):
    """Map an OpenAI-style (messages, response) pair onto Anthropic's
    prompt/completion shape so Helicone can log claude calls.

    Returns a (provider_request, provider_response) tuple: the request dict
    carries the rebuilt Human/Assistant prompt string, the response dict
    carries the completion text pulled from the OpenAI-shaped response_obj.
    """
    # Anthropic prompts always open with a Human marker; each message then
    # contributes its own turn marker + content, and the prompt closes with
    # an Assistant marker for the model's completion.
    parts = [HUMAN_PROMPT]
    for msg in messages:
        # A missing role or role == "user" is a Human turn; any other role
        # (e.g. "assistant") is an Assistant turn.
        marker = AI_PROMPT if msg.get("role", "user") != "user" else HUMAN_PROMPT
        parts.append(f"{marker}{msg['content']}")
    parts.append(AI_PROMPT)
    prompt = "".join(parts)

    provider_request = {"model": model, "prompt": prompt}

    # NOTE(review): assumes response_obj is OpenAI chat-completion shaped
    # (choices[0].message.content) — confirm against the caller.
    provider_response = {
        "completion": response_obj["choices"][0]["message"]["content"],
        "model": model,
        "stop_reason": "stop_sequence",
    }

    return provider_request, provider_response
|
||||
|
||||
def log_success(self, model, messages, response_obj, start_time, end_time):
|
||||
# Method definition
|
||||
|
@ -18,6 +36,9 @@ class HeliconeLogger:
|
|||
model = model if any(accepted_model in model for accepted_model in self.helicone_model_list) else "gpt-3.5-turbo"
|
||||
provider_request = {"model": model, "messages": messages}
|
||||
|
||||
if "claude" in model:
|
||||
provider_request, response_obj = self.claude_mapping(model=model, messages=messages, response_obj=response_obj)
|
||||
|
||||
providerResponse = {
|
||||
"json": response_obj,
|
||||
"headers": {"openai-version": "2020-10-01"},
|
||||
|
|
|
@ -30,14 +30,22 @@ def test_completion_openai():
|
|||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
test_completion_openai()
|
||||
def test_completion_non_openai():
|
||||
|
||||
|
||||
def test_completion_claude():
    """Smoke test: a claude-instant-1 completion should succeed without raising."""
    try:
        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
        # Add any assertions here to check the response
    except Exception as e:
        # Match test_completion_openai: print the full stack before failing so
        # the underlying error is visible in pytest output, not just its message.
        traceback.print_exc()
        pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
def test_completion_non_openai():
    """Smoke test: a non-OpenAI (command-nightly) completion should succeed without raising."""
    try:
        response = completion(model="command-nightly", messages=messages, logger_fn=logger_fn)
        # Add any assertions here to check the response
    except Exception as e:
        # Match test_completion_openai: print the full stack before failing so
        # the underlying error is visible in pytest output, not just its message.
        traceback.print_exc()
        pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
def test_embedding_openai():
|
||||
try:
|
||||
response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue