diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 76bf2d548a..11fdde95d0 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/integrations/__pycache__/helicone.cpython-311.pyc b/litellm/integrations/__pycache__/helicone.cpython-311.pyc
index 731a7e377f..27ab716961 100644
Binary files a/litellm/integrations/__pycache__/helicone.cpython-311.pyc and b/litellm/integrations/__pycache__/helicone.cpython-311.pyc differ
diff --git a/litellm/integrations/helicone.py b/litellm/integrations/helicone.py
index 559481daa7..ec4e9b634d 100644
--- a/litellm/integrations/helicone.py
+++ b/litellm/integrations/helicone.py
@@ -2,6 +2,7 @@
 # On success, logs events to Helicone
 import dotenv, os
 import requests
+from anthropic import HUMAN_PROMPT, AI_PROMPT
 dotenv.load_dotenv() # Loading env variables using dotenv
 import traceback
 class HeliconeLogger:
@@ -11,6 +12,23 @@ class HeliconeLogger:
         # Instance variables
         self.provider_url = "https://api.openai.com/v1"
         self.key = os.getenv('HELICONE_API_KEY')
+
+    def claude_mapping(self, model, messages, response_obj):
+        prompt = f"{HUMAN_PROMPT}"
+        for message in messages:
+            if "role" in message:
+                if message["role"] == "user":
+                    prompt += f"{HUMAN_PROMPT}{message['content']}"
+                else:
+                    prompt += f"{AI_PROMPT}{message['content']}"
+            else:
+                prompt += f"{HUMAN_PROMPT}{message['content']}"
+        prompt += f"{AI_PROMPT}"
+        claude_provider_request = {"model": model, "prompt": prompt}
+
+        claude_response_obj = {"completion": response_obj['choices'][0]['message']['content'], "model": model, "stop_reason": "stop_sequence"}
+
+        return claude_provider_request, claude_response_obj
 
     def log_success(self, model, messages, response_obj, start_time, end_time):
         # Method definition
@@ -18,6 +36,9 @@ class HeliconeLogger:
         model = model if any(accepted_model in model for accepted_model in self.helicone_model_list) else "gpt-3.5-turbo"
         provider_request = {"model": model, "messages": messages}
 
+        if "claude" in model:
+            provider_request, response_obj = self.claude_mapping(model=model, messages=messages, response_obj=response_obj)
+
         providerResponse = {
             "json": response_obj,
             "headers": {"openai-version": "2020-10-01"},
diff --git a/litellm/tests/test_client.py b/litellm/tests/test_client.py
index 1b95914cb7..d791f10ebd 100644
--- a/litellm/tests/test_client.py
+++ b/litellm/tests/test_client.py
@@ -30,14 +30,22 @@ def test_completion_openai():
     except Exception as e:
         traceback.print_exc()
         pytest.fail(f"Error occurred: {e}")
-test_completion_openai()
-def test_completion_non_openai():
+
+
+def test_completion_claude():
     try:
         response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
         # Add any assertions here to check the response
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
+def test_completion_non_openai():
+    try:
+        response = completion(model="command-nightly", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 def test_embedding_openai():
     try:
         response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
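
For reference, a minimal standalone sketch of the message-list-to-prompt translation that claude_mapping performs. It assumes only the anthropic package's HUMAN_PROMPT and AI_PROMPT constants ("\n\nHuman:" and "\n\nAssistant:"); the sample messages below are illustrative, not from the PR:

    # Sketch of claude_mapping's prompt construction, under the assumptions above.
    from anthropic import HUMAN_PROMPT, AI_PROMPT

    messages = [
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "Paris."},
    ]

    prompt = HUMAN_PROMPT  # seeded with a leading human marker, as in claude_mapping
    for m in messages:
        # user turns (and messages with no role) get the human marker;
        # any other role gets the assistant marker
        marker = HUMAN_PROMPT if m.get("role", "user") == "user" else AI_PROMPT
        prompt += f"{marker}{m['content']}"
    prompt += AI_PROMPT  # trailing assistant marker so Claude completes from there

    print(repr(prompt))
    # '\n\nHuman:\n\nHuman:What is the capital of France?\n\nAssistant:Paris.\n\nAssistant:'

Note that a user-first conversation yields a doubled leading Human marker, since the prompt is seeded with HUMAN_PROMPT before the loop appends another for the first user message.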
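
The response mapping goes the other direction: claude_mapping lifts the first choice's message content out of the OpenAI-style response and rebuilds it in Anthropic's completion shape, with stop_reason hardcoded to "stop_sequence". A sketch with an illustrative response_obj:

    # Sketch of the OpenAI-style -> Claude-style response reshaping.
    response_obj = {
        "choices": [
            {"message": {"role": "assistant", "content": "The capital of France is Paris."}}
        ]
    }

    claude_response_obj = {
        "completion": response_obj["choices"][0]["message"]["content"],
        "model": "claude-instant-1",
        "stop_reason": "stop_sequence",  # hardcoded by the mapping, not read from the response
    }
    # {'completion': 'The capital of France is Paris.', 'model': 'claude-instant-1', 'stop_reason': 'stop_sequence'}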