diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py
index 629197d51..d3a3c38a4 100644
--- a/litellm/llms/anthropic.py
+++ b/litellm/llms/anthropic.py
@@ -780,8 +780,17 @@ class AnthropicChatCompletion(BaseLLM):
         system_prompt = ""
         for idx, message in enumerate(messages):
             if message["role"] == "system":
-                system_prompt += message["content"]
-                system_prompt_indices.append(idx)
+                valid_content: bool = False
+                if isinstance(message["content"], str):
+                    system_prompt += message["content"]
+                    valid_content = True
+                elif isinstance(message["content"], list):
+                    for content in message["content"]:
+                        system_prompt += content.get("text", "")
+                    valid_content = True
+
+                if valid_content:
+                    system_prompt_indices.append(idx)
         if len(system_prompt_indices) > 0:
             for idx in reversed(system_prompt_indices):
                 messages.pop(idx)
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index a1af38379..7e3c9a241 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -1,8 +1,7 @@
 model_list:
-  - model_name: groq-llama3
+  - model_name: anthropic-claude
     litellm_params:
-      model: groq/llama3-groq-70b-8192-tool-use-preview
-      api_key: os.environ/GROQ_API_KEY
+      model: claude-3-haiku-20240307
 
 litellm_settings:
   callbacks: ["logfire"]
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index c2ce836ef..31b7b8355 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -346,7 +346,7 @@ def test_completion_claude_3_empty_response():
     messages = [
         {
             "role": "system",
-            "content": "You are 2twNLGfqk4GMOn3ffp4p.",
+            "content": [{"type": "text", "text": "You are 2twNLGfqk4GMOn3ffp4p."}],
         },
         {"role": "user", "content": "Hi gm!", "name": "ishaan"},
         {"role": "assistant", "content": "Good morning! How are you doing today?"},