fix(anthropic/chat/transformation.py): add json schema as values: json_schema
fixes passing a pydantic obj to anthropic. Fixes https://github.com/BerriAI/litellm/issues/6766
parent 7550aba474
commit f4ec93fbc3
4 changed files with 63 additions and 50 deletions
anthropic/chat/transformation.py
@@ -374,7 +374,7 @@ class AnthropicConfig:
             _input_schema["additionalProperties"] = True
             _input_schema["properties"] = {}
         else:
-            _input_schema["properties"] = json_schema
+            _input_schema["properties"] = {"values": json_schema}
 
         _tool = AnthropicMessagesTool(name="json_tool_call", input_schema=_input_schema)
         return _tool
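The one-line fix above matters because Anthropic's `input_schema.properties` must map property names to sub-schemas; assigning the whole JSON schema to `properties` yields an invalid tool definition. Below is a minimal sketch of the idea, not the actual litellm code: the helper name `build_json_tool` and the `UserInfo` model are made up for illustration, and only the `{"values": json_schema}` nesting mirrors the diff.

```python
# Sketch (assumed names) of how a pydantic response_format becomes an
# Anthropic "json_tool_call" tool after this fix.
from typing import Optional

from pydantic import BaseModel


class UserInfo(BaseModel):
    name: str
    age: int


def build_json_tool(json_schema: Optional[dict]) -> dict:
    _input_schema: dict = {"type": "object"}
    if json_schema is None:
        # No schema supplied: accept any object.
        _input_schema["additionalProperties"] = True
        _input_schema["properties"] = {}
    else:
        # Anthropic expects `properties` to map field names to sub-schemas,
        # so the full schema is nested under a single "values" property
        # instead of being assigned to `properties` wholesale.
        _input_schema["properties"] = {"values": json_schema}
    return {"name": "json_tool_call", "input_schema": _input_schema}


print(build_json_tool(UserInfo.model_json_schema()))
```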
model_prices_and_context_window.json
@@ -1884,7 +1884,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 264,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-5-haiku-20241022": {
         "max_tokens": 8192,
@@ -1900,7 +1901,8 @@
         "tool_use_system_prompt_tokens": 264,
         "supports_assistant_prefill": true,
         "supports_prompt_caching": true,
-        "supports_pdf_input": true
+        "supports_pdf_input": true,
+        "supports_response_schema": true
     },
     "claude-3-opus-20240229": {
         "max_tokens": 4096,
@@ -1916,7 +1918,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 395,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-sonnet-20240229": {
         "max_tokens": 4096,
@@ -1930,7 +1933,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 159,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-5-sonnet-20240620": {
         "max_tokens": 8192,
@@ -1946,7 +1950,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 159,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-5-sonnet-20241022": {
         "max_tokens": 8192,
@@ -1962,7 +1967,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 159,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "text-bison": {
         "max_tokens": 2048,
@@ -3852,22 +3858,6 @@
         "supports_function_calling": true,
         "tool_use_system_prompt_tokens": 264
     },
-    "anthropic/claude-3-5-sonnet-20241022": {
-        "max_tokens": 8192,
-        "max_input_tokens": 200000,
-        "max_output_tokens": 8192,
-        "input_cost_per_token": 0.000003,
-        "output_cost_per_token": 0.000015,
-        "cache_creation_input_token_cost": 0.00000375,
-        "cache_read_input_token_cost": 0.0000003,
-        "litellm_provider": "anthropic",
-        "mode": "chat",
-        "supports_function_calling": true,
-        "supports_vision": true,
-        "tool_use_system_prompt_tokens": 159,
-        "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
-    },
     "openrouter/anthropic/claude-3.5-sonnet": {
         "max_tokens": 8192,
         "max_input_tokens": 200000,
litellm/model_prices_and_context_window_backup.json
@@ -1884,7 +1884,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 264,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-5-haiku-20241022": {
         "max_tokens": 8192,
@@ -1900,7 +1901,8 @@
         "tool_use_system_prompt_tokens": 264,
         "supports_assistant_prefill": true,
         "supports_prompt_caching": true,
-        "supports_pdf_input": true
+        "supports_pdf_input": true,
+        "supports_response_schema": true
     },
     "claude-3-opus-20240229": {
         "max_tokens": 4096,
@@ -1916,7 +1918,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 395,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-sonnet-20240229": {
         "max_tokens": 4096,
@@ -1930,7 +1933,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 159,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-5-sonnet-20240620": {
         "max_tokens": 8192,
@@ -1946,7 +1950,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 159,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "claude-3-5-sonnet-20241022": {
         "max_tokens": 8192,
@@ -1962,7 +1967,8 @@
         "supports_vision": true,
         "tool_use_system_prompt_tokens": 159,
         "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
+        "supports_prompt_caching": true,
+        "supports_response_schema": true
     },
     "text-bison": {
         "max_tokens": 2048,
@@ -3852,22 +3858,6 @@
         "supports_function_calling": true,
         "tool_use_system_prompt_tokens": 264
     },
-    "anthropic/claude-3-5-sonnet-20241022": {
-        "max_tokens": 8192,
-        "max_input_tokens": 200000,
-        "max_output_tokens": 8192,
-        "input_cost_per_token": 0.000003,
-        "output_cost_per_token": 0.000015,
-        "cache_creation_input_token_cost": 0.00000375,
-        "cache_read_input_token_cost": 0.0000003,
-        "litellm_provider": "anthropic",
-        "mode": "chat",
-        "supports_function_calling": true,
-        "supports_vision": true,
-        "tool_use_system_prompt_tokens": 159,
-        "supports_assistant_prefill": true,
-        "supports_prompt_caching": true
-    },
     "openrouter/anthropic/claude-3.5-sonnet": {
         "max_tokens": 8192,
         "max_input_tokens": 200000,
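With `supports_response_schema` now set on these models, callers can gate structured-output requests on the model map. The check below is a small sketch using `litellm.utils.supports_response_schema`, the same helper the new test imports and calls the same way; the model name is just an example.

```python
import os

import litellm
from litellm.utils import supports_response_schema

# Use the local model-cost map (as the new test does) rather than fetching it.
os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
litellm.model_cost = litellm.get_model_cost_map(url="")

print(supports_response_schema("claude-3-5-sonnet-20241022", None))  # expected: True
```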
tests/llm_translation/base_llm_unit_tests.py
@@ -42,11 +42,14 @@ class BaseLLMChatTest(ABC):
                 "content": [{"type": "text", "text": "Hello, how are you?"}],
             }
         ]
-        response = litellm.completion(
-            **base_completion_call_args,
-            messages=messages,
-        )
-        assert response is not None
+        try:
+            response = litellm.completion(
+                **base_completion_call_args,
+                messages=messages,
+            )
+            assert response is not None
+        except litellm.InternalServerError:
+            pass
 
         # for OpenAI the content contains the JSON schema, so we need to assert that the content is not None
         assert response.choices[0].message.content is not None
@@ -89,6 +92,36 @@
         # relevant issue: https://github.com/BerriAI/litellm/issues/6741
         assert response.choices[0].message.content is not None
 
+    def test_json_response_pydantic_obj(self):
+        from pydantic import BaseModel
+        from litellm.utils import supports_response_schema
+
+        os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+        litellm.model_cost = litellm.get_model_cost_map(url="")
+
+        class TestModel(BaseModel):
+            first_response: str
+
+        base_completion_call_args = self.get_base_completion_call_args()
+        if not supports_response_schema(base_completion_call_args["model"], None):
+            pytest.skip("Model does not support response schema")
+
+        try:
+            res = litellm.completion(
+                **base_completion_call_args,
+                messages=[
+                    {"role": "system", "content": "You are a helpful assistant."},
+                    {
+                        "role": "user",
+                        "content": "What is the capital of France?",
+                    },
+                ],
+                response_format=TestModel,
+            )
+            assert res is not None
+        except litellm.InternalServerError:
+            pytest.skip("Model is overloaded")
+
     def test_json_response_format_stream(self):
         """
         Test that the JSON response format with streaming is supported by the LLM API
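For reference, a hedged end-to-end sketch of what the new test exercises: passing the pydantic class directly as `response_format` and reading the structured answer back. The model name and API key are assumptions, and the final parse assumes the returned content is the bare JSON object for the schema.

```python
import litellm
from pydantic import BaseModel


class TestModel(BaseModel):
    first_response: str


# Assumes ANTHROPIC_API_KEY is set; the model name is an example.
res = litellm.completion(
    model="claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    response_format=TestModel,
)

# Assumption: content holds JSON text matching TestModel's schema.
parsed = TestModel.model_validate_json(res.choices[0].message.content)
print(parsed.first_response)
```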