test_json_response_format_stream

Ishaan Jaff 2024-11-14 11:32:52 -08:00
parent eaf5723b94
commit f5f36fb96c


@@ -86,6 +86,45 @@ class BaseLLMChatTest(ABC):
        # relevant issue: https://github.com/BerriAI/litellm/issues/6741
        assert response.choices[0].message.content is not None

    def test_json_response_format_stream(self):
        """
        Test that the JSON response format with streaming is supported by the LLM API
        """
        base_completion_call_args = self.get_base_completion_call_args()
        litellm.set_verbose = True
        messages = [
            {
                "role": "system",
                "content": "Your output should be a JSON object with no additional properties.",
            },
            {
                "role": "user",
                "content": "Respond with this in json. city=San Francisco, state=CA, weather=sunny, temp=60",
            },
        ]

        response = litellm.completion(
            **base_completion_call_args,
            messages=messages,
            response_format={"type": "json_object"},
            stream=True,
        )

        print(response)
        content = ""
        for chunk in response:
            content += chunk.choices[0].delta.content or ""

        print("content=", content)

        # OpenAI guarantees that JSON-mode output is returned in the message content
        # relevant issue: https://github.com/BerriAI/litellm/issues/6741
        # Assert that the JSON was returned in the content
        # (for Anthropic it was previously returned as part of the tool call)
        assert content is not None
        assert len(content) > 0
    @pytest.fixture
    def pdf_messages(self):
        import base64
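
Note: the test asserts only that the streamed content is non-empty. For readers trying this outside the test harness, a minimal standalone sketch of the same flow is below; it additionally parses the accumulated deltas as JSON, which is an editorial suggestion rather than part of this commit, and the model name is an assumed example.

import json

import litellm


def collect_streamed_json(model: str) -> dict:
    # Stream a JSON-mode completion and accumulate the delta chunks,
    # mirroring test_json_response_format_stream above.
    response = litellm.completion(
        model=model,  # e.g. "gpt-4o-mini"; any JSON-mode-capable model
        messages=[
            {
                "role": "system",
                "content": "Your output should be a JSON object with no additional properties.",
            },
            {
                "role": "user",
                "content": "Respond with this in json. city=San Francisco, state=CA, weather=sunny, temp=60",
            },
        ],
        response_format={"type": "json_object"},
        stream=True,
    )
    content = ""
    for chunk in response:
        content += chunk.choices[0].delta.content or ""
    # json.loads raises an error if the stream did not form a complete
    # JSON document, a stricter check than len(content) > 0.
    return json.loads(content)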