Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00
BaseReasoningLLMTests

parent 5856fdf99d
commit cab2f399d2

1 changed file with 45 additions and 1 deletion
@@ -1401,7 +1401,13 @@ class BaseAnthropicChatTest(ABC):
         assert "reasoning_effort" not in optional_params
 
 
-class BaseReasoningEffortTests(ABC):
+class BaseReasoningLLMTests(ABC):
+    """
+    Base class for testing reasoning llms
+
+    - test that the responses contain reasoning_content
+    - test that the usage contains reasoning_tokens
+    """
     @abstractmethod
     def get_base_completion_call_args(self) -> dict:
         """Must return the base completion call args"""
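The class this hunk renames stays abstract, so a provider suite opts in by subclassing it and implementing `get_base_completion_call_args`. Because the base tests pass only `reasoning_effort`/`stream` on top of these args (see the third hunk below), the returned dict has to carry both the model and the messages. A minimal sketch of such a subclass; the class name, model, and prompt are illustrative assumptions, not part of this commit:

# Hypothetical provider suite built on the renamed base class;
# the model name and prompt are assumptions for illustration.
class TestAnthropicReasoningLLM(BaseReasoningLLMTests):
    def get_base_completion_call_args(self) -> dict:
        # Merged into every completion call the base tests make
        return {
            "model": "anthropic/claude-3-7-sonnet-20250219",
            "messages": [{"role": "user", "content": "What is 17 * 24?"}],
        }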
@@ -1415,6 +1421,9 @@ class BaseReasoningEffortTests(ABC):
     def test_non_streaming_reasoning_effort(self):
         """
         Base test for non-streaming reasoning effort
+
+        - Assert that `reasoning_content` is not None from response message
+        - Assert that `reasoning_tokens` is greater than 0 from usage
         """
         litellm._turn_on_debug()
         base_completion_call_args = self.get_base_completion_call_args()
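The two bullets added above describe fields litellm exposes on a non-streaming response: the reasoning text on the message, and the reasoning token count under `completion_tokens_details` (the latter is exactly what the next hunk's context line asserts). A minimal standalone sketch of that shape, assuming a reasoning-capable model (the model name and prompt are assumptions):

import litellm

# Sketch only: model and prompt are assumed, not taken from this commit
response = litellm.completion(
    model="anthropic/claude-3-7-sonnet-20250219",
    messages=[{"role": "user", "content": "What is 17 * 24?"}],
    reasoning_effort="low",
)

# Reasoning text rides on the message alongside the normal content
assert response.choices[0].message.reasoning_content is not None
# Reasoning tokens are nested under usage.completion_tokens_details
assert response.usage.completion_tokens_details.reasoning_tokens > 0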
@@ -1428,3 +1437,38 @@ class BaseReasoningEffortTests(ABC):
         assert response.usage.completion_tokens_details.reasoning_tokens > 0
 
 
+    def test_streaming_reasoning_effort(self):
+        """
+        Base test for streaming reasoning effort
+
+        - Assert that `reasoning_content` is not None from streaming response
+        - Assert that `reasoning_tokens` is greater than 0 from usage
+        """
+        # litellm._turn_on_debug()
+        base_completion_call_args = self.get_base_completion_call_args()
+        response: CustomStreamWrapper = self.completion_function(
+            **base_completion_call_args,
+            reasoning_effort="low",
+            stream=True,
+            stream_options={
+                "include_usage": True
+            }
+        )
+
+        reasoning_content: str = ""
+        usage: Optional[Usage] = None
+        for chunk in response:
+            print(chunk)
+            if hasattr(chunk.choices[0].delta, "reasoning_content"):
+                reasoning_content += chunk.choices[0].delta.reasoning_content
+            if hasattr(chunk, "usage"):
+                usage = chunk.usage
+
+        assert reasoning_content is not None
+        assert len(reasoning_content) > 0
+
+        print(f"usage: {usage}")
+        assert usage.completion_tokens_details.reasoning_tokens > 0
+
+
+
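A design note on the streaming test: `stream_options={"include_usage": True}` follows the OpenAI streaming convention, where usage is delivered on a trailing chunk rather than with every delta, so the loop has to check each chunk for a `usage` attribute instead of reading it once at the end. A standalone sketch of the same accumulation pattern outside the test harness (model and prompt are assumptions):

import litellm

# Sketch only: model and prompt are assumed, not taken from this commit
stream = litellm.completion(
    model="anthropic/claude-3-7-sonnet-20250219",
    messages=[{"role": "user", "content": "What is 17 * 24?"}],
    reasoning_effort="low",
    stream=True,
    stream_options={"include_usage": True},
)

reasoning_content, usage = "", None
for chunk in stream:
    # Guard against a trailing usage-only chunk with no choices
    delta = chunk.choices[0].delta if chunk.choices else None
    if delta is not None and getattr(delta, "reasoning_content", None):
        reasoning_content += delta.reasoning_content
    if getattr(chunk, "usage", None) is not None:
        usage = chunk.usage  # typically populated only on the final chunk

assert reasoning_content and usage.completion_tokens_details.reasoning_tokens > 0

Any subclass that implements `get_base_completion_call_args` inherits both tests unchanged.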