Add anthropic thinking + reasoning content support (#8778)

* feat(anthropic/chat/transformation.py): add anthropic thinking param support

* feat(anthropic/chat/transformation.py): support returning thinking content for anthropic on streaming responses

* feat(anthropic/chat/transformation.py): return list of thinking blocks (include block signature)

This allows the thinking blocks (with their signatures) to be passed back in subsequent tool call responses.

* fix(types/utils.py): extract and map reasoning_content from anthropic as content str

* test: add testing to ensure thinking_blocks are returned at the root

* fix(anthropic/chat/handler.py): return thinking blocks on streaming - include signature

* feat(factory.py): handle anthropic thinking blocks translation if in assistant response

* test: handle openai internal instability

* test: handle openai audio instability

* ci: pin anthropic dep

* test: handle openai audio instability

* fix: fix linting error

* refactor(anthropic/chat/transformation.py): refactor function to remain <50 LOC

* fix: fix linting error

* fix: fix linting error

* fix: fix linting error

* fix: fix linting error
This commit is contained in:
Krish Dholakia 2025-02-24 21:54:30 -08:00 committed by GitHub
parent 9914c166b7
commit 142b195784
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 332 additions and 62 deletions

View file

@ -1320,13 +1320,19 @@ def test_standard_logging_payload_audio(turn_off_message_logging, stream):
with patch.object(
customHandler, "log_success_event", new=MagicMock()
) as mock_client:
response = litellm.completion(
model="gpt-4o-audio-preview",
modalities=["text", "audio"],
audio={"voice": "alloy", "format": "pcm16"},
messages=[{"role": "user", "content": "response in 1 word - yes or no"}],
stream=stream,
)
try:
response = litellm.completion(
model="gpt-4o-audio-preview",
modalities=["text", "audio"],
audio={"voice": "alloy", "format": "pcm16"},
messages=[
{"role": "user", "content": "response in 1 word - yes or no"}
],
stream=stream,
)
except Exception as e:
if "openai-internal" in str(e):
pytest.skip("Skipping test due to openai-internal error")
if stream:
for chunk in response: