diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py
index cff0dad359..8e4879e014 100644
--- a/litellm/llms/anthropic.py
+++ b/litellm/llms/anthropic.py
@@ -12,6 +12,7 @@ import requests # type: ignore
 
 import litellm
 import litellm.litellm_core_utils
+from litellm import verbose_logger
 from litellm.litellm_core_utils.core_helpers import map_finish_reason
 from litellm.llms.custom_httpx.http_handler import (
     AsyncHTTPHandler,
@@ -730,6 +731,7 @@ class ModelResponseIterator:
 
     def chunk_parser(self, chunk: dict) -> GenericStreamingChunk:
         try:
+            verbose_logger.debug(f"\n\nRaw chunk:\n{chunk}\n")
             type_chunk = chunk.get("type", "") or ""
 
             text = ""
@@ -770,9 +772,7 @@ class ModelResponseIterator:
                         "type": "function",
                         "function": {
                             "name": content_block_start["content_block"]["name"],
-                            "arguments": json.dumps(
-                                content_block_start["content_block"]["input"]
-                            ),
+                            "arguments": "",
                         },
                     }
             elif type_chunk == "message_delta":
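Why the `chunk_parser` change matters: Anthropic streams a tool call as a `content_block_start` event whose `input` is still an empty object, followed by `content_block_delta` events carrying the arguments as `partial_json` fragments. Serializing the initial `input` therefore prepended a spurious `"{}"`, so the arguments string assembled from the deltas no longer parsed as JSON. A minimal sketch of the accumulation the patch enables (the event payloads are illustrative stand-ins for Anthropic's documented event shapes, not captured output):

```python
import json

# Simulated Anthropic streaming events for a single tool-use block.
# The arguments arrive only in the "partial_json" fragments; the
# "input" on content_block_start is still empty at that point.
events = [
    {"type": "content_block_start",
     "content_block": {"type": "tool_use", "name": "get_weather", "input": {}}},
    {"type": "content_block_delta",
     "delta": {"type": "input_json_delta", "partial_json": '{"location": '}},
    {"type": "content_block_delta",
     "delta": {"type": "input_json_delta", "partial_json": '"Boston"}'}},
]

arguments = ""  # start empty, as the patch now does
for event in events:
    if event["type"] == "content_block_delta":
        arguments += event["delta"]["partial_json"]

# The concatenated fragments form one valid JSON document. Prefixing
# json.dumps({}) == "{}" would have made this json.loads call fail.
assert json.loads(arguments) == {"location": "Boston"}
```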
diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html
deleted file mode 100644
index d0feae1123..0000000000
--- a/litellm/proxy/_experimental/out/404.html
+++ /dev/null
@@ -1 +0,0 @@
-404: This page could not be found.LiteLLM Dashboard
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/model_hub.html b/litellm/proxy/_experimental/out/model_hub.html
deleted file mode 100644
index 17b882a7cd..0000000000
--- a/litellm/proxy/_experimental/out/model_hub.html
+++ /dev/null
@@ -1 +0,0 @@
-LiteLLM Dashboard
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/onboarding.html b/litellm/proxy/_experimental/out/onboarding.html
deleted file mode 100644
index d91b817622..0000000000
--- a/litellm/proxy/_experimental/out/onboarding.html
+++ /dev/null
@@ -1 +0,0 @@
-LiteLLM Dashboard
\ No newline at end of file
diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml
index e5fdbf802e..b22f506c69 100644
--- a/litellm/proxy/_super_secret_config.yaml
+++ b/litellm/proxy/_super_secret_config.yaml
@@ -38,6 +38,9 @@ model_list:
 - litellm_params:
     model: anthropic.claude-3-sonnet-20240229-v1:0
   model_name: bedrock-anthropic-claude-3
+- litellm_params:
+    model: claude-3-haiku-20240307
+  model_name: anthropic-claude-3
 - litellm_params:
     api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
     api_key: os.environ/AZURE_API_KEY
diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index d664a69bc5..b2e2b29b71 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -2559,9 +2559,16 @@ def streaming_and_function_calling_format_tests(idx, chunk):
 
 
 @pytest.mark.parametrize(
-    "model", ["gpt-3.5-turbo", "anthropic.claude-3-sonnet-20240229-v1:0"]
+    "model",
+    [
+        "gpt-3.5-turbo",
+        "anthropic.claude-3-sonnet-20240229-v1:0",
+        "claude-3-haiku-20240307",
+    ],
 )
 def test_streaming_and_function_calling(model):
+    import json
+
     tools = [
         {
             "type": "function",
@@ -2594,6 +2601,7 @@ def test_streaming_and_function_calling(model):
             tool_choice="required",
         )  # type: ignore
         # Add any assertions here to check the response
+        json_str = ""
         for idx, chunk in enumerate(response):
             # continue
             print("\n{}\n".format(chunk))
@@ -2604,7 +2612,10 @@ def test_streaming_and_function_calling(model):
                 assert isinstance(
                     chunk.choices[0].delta.tool_calls[0].function.arguments, str
                 )
-                # assert False
+            if chunk.choices[0].delta.tool_calls is not None:
+                json_str += chunk.choices[0].delta.tool_calls[0].function.arguments
+
+        print(json.loads(json_str))
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
         raise e
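The test change mirrors what a streaming caller now does: concatenate each chunk's `arguments` fragment and parse once the stream ends. A consumer-side sketch under the same assumptions as the test (an `ANTHROPIC_API_KEY` in the environment; the tool schema is abridged from the test's weather tool):

```python
import json

import litellm

# Abridged weather tool, following the shape used in test_streaming.py.
tools = [{
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

response = litellm.completion(
    model="claude-3-haiku-20240307",
    messages=[{"role": "user", "content": "What's the weather in Boston?"}],
    tools=tools,
    tool_choice="required",
    stream=True,
)

# Reassemble the tool-call arguments from the streamed fragments.
json_str = ""
for chunk in response:
    tool_calls = chunk.choices[0].delta.tool_calls
    if tool_calls is not None:
        json_str += tool_calls[0].function.arguments

print(json.loads(json_str))  # e.g. {"location": "Boston"}
```

As in the test, this assumes a single tool call per response; parallel tool calls would need per-index accumulation keyed on each streamed tool call's `index`.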