From 50d2510b60fd7b825bb1a5d79ba3bb741cfccc1b Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Thu, 21 Nov 2024 23:44:40 +0530
Subject: [PATCH 1/6] test: cleanup mistral model

---
 tests/local_testing/test_router.py    | 2 +-
 tests/local_testing/test_streaming.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/local_testing/test_router.py b/tests/local_testing/test_router.py
index cd5e8f6b2..20867e766 100644
--- a/tests/local_testing/test_router.py
+++ b/tests/local_testing/test_router.py
@@ -1450,7 +1450,7 @@ async def test_mistral_on_router():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "mistral/mistral-medium",
+                "model": "mistral/mistral-small-latest",
             },
         },
     ]
diff --git a/tests/local_testing/test_streaming.py b/tests/local_testing/test_streaming.py
index 0bc6953f9..757ff4d61 100644
--- a/tests/local_testing/test_streaming.py
+++ b/tests/local_testing/test_streaming.py
@@ -683,7 +683,7 @@ def test_completion_ollama_hosted_stream():
     [
         # "claude-3-5-haiku-20241022",
         # "claude-2",
-        # "mistral/mistral-medium",
+        # "mistral/mistral-small-latest",
         "openrouter/openai/gpt-4o-mini",
     ],
 )

From a7d55368722436c86c8ede406543088bd353bf7c Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 21 Nov 2024 11:46:50 -0800
Subject: [PATCH 2/6] (fix) passthrough - allow internal users to access /anthropic (#6843)

* fix /anthropic/
* test llm_passthrough_router
* fix test_gemini_pass_through_endpoint
---
 litellm/proxy/auth/route_checks.py              |  4 ++++
 .../llm_passthrough_endpoints.py}               |  4 +---
 litellm/proxy/proxy_server.py                   |  8 ++++----
 .../test_route_check_unit_tests.py              | 12 ++++++++++++
 tests/proxy_unit_tests/test_proxy_server.py     |  2 +-
 5 files changed, 22 insertions(+), 8 deletions(-)
 rename litellm/proxy/{vertex_ai_endpoints/google_ai_studio_endpoints.py => pass_through_endpoints/llm_passthrough_endpoints.py} (98%)

diff --git a/litellm/proxy/auth/route_checks.py b/litellm/proxy/auth/route_checks.py
index c75c1e66c..9496776a8 100644
--- a/litellm/proxy/auth/route_checks.py
+++ b/litellm/proxy/auth/route_checks.py
@@ -192,6 +192,10 @@ class RouteChecks:
             return True
         if "/langfuse/" in route:
             return True
+        if "/anthropic/" in route:
+            return True
+        if "/azure/" in route:
+            return True
         return False
 
     @staticmethod
diff --git a/litellm/proxy/vertex_ai_endpoints/google_ai_studio_endpoints.py b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py
similarity index 98%
rename from litellm/proxy/vertex_ai_endpoints/google_ai_studio_endpoints.py
rename to litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py
index c4a64fa21..0834102b3 100644
--- a/litellm/proxy/vertex_ai_endpoints/google_ai_studio_endpoints.py
+++ b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py
@@ -2,10 +2,8 @@
 What is this?
 
 Provider-specific Pass-Through Endpoints
-"""
-"""
-1. Create pass-through endpoints for any LITELLM_BASE_URL/gemini/ map to https://generativelanguage.googleapis.com/
+Use litellm with Anthropic SDK, Vertex AI SDK, Cohere SDK, etc.
""" import ast diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 1551330d1..9d7c120a7 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -203,6 +203,9 @@ from litellm.proxy.openai_files_endpoints.files_endpoints import ( router as openai_files_router, ) from litellm.proxy.openai_files_endpoints.files_endpoints import set_files_config +from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + router as llm_passthrough_router, +) from litellm.proxy.pass_through_endpoints.pass_through_endpoints import ( initialize_pass_through_endpoints, ) @@ -233,9 +236,6 @@ from litellm.proxy.utils import ( reset_budget, update_spend, ) -from litellm.proxy.vertex_ai_endpoints.google_ai_studio_endpoints import ( - router as gemini_router, -) from litellm.proxy.vertex_ai_endpoints.langfuse_endpoints import ( router as langfuse_router, ) @@ -9128,7 +9128,7 @@ app.include_router(router) app.include_router(rerank_router) app.include_router(fine_tuning_router) app.include_router(vertex_router) -app.include_router(gemini_router) +app.include_router(llm_passthrough_router) app.include_router(langfuse_router) app.include_router(pass_through_router) app.include_router(health_router) diff --git a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py index 001cc0640..a8bba211f 100644 --- a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py +++ b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py @@ -27,6 +27,9 @@ from fastapi import HTTPException, Request import pytest from litellm.proxy.auth.route_checks import RouteChecks from litellm.proxy._types import LiteLLM_UserTable, LitellmUserRoles, UserAPIKeyAuth +from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + router as llm_passthrough_router, +) # Replace the actual hash_token function with our mock import litellm.proxy.auth.route_checks @@ -56,12 +59,21 @@ def test_is_llm_api_route(): assert RouteChecks.is_llm_api_route("/vertex-ai/text") is True assert RouteChecks.is_llm_api_route("/gemini/generate") is True assert RouteChecks.is_llm_api_route("/cohere/generate") is True + assert RouteChecks.is_llm_api_route("/anthropic/messages") is True + assert RouteChecks.is_llm_api_route("/anthropic/v1/messages") is True + assert RouteChecks.is_llm_api_route("/azure/endpoint") is True # check non-matching routes assert RouteChecks.is_llm_api_route("/some/random/route") is False assert RouteChecks.is_llm_api_route("/key/regenerate/82akk800000000jjsk") is False assert RouteChecks.is_llm_api_route("/key/82akk800000000jjsk/delete") is False + # check all routes in llm_passthrough_router, ensure they are considered llm api routes + for route in llm_passthrough_router.routes: + route_path = str(route.path) + print("route_path", route_path) + assert RouteChecks.is_llm_api_route(route_path) is True + # Test _route_matches_pattern def test_route_matches_pattern(): diff --git a/tests/proxy_unit_tests/test_proxy_server.py b/tests/proxy_unit_tests/test_proxy_server.py index b1c00ce75..d70962858 100644 --- a/tests/proxy_unit_tests/test_proxy_server.py +++ b/tests/proxy_unit_tests/test_proxy_server.py @@ -1794,7 +1794,7 @@ async def test_add_callback_via_key_litellm_pre_call_utils_langsmith( async def test_gemini_pass_through_endpoint(): from starlette.datastructures import URL - from litellm.proxy.vertex_ai_endpoints.google_ai_studio_endpoints import ( + from 
litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( Request, Response, gemini_proxy_route, From 7e5085dc7b0219686282f3fe510300f1e8134dc2 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Fri, 22 Nov 2024 01:53:52 +0530 Subject: [PATCH 3/6] Litellm dev 11 21 2024 (#6837) * Fix Vertex AI function calling invoke: use JSON format instead of protobuf text format. (#6702) * test: test tool_call conversion when arguments is empty dict Fixes https://github.com/BerriAI/litellm/issues/6833 * fix(openai_like/handler.py): return more descriptive error message Fixes https://github.com/BerriAI/litellm/issues/6812 * test: skip overloaded model * docs(anthropic.md): update anthropic docs to show how to route to any new model * feat(groq/): fake stream when 'response_format' param is passed Groq doesn't support streaming when response_format is set * feat(groq/): add response_format support for groq Closes https://github.com/BerriAI/litellm/issues/6845 * fix(o1_handler.py): remove fake streaming for o1 Closes https://github.com/BerriAI/litellm/issues/6801 * build(model_prices_and_context_window.json): add groq llama3.2b model pricing Closes https://github.com/BerriAI/litellm/issues/6807 * fix(utils.py): fix handling ollama response format param Fixes https://github.com/BerriAI/litellm/issues/6848#issuecomment-2491215485 * docs(sidebars.js): refactor chat endpoint placement * fix: fix linting errors * test: fix test * test: fix test * fix(openai_like/handler): handle max retries * fix(streaming_handler.py): fix streaming check for openai-compatible providers * test: update test * test: correctly handle model is overloaded error * test: update test * test: fix test * test: mark flaky test --------- Co-authored-by: Guowang Li --- .../docs/embedding/supported_embedding.md | 2 +- docs/my-website/docs/image_generation.md | 2 +- docs/my-website/docs/providers/anthropic.md | 46 +++++--- docs/my-website/sidebars.js | 74 ++++++------ .../litellm_core_utils/streaming_handler.py | 2 +- litellm/llms/OpenAI/chat/o1_handler.py | 36 +----- litellm/llms/groq/chat/handler.py | 79 +++++++------ litellm/llms/groq/chat/transformation.py | 74 +++++++++++- litellm/llms/ollama.py | 24 ++++ litellm/llms/openai_like/chat/handler.py | 108 ++++++++++++------ .../llms/openai_like/chat/transformation.py | 98 ++++++++++++++++ litellm/llms/openai_like/embedding/handler.py | 2 +- litellm/llms/prompt_templates/factory.py | 79 +++---------- litellm/llms/watsonx/chat/handler.py | 6 +- litellm/main.py | 3 +- ...odel_prices_and_context_window_backup.json | 99 ++++++++++++++-- litellm/proxy/_new_secret_config.yaml | 1 - litellm/types/llms/vertex_ai.py | 13 +-- litellm/utils.py | 84 ++++++-------- model_prices_and_context_window.json | 99 ++++++++++++++-- tests/llm_translation/base_llm_unit_tests.py | 43 +++++-- .../test_anthropic_completion.py | 9 ++ .../test_deepseek_completion.py | 4 + tests/llm_translation/test_groq.py | 12 ++ tests/llm_translation/test_mistral_api.py | 4 + tests/llm_translation/test_optional_params.py | 14 +++ tests/llm_translation/test_vertex.py | 97 +++++----------- .../test_amazing_vertex_completion.py | 31 ++--- tests/local_testing/test_ollama.py | 3 +- .../test_router_batch_completion.py | 1 + tests/local_testing/test_utils.py | 1 + 31 files changed, 747 insertions(+), 403 deletions(-) create mode 100644 litellm/llms/openai_like/chat/transformation.py create mode 100644 tests/llm_translation/test_groq.py diff --git a/docs/my-website/docs/embedding/supported_embedding.md 
b/docs/my-website/docs/embedding/supported_embedding.md index 5250ea403..603e04dd9 100644 --- a/docs/my-website/docs/embedding/supported_embedding.md +++ b/docs/my-website/docs/embedding/supported_embedding.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Embedding Models +# Embeddings ## Quick Start ```python diff --git a/docs/my-website/docs/image_generation.md b/docs/my-website/docs/image_generation.md index 5a7ef6f4f..958ff4c02 100644 --- a/docs/my-website/docs/image_generation.md +++ b/docs/my-website/docs/image_generation.md @@ -1,4 +1,4 @@ -# Image Generation +# Images ## Quick Start diff --git a/docs/my-website/docs/providers/anthropic.md b/docs/my-website/docs/providers/anthropic.md index d4660b807..b3bfe333c 100644 --- a/docs/my-website/docs/providers/anthropic.md +++ b/docs/my-website/docs/providers/anthropic.md @@ -10,6 +10,35 @@ LiteLLM supports all anthropic models. - `claude-2.1` - `claude-instant-1.2` + +| Property | Details | +|-------|-------| +| Description | Claude is a highly performant, trustworthy, and intelligent AI platform built by Anthropic. Claude excels at tasks involving language, reasoning, analysis, coding, and more. | +| Provider Route on LiteLLM | `anthropic/` (add this prefix to the model name, to route any requests to Anthropic - e.g. `anthropic/claude-3-5-sonnet-20240620`) | +| Provider Doc | [Anthropic ↗](https://docs.anthropic.com/en/docs/build-with-claude/overview) | +| API Endpoint for Provider | https://api.anthropic.com | +| Supported Endpoints | `/chat/completions` | + + +## Supported OpenAI Parameters + +Check this in code, [here](../completion/input.md#translated-openai-params) + +``` +"stream", +"stop", +"temperature", +"top_p", +"max_tokens", +"max_completion_tokens", +"tools", +"tool_choice", +"extra_headers", +"parallel_tool_calls", +"response_format", +"user" +``` + :::info Anthropic API fails requests when `max_tokens` are not passed. Due to this litellm passes `max_tokens=4096` when no `max_tokens` are passed. 
@@ -1006,20 +1035,3 @@ curl http://0.0.0.0:4000/v1/chat/completions \ - -## All Supported OpenAI Params - -``` -"stream", -"stop", -"temperature", -"top_p", -"max_tokens", -"max_completion_tokens", -"tools", -"tool_choice", -"extra_headers", -"parallel_tool_calls", -"response_format", -"user" -``` \ No newline at end of file diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 50cc83c08..f01402299 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -199,46 +199,52 @@ const sidebars = { ], }, - { - type: "category", - label: "Guides", - link: { - type: "generated-index", - title: "Chat Completions", - description: "Details on the completion() function", - slug: "/completion", - }, - items: [ - "completion/input", - "completion/provider_specific_params", - "completion/json_mode", - "completion/prompt_caching", - "completion/audio", - "completion/vision", - "completion/predict_outputs", - "completion/prefix", - "completion/drop_params", - "completion/prompt_formatting", - "completion/output", - "completion/usage", - "exception_mapping", - "completion/stream", - "completion/message_trimming", - "completion/function_call", - "completion/model_alias", - "completion/batching", - "completion/mock_requests", - "completion/reliable_completions", - ], - }, { type: "category", label: "Supported Endpoints", items: [ + { + type: "category", + label: "Chat", + link: { + type: "generated-index", + title: "Chat Completions", + description: "Details on the completion() function", + slug: "/completion", + }, + items: [ + "completion/input", + "completion/provider_specific_params", + "completion/json_mode", + "completion/prompt_caching", + "completion/audio", + "completion/vision", + "completion/predict_outputs", + "completion/prefix", + "completion/drop_params", + "completion/prompt_formatting", + "completion/output", + "completion/usage", + "exception_mapping", + "completion/stream", + "completion/message_trimming", + "completion/function_call", + "completion/model_alias", + "completion/batching", + "completion/mock_requests", + "completion/reliable_completions", + ], + }, "embedding/supported_embedding", "image_generation", - "audio_transcription", - "text_to_speech", + { + type: "category", + label: "Audio", + "items": [ + "audio_transcription", + "text_to_speech", + ] + }, "rerank", "assistants", "batches", diff --git a/litellm/litellm_core_utils/streaming_handler.py b/litellm/litellm_core_utils/streaming_handler.py index 5c18ff512..483121c38 100644 --- a/litellm/litellm_core_utils/streaming_handler.py +++ b/litellm/litellm_core_utils/streaming_handler.py @@ -1793,7 +1793,7 @@ class CustomStreamWrapper: or self.custom_llm_provider == "bedrock" or self.custom_llm_provider == "triton" or self.custom_llm_provider == "watsonx" - or self.custom_llm_provider in litellm.openai_compatible_endpoints + or self.custom_llm_provider in litellm.openai_compatible_providers or self.custom_llm_provider in litellm._custom_providers ): async for chunk in self.completion_stream: diff --git a/litellm/llms/OpenAI/chat/o1_handler.py b/litellm/llms/OpenAI/chat/o1_handler.py index 55dfe3715..5ff53a896 100644 --- a/litellm/llms/OpenAI/chat/o1_handler.py +++ b/litellm/llms/OpenAI/chat/o1_handler.py @@ -17,22 +17,6 @@ from litellm.utils import CustomStreamWrapper class OpenAIO1ChatCompletion(OpenAIChatCompletion): - async def mock_async_streaming( - self, - response: Any, - model: Optional[str], - logging_obj: Any, - ): - model_response = await response - completion_stream = 
MockResponseIterator(model_response=model_response) - streaming_response = CustomStreamWrapper( - completion_stream=completion_stream, - model=model, - custom_llm_provider="openai", - logging_obj=logging_obj, - ) - return streaming_response - def completion( self, model_response: ModelResponse, @@ -54,7 +38,7 @@ class OpenAIO1ChatCompletion(OpenAIChatCompletion): custom_llm_provider: Optional[str] = None, drop_params: Optional[bool] = None, ): - stream: Optional[bool] = optional_params.pop("stream", False) + # stream: Optional[bool] = optional_params.pop("stream", False) response = super().completion( model_response, timeout, @@ -76,20 +60,4 @@ class OpenAIO1ChatCompletion(OpenAIChatCompletion): drop_params, ) - if stream is True: - if asyncio.iscoroutine(response): - return self.mock_async_streaming( - response=response, model=model, logging_obj=logging_obj # type: ignore - ) - - completion_stream = MockResponseIterator(model_response=response) - streaming_response = CustomStreamWrapper( - completion_stream=completion_stream, - model=model, - custom_llm_provider="openai", - logging_obj=logging_obj, - ) - - return streaming_response - else: - return response + return response diff --git a/litellm/llms/groq/chat/handler.py b/litellm/llms/groq/chat/handler.py index f4a16abc8..1fe87844c 100644 --- a/litellm/llms/groq/chat/handler.py +++ b/litellm/llms/groq/chat/handler.py @@ -6,55 +6,68 @@ from typing import Any, Callable, Optional, Union from httpx._config import Timeout +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +from litellm.types.utils import CustomStreamingDecoder from litellm.utils import ModelResponse from ...groq.chat.transformation import GroqChatConfig -from ...OpenAI.openai import OpenAIChatCompletion +from ...openai_like.chat.handler import OpenAILikeChatHandler -class GroqChatCompletion(OpenAIChatCompletion): +class GroqChatCompletion(OpenAILikeChatHandler): def __init__(self, **kwargs): super().__init__(**kwargs) def completion( self, + *, + model: str, + messages: list, + api_base: str, + custom_llm_provider: str, + custom_prompt_dict: dict, model_response: ModelResponse, - timeout: Union[float, Timeout], + print_verbose: Callable, + encoding, + api_key: Optional[str], + logging_obj, optional_params: dict, - logging_obj: Any, - model: Optional[str] = None, - messages: Optional[list] = None, - print_verbose: Optional[Callable[..., Any]] = None, - api_key: Optional[str] = None, - api_base: Optional[str] = None, - acompletion: bool = False, + acompletion=None, litellm_params=None, logger_fn=None, headers: Optional[dict] = None, - custom_prompt_dict: dict = {}, - client=None, - organization: Optional[str] = None, - custom_llm_provider: Optional[str] = None, - drop_params: Optional[bool] = None, + timeout: Optional[Union[float, Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + custom_endpoint: Optional[bool] = None, + streaming_decoder: Optional[CustomStreamingDecoder] = None, + fake_stream: bool = False ): messages = GroqChatConfig()._transform_messages(messages) # type: ignore + + if optional_params.get("stream") is True: + fake_stream = GroqChatConfig()._should_fake_stream(optional_params) + else: + fake_stream = False + return super().completion( - model_response, - timeout, - optional_params, - logging_obj, - model, - messages, - print_verbose, - api_key, - api_base, - acompletion, - litellm_params, - logger_fn, - headers, - custom_prompt_dict, - client, - organization, - custom_llm_provider, - 
drop_params, + model=model, + messages=messages, + api_base=api_base, + custom_llm_provider=custom_llm_provider, + custom_prompt_dict=custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + optional_params=optional_params, + acompletion=acompletion, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + timeout=timeout, + client=client, + custom_endpoint=custom_endpoint, + streaming_decoder=streaming_decoder, + fake_stream=fake_stream, ) diff --git a/litellm/llms/groq/chat/transformation.py b/litellm/llms/groq/chat/transformation.py index 4baba7657..dddc56a2c 100644 --- a/litellm/llms/groq/chat/transformation.py +++ b/litellm/llms/groq/chat/transformation.py @@ -2,6 +2,7 @@ Translate from OpenAI's `/v1/chat/completions` to Groq's `/v1/chat/completions` """ +import json import types from typing import List, Optional, Tuple, Union @@ -9,7 +10,12 @@ from pydantic import BaseModel import litellm from litellm.secret_managers.main import get_secret_str -from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage +from litellm.types.llms.openai import ( + AllMessageValues, + ChatCompletionAssistantMessage, + ChatCompletionToolParam, + ChatCompletionToolParamFunctionChunk, +) from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig @@ -99,3 +105,69 @@ class GroqChatConfig(OpenAIGPTConfig): ) # type: ignore dynamic_api_key = api_key or get_secret_str("GROQ_API_KEY") return api_base, dynamic_api_key + + def _should_fake_stream(self, optional_params: dict) -> bool: + """ + Groq doesn't support 'response_format' while streaming + """ + if optional_params.get("response_format") is not None: + return True + + return False + + def _create_json_tool_call_for_response_format( + self, + json_schema: dict, + ): + """ + Handles creating a tool call for getting responses in JSON format. + + Args: + json_schema (Optional[dict]): The JSON schema the response should be in + + Returns: + AnthropicMessagesTool: The tool call to send to Anthropic API to get responses in JSON format + """ + return ChatCompletionToolParam( + type="function", + function=ChatCompletionToolParamFunctionChunk( + name="json_tool_call", + parameters=json_schema, + ), + ) + + def map_openai_params( + self, + non_default_params: dict, + optional_params: dict, + model: str, + drop_params: bool = False, + ) -> dict: + _response_format = non_default_params.get("response_format") + if _response_format is not None and isinstance(_response_format, dict): + json_schema: Optional[dict] = None + if "response_schema" in _response_format: + json_schema = _response_format["response_schema"] + elif "json_schema" in _response_format: + json_schema = _response_format["json_schema"]["schema"] + """ + When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode + - You usually want to provide a single tool + - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool + - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model’s perspective. 
+ """ + if json_schema is not None: + _tool_choice = { + "type": "function", + "function": {"name": "json_tool_call"}, + } + _tool = self._create_json_tool_call_for_response_format( + json_schema=json_schema, + ) + optional_params["tools"] = [_tool] + optional_params["tool_choice"] = _tool_choice + optional_params["json_mode"] = True + non_default_params.pop("response_format", None) + return super().map_openai_params( + non_default_params, optional_params, model, drop_params + ) diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py index 842d946c6..896b93be5 100644 --- a/litellm/llms/ollama.py +++ b/litellm/llms/ollama.py @@ -164,6 +164,30 @@ class OllamaConfig: "response_format", ] + def map_openai_params( + self, optional_params: dict, non_default_params: dict + ) -> dict: + for param, value in non_default_params.items(): + if param == "max_tokens": + optional_params["num_predict"] = value + if param == "stream": + optional_params["stream"] = value + if param == "temperature": + optional_params["temperature"] = value + if param == "seed": + optional_params["seed"] = value + if param == "top_p": + optional_params["top_p"] = value + if param == "frequency_penalty": + optional_params["repeat_penalty"] = value + if param == "stop": + optional_params["stop"] = value + if param == "response_format" and isinstance(value, dict): + if value["type"] == "json_object": + optional_params["format"] = "json" + + return optional_params + def _supports_function_calling(self, ollama_model_info: dict) -> bool: """ Check if the 'template' field in the ollama_model_info contains a 'tools' or 'function' key. diff --git a/litellm/llms/openai_like/chat/handler.py b/litellm/llms/openai_like/chat/handler.py index 0dbc3a978..baa970304 100644 --- a/litellm/llms/openai_like/chat/handler.py +++ b/litellm/llms/openai_like/chat/handler.py @@ -17,7 +17,9 @@ import httpx # type: ignore import requests # type: ignore import litellm +from litellm import LlmProviders from litellm.litellm_core_utils.core_helpers import map_finish_reason +from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator from litellm.llms.custom_httpx.http_handler import ( AsyncHTTPHandler, HTTPHandler, @@ -25,9 +27,19 @@ from litellm.llms.custom_httpx.http_handler import ( ) from litellm.llms.databricks.streaming_utils import ModelResponseIterator from litellm.types.utils import CustomStreamingDecoder, ModelResponse -from litellm.utils import CustomStreamWrapper, EmbeddingResponse +from litellm.utils import ( + Choices, + CustomStreamWrapper, + EmbeddingResponse, + Message, + ProviderConfigManager, + TextCompletionResponse, + Usage, + convert_to_model_response_object, +) from ..common_utils import OpenAILikeBase, OpenAILikeError +from .transformation import OpenAILikeChatConfig async def make_call( @@ -39,16 +51,22 @@ async def make_call( messages: list, logging_obj, streaming_decoder: Optional[CustomStreamingDecoder] = None, + fake_stream: bool = False, ): if client is None: client = litellm.module_level_aclient - response = await client.post(api_base, headers=headers, data=data, stream=True) + response = await client.post( + api_base, headers=headers, data=data, stream=not fake_stream + ) if streaming_decoder is not None: completion_stream: Any = streaming_decoder.aiter_bytes( response.aiter_bytes(chunk_size=1024) ) + elif fake_stream: + model_response = ModelResponse(**response.json()) + completion_stream = MockResponseIterator(model_response=model_response) else: completion_stream = ModelResponseIterator( 
streaming_response=response.aiter_lines(), sync_stream=False @@ -73,11 +91,12 @@ def make_sync_call( messages: list, logging_obj, streaming_decoder: Optional[CustomStreamingDecoder] = None, + fake_stream: bool = False, ): if client is None: client = litellm.module_level_client # Create a new client if none provided - response = client.post(api_base, headers=headers, data=data, stream=True) + response = client.post(api_base, headers=headers, data=data, stream=not fake_stream) if response.status_code != 200: raise OpenAILikeError(status_code=response.status_code, message=response.read()) @@ -86,6 +105,9 @@ def make_sync_call( completion_stream = streaming_decoder.iter_bytes( response.iter_bytes(chunk_size=1024) ) + elif fake_stream: + model_response = ModelResponse(**response.json()) + completion_stream = MockResponseIterator(model_response=model_response) else: completion_stream = ModelResponseIterator( streaming_response=response.iter_lines(), sync_stream=True @@ -126,8 +148,8 @@ class OpenAILikeChatHandler(OpenAILikeBase): headers={}, client: Optional[AsyncHTTPHandler] = None, streaming_decoder: Optional[CustomStreamingDecoder] = None, + fake_stream: bool = False, ) -> CustomStreamWrapper: - data["stream"] = True completion_stream = await make_call( client=client, @@ -169,6 +191,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): logger_fn=None, headers={}, timeout: Optional[Union[float, httpx.Timeout]] = None, + json_mode: bool = False, ) -> ModelResponse: if timeout is None: timeout = httpx.Timeout(timeout=600.0, connect=5.0) @@ -181,8 +204,6 @@ class OpenAILikeChatHandler(OpenAILikeBase): api_base, headers=headers, data=json.dumps(data), timeout=timeout ) response.raise_for_status() - - response_json = response.json() except httpx.HTTPStatusError as e: raise OpenAILikeError( status_code=e.response.status_code, @@ -193,22 +214,26 @@ class OpenAILikeChatHandler(OpenAILikeBase): except Exception as e: raise OpenAILikeError(status_code=500, message=str(e)) - logging_obj.post_call( - input=messages, - api_key="", - original_response=response_json, - additional_args={"complete_input_dict": data}, + return OpenAILikeChatConfig._transform_response( + model=model, + response=response, + model_response=model_response, + stream=stream, + logging_obj=logging_obj, + optional_params=optional_params, + api_key=api_key, + data=data, + messages=messages, + print_verbose=print_verbose, + encoding=encoding, + json_mode=json_mode, + custom_llm_provider=custom_llm_provider, + base_model=base_model, ) - response = ModelResponse(**response_json) - - response.model = custom_llm_provider + "/" + (response.model or "") - - if base_model is not None: - response._hidden_params["model"] = base_model - return response def completion( self, + *, model: str, messages: list, api_base: str, @@ -230,6 +255,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): streaming_decoder: Optional[ CustomStreamingDecoder ] = None, # if openai-compatible api needs custom stream decoder - e.g. 
sagemaker + fake_stream: bool = False, ): custom_endpoint = custom_endpoint or optional_params.pop( "custom_endpoint", None @@ -243,13 +269,24 @@ class OpenAILikeChatHandler(OpenAILikeBase): headers=headers, ) - stream: bool = optional_params.get("stream", None) or False - optional_params["stream"] = stream + stream: bool = optional_params.pop("stream", None) or False + extra_body = optional_params.pop("extra_body", {}) + json_mode = optional_params.pop("json_mode", None) + optional_params.pop("max_retries", None) + if not fake_stream: + optional_params["stream"] = stream + + if messages is not None and custom_llm_provider is not None: + provider_config = ProviderConfigManager.get_provider_config( + model=model, provider=LlmProviders(custom_llm_provider) + ) + messages = provider_config._transform_messages(messages) data = { "model": model, "messages": messages, **optional_params, + **extra_body, } ## LOGGING @@ -288,6 +325,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): client=client, custom_llm_provider=custom_llm_provider, streaming_decoder=streaming_decoder, + fake_stream=fake_stream, ) else: return self.acompletion_function( @@ -327,6 +365,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): messages=messages, logging_obj=logging_obj, streaming_decoder=streaming_decoder, + fake_stream=fake_stream, ) # completion_stream.__iter__() return CustomStreamWrapper( @@ -344,7 +383,6 @@ class OpenAILikeChatHandler(OpenAILikeBase): ) response.raise_for_status() - response_json = response.json() except httpx.HTTPStatusError as e: raise OpenAILikeError( status_code=e.response.status_code, @@ -356,17 +394,19 @@ class OpenAILikeChatHandler(OpenAILikeBase): ) except Exception as e: raise OpenAILikeError(status_code=500, message=str(e)) - logging_obj.post_call( - input=messages, - api_key="", - original_response=response_json, - additional_args={"complete_input_dict": data}, + return OpenAILikeChatConfig._transform_response( + model=model, + response=response, + model_response=model_response, + stream=stream, + logging_obj=logging_obj, + optional_params=optional_params, + api_key=api_key, + data=data, + messages=messages, + print_verbose=print_verbose, + encoding=encoding, + json_mode=json_mode, + custom_llm_provider=custom_llm_provider, + base_model=base_model, ) - response = ModelResponse(**response_json) - - response.model = custom_llm_provider + "/" + (response.model or "") - - if base_model is not None: - response._hidden_params["model"] = base_model - - return response diff --git a/litellm/llms/openai_like/chat/transformation.py b/litellm/llms/openai_like/chat/transformation.py new file mode 100644 index 000000000..c355cf330 --- /dev/null +++ b/litellm/llms/openai_like/chat/transformation.py @@ -0,0 +1,98 @@ +""" +OpenAI-like chat completion transformation +""" + +import types +from typing import List, Optional, Tuple, Union + +import httpx +from pydantic import BaseModel + +import litellm +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage +from litellm.types.utils import ModelResponse + +from ....utils import _remove_additional_properties, _remove_strict_from_schema +from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig + + +class OpenAILikeChatConfig(OpenAIGPTConfig): + def _get_openai_compatible_provider_info( + self, api_base: Optional[str], api_key: Optional[str] + ) -> Tuple[Optional[str], Optional[str]]: + api_base = api_base or get_secret_str("OPENAI_LIKE_API_BASE") # type: ignore + 
dynamic_api_key = ( + api_key or get_secret_str("OPENAI_LIKE_API_KEY") or "" + ) # vllm does not require an api key + return api_base, dynamic_api_key + + @staticmethod + def _convert_tool_response_to_message( + message: ChatCompletionAssistantMessage, json_mode: bool + ) -> ChatCompletionAssistantMessage: + """ + if json_mode is true, convert the returned tool call response to a content with json str + + e.g. input: + + {"role": "assistant", "tool_calls": [{"id": "call_5ms4", "type": "function", "function": {"name": "json_tool_call", "arguments": "{\"key\": \"question\", \"value\": \"What is the capital of France?\"}"}}]} + + output: + + {"role": "assistant", "content": "{\"key\": \"question\", \"value\": \"What is the capital of France?\"}"} + """ + if not json_mode: + return message + + _tool_calls = message.get("tool_calls") + + if _tool_calls is None or len(_tool_calls) != 1: + return message + + message["content"] = _tool_calls[0]["function"].get("arguments") or "" + message["tool_calls"] = None + + return message + + @staticmethod + def _transform_response( + model: str, + response: httpx.Response, + model_response: ModelResponse, + stream: bool, + logging_obj: litellm.litellm_core_utils.litellm_logging.Logging, # type: ignore + optional_params: dict, + api_key: Optional[str], + data: Union[dict, str], + messages: List, + print_verbose, + encoding, + json_mode: bool, + custom_llm_provider: str, + base_model: Optional[str], + ) -> ModelResponse: + response_json = response.json() + logging_obj.post_call( + input=messages, + api_key="", + original_response=response_json, + additional_args={"complete_input_dict": data}, + ) + + if json_mode: + for choice in response_json["choices"]: + message = OpenAILikeChatConfig._convert_tool_response_to_message( + choice.get("message"), json_mode + ) + choice["message"] = message + + returned_response = ModelResponse(**response_json) + + returned_response.model = ( + custom_llm_provider + "/" + (returned_response.model or "") + ) + + if base_model is not None: + returned_response._hidden_params["model"] = base_model + return returned_response diff --git a/litellm/llms/openai_like/embedding/handler.py b/litellm/llms/openai_like/embedding/handler.py index ce0860724..7ddf43cb8 100644 --- a/litellm/llms/openai_like/embedding/handler.py +++ b/litellm/llms/openai_like/embedding/handler.py @@ -62,7 +62,7 @@ class OpenAILikeEmbeddingHandler(OpenAILikeBase): except httpx.HTTPStatusError as e: raise OpenAILikeError( status_code=e.response.status_code, - message=response.text if response else str(e), + message=e.response.text if e.response else str(e), ) except httpx.TimeoutException: raise OpenAILikeError( diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 29028e053..45b7a6c5b 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -943,17 +943,10 @@ def _gemini_tool_call_invoke_helper( name = function_call_params.get("name", "") or "" arguments = function_call_params.get("arguments", "") arguments_dict = json.loads(arguments) - function_call: Optional[litellm.types.llms.vertex_ai.FunctionCall] = None - for k, v in arguments_dict.items(): - inferred_protocol_value = infer_protocol_value(value=v) - _field = litellm.types.llms.vertex_ai.Field( - key=k, value={inferred_protocol_value: v} - ) - _fields = litellm.types.llms.vertex_ai.FunctionCallArgs(fields=_field) - function_call = litellm.types.llms.vertex_ai.FunctionCall( - name=name, - args=_fields, - ) + 
function_call = litellm.types.llms.vertex_ai.FunctionCall( + name=name, + args=arguments_dict, + ) return function_call @@ -978,54 +971,26 @@ def convert_to_gemini_tool_call_invoke( }, """ """ - Gemini tool call invokes: - https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#submit-api-output - content { - role: "model" - parts [ + Gemini tool call invokes: + { + "role": "model", + "parts": [ { - function_call { - name: "get_current_weather" - args { - fields { - key: "unit" - value { - string_value: "fahrenheit" - } - } - fields { - key: "predicted_temperature" - value { - number_value: 45 - } - } - fields { - key: "location" - value { - string_value: "Boston, MA" - } - } - } - }, - { - function_call { - name: "get_current_weather" - args { - fields { - key: "location" - value { - string_value: "San Francisco" - } - } - } + "functionCall": { + "name": "get_current_weather", + "args": { + "unit": "fahrenheit", + "predicted_temperature": 45, + "location": "Boston, MA", } + } } - ] + ] } """ """ - - json.load the arguments - - iterate through arguments -> create a FunctionCallArgs for each field + - json.load the arguments """ try: _parts_list: List[litellm.types.llms.vertex_ai.PartType] = [] @@ -1128,16 +1093,8 @@ def convert_to_gemini_tool_call_result( # We can't determine from openai message format whether it's a successful or # error call result so default to the successful result template - inferred_content_value = infer_protocol_value(value=content_str) - - _field = litellm.types.llms.vertex_ai.Field( - key="content", value={inferred_content_value: content_str} - ) - - _function_call_args = litellm.types.llms.vertex_ai.FunctionCallArgs(fields=_field) - _function_response = litellm.types.llms.vertex_ai.FunctionResponse( - name=name, response=_function_call_args # type: ignore + name=name, response={"content": content_str} # type: ignore ) _part = litellm.types.llms.vertex_ai.PartType(function_response=_function_response) diff --git a/litellm/llms/watsonx/chat/handler.py b/litellm/llms/watsonx/chat/handler.py index b016bb0a7..932946d3c 100644 --- a/litellm/llms/watsonx/chat/handler.py +++ b/litellm/llms/watsonx/chat/handler.py @@ -57,6 +57,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler): def completion( self, + *, model: str, messages: list, api_base: str, @@ -75,9 +76,8 @@ class WatsonXChatHandler(OpenAILikeChatHandler): timeout: Optional[Union[float, httpx.Timeout]] = None, client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, custom_endpoint: Optional[bool] = None, - streaming_decoder: Optional[ - CustomStreamingDecoder - ] = None, # if openai-compatible api needs custom stream decoder - e.g. 
sagemaker + streaming_decoder: Optional[CustomStreamingDecoder] = None, + fake_stream: bool = False, ): api_params = _get_api_params(optional_params, print_verbose=print_verbose) diff --git a/litellm/main.py b/litellm/main.py index 32055eb9d..5d433eb36 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -1495,8 +1495,8 @@ def completion( # type: ignore # noqa: PLR0915 timeout=timeout, # type: ignore custom_prompt_dict=custom_prompt_dict, client=client, # pass AsyncOpenAI, OpenAI client - organization=organization, custom_llm_provider=custom_llm_provider, + encoding=encoding, ) elif ( model in litellm.open_ai_chat_completion_models @@ -3182,6 +3182,7 @@ async def aembedding(*args, **kwargs) -> EmbeddingResponse: or custom_llm_provider == "azure_ai" or custom_llm_provider == "together_ai" or custom_llm_provider == "openai_like" + or custom_llm_provider == "jina_ai" ): # currently implemented aiohttp calls for just azure and openai, soon all. # Await normally init_response = await loop.run_in_executor(None, func_with_context) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 606a2756b..a56472f7f 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1745,7 +1745,8 @@ "output_cost_per_token": 0.00000080, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-8b-8192": { "max_tokens": 8192, @@ -1755,7 +1756,74 @@ "output_cost_per_token": 0.00000008, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-1b-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000004, + "output_cost_per_token": 0.00000004, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-3b-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000006, + "output_cost_per_token": 0.00000006, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-11b-text-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000018, + "output_cost_per_token": 0.00000018, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-11b-vision-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000018, + "output_cost_per_token": 0.00000018, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-90b-text-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0000009, + "output_cost_per_token": 0.0000009, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-90b-vision-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0000009, + 
"output_cost_per_token": 0.0000009, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-70b-8192": { "max_tokens": 8192, @@ -1765,7 +1833,8 @@ "output_cost_per_token": 0.00000079, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama-3.1-8b-instant": { "max_tokens": 8192, @@ -1775,7 +1844,8 @@ "output_cost_per_token": 0.00000008, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama-3.1-70b-versatile": { "max_tokens": 8192, @@ -1785,7 +1855,8 @@ "output_cost_per_token": 0.00000079, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama-3.1-405b-reasoning": { "max_tokens": 8192, @@ -1795,7 +1866,8 @@ "output_cost_per_token": 0.00000079, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/mixtral-8x7b-32768": { "max_tokens": 32768, @@ -1805,7 +1877,8 @@ "output_cost_per_token": 0.00000024, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/gemma-7b-it": { "max_tokens": 8192, @@ -1815,7 +1888,8 @@ "output_cost_per_token": 0.00000007, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/gemma2-9b-it": { "max_tokens": 8192, @@ -1825,7 +1899,8 @@ "output_cost_per_token": 0.00000020, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-groq-70b-8192-tool-use-preview": { "max_tokens": 8192, @@ -1835,7 +1910,8 @@ "output_cost_per_token": 0.00000089, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-groq-8b-8192-tool-use-preview": { "max_tokens": 8192, @@ -1845,7 +1921,8 @@ "output_cost_per_token": 0.00000019, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "cerebras/llama3.1-8b": { "max_tokens": 128000, diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 1155e0466..974b091cf 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -12,7 +12,6 @@ model_list: vertex_ai_project: "adroit-crow-413218" vertex_ai_location: "us-east5" - router_settings: model_group_alias: "gpt-4-turbo": # Aliased model name diff --git a/litellm/types/llms/vertex_ai.py b/litellm/types/llms/vertex_ai.py index d55cf3ec6..54d4c1af2 100644 --- a/litellm/types/llms/vertex_ai.py +++ b/litellm/types/llms/vertex_ai.py @@ -13,23 +13,14 @@ from typing_extensions import ( ) -class Field(TypedDict): - key: str - value: Dict[str, Any] - - -class FunctionCallArgs(TypedDict): - fields: Field - - class FunctionResponse(TypedDict): name: str - response: FunctionCallArgs + response: Optional[dict] class FunctionCall(TypedDict): name: str - args: FunctionCallArgs + args: Optional[dict] 
class FileDataType(TypedDict): diff --git a/litellm/utils.py b/litellm/utils.py index 2dce9db89..003971142 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1739,15 +1739,15 @@ def supports_response_schema(model: str, custom_llm_provider: Optional[str]) -> Does not raise error. Defaults to 'False'. Outputs logging.error. """ + ## GET LLM PROVIDER ## + model, custom_llm_provider, _, _ = get_llm_provider( + model=model, custom_llm_provider=custom_llm_provider + ) + + if custom_llm_provider == "predibase": # predibase supports this globally + return True + try: - ## GET LLM PROVIDER ## - model, custom_llm_provider, _, _ = get_llm_provider( - model=model, custom_llm_provider=custom_llm_provider - ) - - if custom_llm_provider == "predibase": # predibase supports this globally - return True - ## GET MODEL INFO model_info = litellm.get_model_info( model=model, custom_llm_provider=custom_llm_provider @@ -1755,12 +1755,17 @@ def supports_response_schema(model: str, custom_llm_provider: Optional[str]) -> if model_info.get("supports_response_schema", False) is True: return True - return False except Exception: - verbose_logger.error( - f"Model not supports response_schema. You passed model={model}, custom_llm_provider={custom_llm_provider}." + ## check if provider supports response schema globally + supported_params = get_supported_openai_params( + model=model, + custom_llm_provider=custom_llm_provider, + request_type="chat_completion", ) - return False + if supported_params is not None and "response_schema" in supported_params: + return True + + return False def supports_function_calling( @@ -2710,6 +2715,7 @@ def get_optional_params( # noqa: PLR0915 non_default_params["response_format"] = type_to_response_format_param( response_format=non_default_params["response_format"] ) + if "tools" in non_default_params and isinstance( non_default_params, list ): # fixes https://github.com/BerriAI/litellm/issues/4933 @@ -3259,24 +3265,14 @@ def get_optional_params( # noqa: PLR0915 ) _check_valid_arg(supported_params=supported_params) - if max_tokens is not None: - optional_params["num_predict"] = max_tokens - if stream: - optional_params["stream"] = stream - if temperature is not None: - optional_params["temperature"] = temperature - if seed is not None: - optional_params["seed"] = seed - if top_p is not None: - optional_params["top_p"] = top_p - if frequency_penalty is not None: - optional_params["repeat_penalty"] = frequency_penalty - if stop is not None: - optional_params["stop"] = stop - if response_format is not None and response_format["type"] == "json_object": - optional_params["format"] = "json" + optional_params = litellm.OllamaConfig().map_openai_params( + non_default_params=non_default_params, + optional_params=optional_params, + ) elif custom_llm_provider == "ollama_chat": - supported_params = litellm.OllamaChatConfig().get_supported_openai_params() + supported_params = get_supported_openai_params( + model=model, custom_llm_provider=custom_llm_provider + ) _check_valid_arg(supported_params=supported_params) @@ -3494,24 +3490,16 @@ def get_optional_params( # noqa: PLR0915 ) _check_valid_arg(supported_params=supported_params) - if temperature is not None: - optional_params["temperature"] = temperature - if max_tokens is not None: - optional_params["max_tokens"] = max_tokens - if top_p is not None: - optional_params["top_p"] = top_p - if stream is not None: - optional_params["stream"] = stream - if stop is not None: - optional_params["stop"] = stop - if tools is not None: - 
optional_params["tools"] = tools - if tool_choice is not None: - optional_params["tool_choice"] = tool_choice - if response_format is not None: - optional_params["response_format"] = response_format - if seed is not None: - optional_params["seed"] = seed + optional_params = litellm.GroqChatConfig().map_openai_params( + non_default_params=non_default_params, + optional_params=optional_params, + model=model, + drop_params=( + drop_params + if drop_params is not None and isinstance(drop_params, bool) + else False + ), + ) elif custom_llm_provider == "deepseek": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider @@ -6178,5 +6166,7 @@ class ProviderConfigManager: return litellm.OpenAIO1Config() elif litellm.LlmProviders.DEEPSEEK == provider: return litellm.DeepSeekChatConfig() + elif litellm.LlmProviders.GROQ == provider: + return litellm.GroqChatConfig() return OpenAIGPTConfig() diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 606a2756b..a56472f7f 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1745,7 +1745,8 @@ "output_cost_per_token": 0.00000080, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-8b-8192": { "max_tokens": 8192, @@ -1755,7 +1756,74 @@ "output_cost_per_token": 0.00000008, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-1b-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000004, + "output_cost_per_token": 0.00000004, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-3b-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000006, + "output_cost_per_token": 0.00000006, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-11b-text-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000018, + "output_cost_per_token": 0.00000018, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-11b-vision-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000018, + "output_cost_per_token": 0.00000018, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-90b-text-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0000009, + "output_cost_per_token": 0.0000009, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true + }, + "groq/llama-3.2-90b-vision-preview": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0000009, + "output_cost_per_token": 0.0000009, + "litellm_provider": "groq", + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-70b-8192": { 
"max_tokens": 8192, @@ -1765,7 +1833,8 @@ "output_cost_per_token": 0.00000079, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama-3.1-8b-instant": { "max_tokens": 8192, @@ -1775,7 +1844,8 @@ "output_cost_per_token": 0.00000008, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama-3.1-70b-versatile": { "max_tokens": 8192, @@ -1785,7 +1855,8 @@ "output_cost_per_token": 0.00000079, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama-3.1-405b-reasoning": { "max_tokens": 8192, @@ -1795,7 +1866,8 @@ "output_cost_per_token": 0.00000079, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/mixtral-8x7b-32768": { "max_tokens": 32768, @@ -1805,7 +1877,8 @@ "output_cost_per_token": 0.00000024, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/gemma-7b-it": { "max_tokens": 8192, @@ -1815,7 +1888,8 @@ "output_cost_per_token": 0.00000007, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/gemma2-9b-it": { "max_tokens": 8192, @@ -1825,7 +1899,8 @@ "output_cost_per_token": 0.00000020, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-groq-70b-8192-tool-use-preview": { "max_tokens": 8192, @@ -1835,7 +1910,8 @@ "output_cost_per_token": 0.00000089, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "groq/llama3-groq-8b-8192-tool-use-preview": { "max_tokens": 8192, @@ -1845,7 +1921,8 @@ "output_cost_per_token": 0.00000019, "litellm_provider": "groq", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_response_schema": true }, "cerebras/llama3.1-8b": { "max_tokens": 128000, diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py index 74fff60a4..88fce6dac 100644 --- a/tests/llm_translation/base_llm_unit_tests.py +++ b/tests/llm_translation/base_llm_unit_tests.py @@ -49,7 +49,7 @@ class BaseLLMChatTest(ABC): ) assert response is not None except litellm.InternalServerError: - pass + pytest.skip("Model is overloaded") # for OpenAI the content contains the JSON schema, so we need to assert that the content is not None assert response.choices[0].message.content is not None @@ -92,7 +92,9 @@ class BaseLLMChatTest(ABC): # relevant issue: https://github.com/BerriAI/litellm/issues/6741 assert response.choices[0].message.content is not None + @pytest.mark.flaky(retries=6, delay=1) def test_json_response_pydantic_obj(self): + litellm.set_verbose = True from pydantic import BaseModel from litellm.utils import supports_response_schema @@ -119,6 +121,11 @@ class BaseLLMChatTest(ABC): response_format=TestModel, ) assert res is not None + + print(res.choices[0].message) + + assert res.choices[0].message.content is not None + assert 
res.choices[0].message.tool_calls is None except litellm.InternalServerError: pytest.skip("Model is overloaded") @@ -140,12 +147,15 @@ class BaseLLMChatTest(ABC): }, ] - response = litellm.completion( - **base_completion_call_args, - messages=messages, - response_format={"type": "json_object"}, - stream=True, - ) + try: + response = litellm.completion( + **base_completion_call_args, + messages=messages, + response_format={"type": "json_object"}, + stream=True, + ) + except litellm.InternalServerError: + pytest.skip("Model is overloaded") print(response) @@ -161,6 +171,25 @@ class BaseLLMChatTest(ABC): assert content is not None assert len(content) > 0 + @pytest.fixture + def tool_call_no_arguments(self): + return { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_2c384bc6-de46-4f29-8adc-60dd5805d305", + "function": {"name": "Get-FAQ", "arguments": "{}"}, + "type": "function", + } + ], + } + + @abstractmethod + def test_tool_call_no_arguments(self, tool_call_no_arguments): + """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833""" + pass + @pytest.fixture def pdf_messages(self): import base64 diff --git a/tests/llm_translation/test_anthropic_completion.py b/tests/llm_translation/test_anthropic_completion.py index d6ee074b1..812291767 100644 --- a/tests/llm_translation/test_anthropic_completion.py +++ b/tests/llm_translation/test_anthropic_completion.py @@ -697,6 +697,15 @@ class TestAnthropicCompletion(BaseLLMChatTest): assert _document_validation["source"]["media_type"] == "application/pdf" assert _document_validation["source"]["type"] == "base64" + def test_tool_call_no_arguments(self, tool_call_no_arguments): + """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833""" + from litellm.llms.prompt_templates.factory import ( + convert_to_anthropic_tool_invoke, + ) + + result = convert_to_anthropic_tool_invoke([tool_call_no_arguments]) + print(result) + def test_convert_tool_response_to_message_with_values(): """Test converting a tool response with 'values' key to a message""" diff --git a/tests/llm_translation/test_deepseek_completion.py b/tests/llm_translation/test_deepseek_completion.py index b0f7ee663..17b0a340b 100644 --- a/tests/llm_translation/test_deepseek_completion.py +++ b/tests/llm_translation/test_deepseek_completion.py @@ -7,3 +7,7 @@ class TestDeepSeekChatCompletion(BaseLLMChatTest): return { "model": "deepseek/deepseek-chat", } + + def test_tool_call_no_arguments(self, tool_call_no_arguments): + """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833""" + pass diff --git a/tests/llm_translation/test_groq.py b/tests/llm_translation/test_groq.py new file mode 100644 index 000000000..359787b2d --- /dev/null +++ b/tests/llm_translation/test_groq.py @@ -0,0 +1,12 @@ +from base_llm_unit_tests import BaseLLMChatTest + + +class TestGroq(BaseLLMChatTest): + def get_base_completion_call_args(self) -> dict: + return { + "model": "groq/llama-3.1-70b-versatile", + } + + def test_tool_call_no_arguments(self, tool_call_no_arguments): + """Test that tool calls with no arguments is translated correctly. 
Relevant issue: https://github.com/BerriAI/litellm/issues/6833""" + pass diff --git a/tests/llm_translation/test_mistral_api.py b/tests/llm_translation/test_mistral_api.py index b2cb36541..bb8cb3c60 100644 --- a/tests/llm_translation/test_mistral_api.py +++ b/tests/llm_translation/test_mistral_api.py @@ -32,3 +32,7 @@ class TestMistralCompletion(BaseLLMChatTest): def get_base_completion_call_args(self) -> dict: litellm.set_verbose = True return {"model": "mistral/mistral-small-latest"} + + def test_tool_call_no_arguments(self, tool_call_no_arguments): + """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833""" + pass diff --git a/tests/llm_translation/test_optional_params.py b/tests/llm_translation/test_optional_params.py index 7fe8baeb5..34ecdfaca 100644 --- a/tests/llm_translation/test_optional_params.py +++ b/tests/llm_translation/test_optional_params.py @@ -952,3 +952,17 @@ def test_lm_studio_embedding_params(): drop_params=True, ) assert len(optional_params) == 0 + + +def test_ollama_pydantic_obj(): + from pydantic import BaseModel + + class ResponseFormat(BaseModel): + x: str + y: str + + get_optional_params( + model="qwen2:0.5b", + custom_llm_provider="ollama", + response_format=ResponseFormat, + ) diff --git a/tests/llm_translation/test_vertex.py b/tests/llm_translation/test_vertex.py index 73960020d..3e1087536 100644 --- a/tests/llm_translation/test_vertex.py +++ b/tests/llm_translation/test_vertex.py @@ -306,6 +306,8 @@ def test_multiple_function_call(): ) assert len(r.choices) > 0 + print(mock_post.call_args.kwargs["json"]) + assert mock_post.call_args.kwargs["json"] == { "contents": [ {"role": "user", "parts": [{"text": "do test"}]}, @@ -313,28 +315,8 @@ def test_multiple_function_call(): "role": "model", "parts": [ {"text": "test"}, - { - "function_call": { - "name": "test", - "args": { - "fields": { - "key": "arg", - "value": {"string_value": "test"}, - } - }, - } - }, - { - "function_call": { - "name": "test2", - "args": { - "fields": { - "key": "arg", - "value": {"string_value": "test2"}, - } - }, - } - }, + {"function_call": {"name": "test", "args": {"arg": "test"}}}, + {"function_call": {"name": "test2", "args": {"arg": "test2"}}}, ], }, { @@ -342,23 +324,13 @@ def test_multiple_function_call(): { "function_response": { "name": "test", - "response": { - "fields": { - "key": "content", - "value": {"string_value": "42"}, - } - }, + "response": {"content": "42"}, } }, { "function_response": { "name": "test2", - "response": { - "fields": { - "key": "content", - "value": {"string_value": "15"}, - } - }, + "response": {"content": "15"}, } }, ] @@ -441,34 +413,16 @@ def test_multiple_function_call_changed_text_pos(): assert len(resp.choices) > 0 mock_post.assert_called_once() + print(mock_post.call_args.kwargs["json"]["contents"]) + assert mock_post.call_args.kwargs["json"]["contents"] == [ {"role": "user", "parts": [{"text": "do test"}]}, { "role": "model", "parts": [ {"text": "test"}, - { - "function_call": { - "name": "test", - "args": { - "fields": { - "key": "arg", - "value": {"string_value": "test"}, - } - }, - } - }, - { - "function_call": { - "name": "test2", - "args": { - "fields": { - "key": "arg", - "value": {"string_value": "test2"}, - } - }, - } - }, + {"function_call": {"name": "test", "args": {"arg": "test"}}}, + {"function_call": {"name": "test2", "args": {"arg": "test2"}}}, ], }, { @@ -476,23 +430,13 @@ def test_multiple_function_call_changed_text_pos(): { "function_response": { "name": 
"test2", - "response": { - "fields": { - "key": "content", - "value": {"string_value": "15"}, - } - }, + "response": {"content": "15"}, } }, { "function_response": { "name": "test", - "response": { - "fields": { - "key": "content", - "value": {"string_value": "42"}, - } - }, + "response": {"content": "42"}, } }, ] @@ -1354,3 +1298,20 @@ def test_vertex_embedding_url(model, expected_url): assert url == expected_url assert endpoint == "predict" + + +from base_llm_unit_tests import BaseLLMChatTest + + +class TestVertexGemini(BaseLLMChatTest): + def get_base_completion_call_args(self) -> dict: + return {"model": "gemini/gemini-1.5-flash"} + + def test_tool_call_no_arguments(self, tool_call_no_arguments): + """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833""" + from litellm.llms.prompt_templates.factory import ( + convert_to_gemini_tool_call_invoke, + ) + + result = convert_to_gemini_tool_call_invoke(tool_call_no_arguments) + print(result) diff --git a/tests/local_testing/test_amazing_vertex_completion.py b/tests/local_testing/test_amazing_vertex_completion.py index f801a53ce..50a39b242 100644 --- a/tests/local_testing/test_amazing_vertex_completion.py +++ b/tests/local_testing/test_amazing_vertex_completion.py @@ -2867,6 +2867,7 @@ def test_gemini_function_call_parameter_in_messages(): print(e) # mock_client.assert_any_call() + assert { "contents": [ { @@ -2879,12 +2880,7 @@ def test_gemini_function_call_parameter_in_messages(): { "function_call": { "name": "search", - "args": { - "fields": { - "key": "queries", - "value": {"list_value": ["weather in boston"]}, - } - }, + "args": {"queries": ["weather in boston"]}, } } ], @@ -2895,12 +2891,7 @@ def test_gemini_function_call_parameter_in_messages(): "function_response": { "name": "search", "response": { - "fields": { - "key": "content", - "value": { - "string_value": "The current weather in Boston is 22°F." - }, - } + "content": "The current weather in Boston is 22°F." }, } } @@ -2935,6 +2926,7 @@ def test_gemini_function_call_parameter_in_messages(): def test_gemini_function_call_parameter_in_messages_2(): + litellm.set_verbose = True from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import ( _gemini_convert_messages_with_history, ) @@ -2958,6 +2950,7 @@ def test_gemini_function_call_parameter_in_messages_2(): returned_contents = _gemini_convert_messages_with_history(messages=messages) + print(f"returned_contents: {returned_contents}") assert returned_contents == [ { "role": "user", @@ -2970,12 +2963,7 @@ def test_gemini_function_call_parameter_in_messages_2(): { "function_call": { "name": "search", - "args": { - "fields": { - "key": "queries", - "value": {"list_value": ["weather in boston"]}, - } - }, + "args": {"queries": ["weather in boston"]}, } }, ], @@ -2986,12 +2974,7 @@ def test_gemini_function_call_parameter_in_messages_2(): "function_response": { "name": "search", "response": { - "fields": { - "key": "content", - "value": { - "string_value": "The weather in Boston is 100 degrees." - }, - } + "content": "The weather in Boston is 100 degrees." 
}, } } diff --git a/tests/local_testing/test_ollama.py b/tests/local_testing/test_ollama.py index de41e24b8..34c0791c3 100644 --- a/tests/local_testing/test_ollama.py +++ b/tests/local_testing/test_ollama.py @@ -67,7 +67,8 @@ def test_ollama_json_mode(): assert converted_params == { "temperature": 0.5, "format": "json", - }, f"{converted_params} != {'temperature': 0.5, 'format': 'json'}" + "stream": False, + }, f"{converted_params} != {'temperature': 0.5, 'format': 'json', 'stream': False}" except Exception as e: pytest.fail(f"Error occurred: {e}") diff --git a/tests/local_testing/test_router_batch_completion.py b/tests/local_testing/test_router_batch_completion.py index 3de61c0a6..065730d48 100644 --- a/tests/local_testing/test_router_batch_completion.py +++ b/tests/local_testing/test_router_batch_completion.py @@ -64,6 +64,7 @@ async def test_batch_completion_multiple_models(mode): models_in_responses = [] print(f"response: {response}") for individual_response in response: + print(f"individual_response: {individual_response}") _model = individual_response["model"] models_in_responses.append(_model) diff --git a/tests/local_testing/test_utils.py b/tests/local_testing/test_utils.py index 6e7b0ff05..52946ca30 100644 --- a/tests/local_testing/test_utils.py +++ b/tests/local_testing/test_utils.py @@ -749,6 +749,7 @@ def test_convert_model_response_object(): ("gemini/gemini-1.5-pro", True), ("predibase/llama3-8b-instruct", True), ("gpt-3.5-turbo", False), + ("groq/llama3-70b-8192", True), ], ) def test_supports_response_schema(model, expected_bool): From b8edef389c0b4a53a02ad6c50675e242da02ee99 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 22 Nov 2024 02:29:16 +0530 Subject: [PATCH 4/6] =?UTF-8?q?bump:=20version=201.52.12=20=E2=86=92=201.5?= =?UTF-8?q?2.13?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3e69461ae..d5cf3fb92 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.12" +version = "1.52.13" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.12" +version = "1.52.13" version_files = [ "pyproject.toml:^version" ] From 2903fd4164010645db7ea3c77ddebb2aae870cf4 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 22 Nov 2024 03:00:45 +0530 Subject: [PATCH 5/6] docs: update json mode docs --- docs/my-website/docs/completion/json_mode.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/my-website/docs/completion/json_mode.md b/docs/my-website/docs/completion/json_mode.md index 51f76b7a6..379775bf2 100644 --- a/docs/my-website/docs/completion/json_mode.md +++ b/docs/my-website/docs/completion/json_mode.md @@ -76,6 +76,8 @@ Works for: - Vertex AI models (Gemini + Anthropic) - Bedrock Models - Anthropic API Models +- Groq Models +- Ollama Models From 71ebf47cef3a64694068a1ae8717e8352602bc98 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 21 Nov 2024 19:02:08 -0800 Subject: [PATCH 6/6] fix latency issues on google ai studio (#6852) --- .../vertex_ai_context_caching.py | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py 
b/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py index e60a17052..b9be8a3bd 100644 --- a/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py +++ b/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py @@ -6,7 +6,11 @@ import httpx import litellm from litellm.caching.caching import Cache, LiteLLMCacheType from litellm.litellm_core_utils.litellm_logging import Logging -from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +from litellm.llms.custom_httpx.http_handler import ( + AsyncHTTPHandler, + HTTPHandler, + get_async_httpx_client, +) from litellm.llms.OpenAI.openai import AllMessageValues from litellm.types.llms.vertex_ai import ( CachedContentListAllResponseBody, @@ -331,6 +335,13 @@ class ContextCachingEndpoints(VertexBase): if cached_content is not None: return messages, cached_content + cached_messages, non_cached_messages = separate_cached_messages( + messages=messages + ) + + if len(cached_messages) == 0: + return messages, None + ## AUTHORIZATION ## token, url = self._get_token_and_url_context_caching( gemini_api_key=api_key, @@ -347,22 +358,12 @@ class ContextCachingEndpoints(VertexBase): headers.update(extra_headers) if client is None or not isinstance(client, AsyncHTTPHandler): - _params = {} - if timeout is not None: - if isinstance(timeout, float) or isinstance(timeout, int): - timeout = httpx.Timeout(timeout) - _params["timeout"] = timeout - client = AsyncHTTPHandler(**_params) # type: ignore + client = get_async_httpx_client( + params={"timeout": timeout}, llm_provider=litellm.LlmProviders.VERTEX_AI + ) else: client = client - cached_messages, non_cached_messages = separate_cached_messages( - messages=messages - ) - - if len(cached_messages) == 0: - return messages, None - ## CHECK IF CACHED ALREADY generated_cache_key = local_cache_obj.get_cache_key(messages=cached_messages) google_cache_name = await self.async_check_cache(