Merge branch 'main' into litellm_add_streaming_usage_tracking

Ishaan Jaff 2024-11-21 19:35:47 -08:00
commit f049f841db
66 changed files with 1078 additions and 491 deletions

View file

@@ -771,6 +771,7 @@ jobs:
       - run: python ./tests/code_coverage_tests/litellm_logging_code_coverage.py
       - run: python ./tests/documentation_tests/test_env_keys.py
       - run: python ./tests/documentation_tests/test_api_docs.py
+      - run: python ./tests/code_coverage_tests/ensure_async_clients_test.py
       - run: helm lint ./deploy/charts/litellm-helm
   db_migration_disable_update_check:

View file

@@ -76,6 +76,8 @@ Works for:
 - Vertex AI models (Gemini + Anthropic)
 - Bedrock Models
 - Anthropic API Models
+- Groq Models
+- Ollama Models
 <Tabs>
 <TabItem value="sdk" label="SDK">

View file

@@ -1,7 +1,7 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
-# Embedding Models
+# Embeddings
 ## Quick Start
 ```python

View file

@@ -1,4 +1,4 @@
-# Image Generation
+# Images
 ## Quick Start

View file

@@ -10,6 +10,35 @@ LiteLLM supports all anthropic models.
 - `claude-2.1`
 - `claude-instant-1.2`
+
+| Property | Details |
+|-------|-------|
+| Description | Claude is a highly performant, trustworthy, and intelligent AI platform built by Anthropic. Claude excels at tasks involving language, reasoning, analysis, coding, and more. |
+| Provider Route on LiteLLM | `anthropic/` (add this prefix to the model name, to route any requests to Anthropic - e.g. `anthropic/claude-3-5-sonnet-20240620`) |
+| Provider Doc | [Anthropic ↗](https://docs.anthropic.com/en/docs/build-with-claude/overview) |
+| API Endpoint for Provider | https://api.anthropic.com |
+| Supported Endpoints | `/chat/completions` |
+
+## Supported OpenAI Parameters
+
+Check this in code, [here](../completion/input.md#translated-openai-params)
+
+```
+"stream",
+"stop",
+"temperature",
+"top_p",
+"max_tokens",
+"max_completion_tokens",
+"tools",
+"tool_choice",
+"extra_headers",
+"parallel_tool_calls",
+"response_format",
+"user"
+```
 :::info
 The Anthropic API fails requests when `max_tokens` is not passed. Because of this, litellm passes `max_tokens=4096` when no `max_tokens` is passed.
@@ -1006,20 +1035,3 @@ curl http://0.0.0.0:4000/v1/chat/completions \
 </TabItem>
 </Tabs>
-
-## All Supported OpenAI Params
-```
-"stream",
-"stop",
-"temperature",
-"top_p",
-"max_tokens",
-"max_completion_tokens",
-"tools",
-"tool_choice",
-"extra_headers",
-"parallel_tool_calls",
-"response_format",
-"user"
-```
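The `max_tokens` note above is worth seeing in practice. A minimal sketch of a call that pins `max_tokens` explicitly (assuming the standard `litellm.completion` entrypoint and an `ANTHROPIC_API_KEY` in the environment); if the parameter is omitted, litellm injects `max_tokens=4096` for Anthropic models:

```python
import litellm

# Explicitly set max_tokens; without it, litellm would pass max_tokens=4096,
# since the Anthropic API rejects requests that omit the field entirely.
response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20240620",
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=1024,
)
print(response.choices[0].message.content)
```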

View file

@@ -199,46 +199,52 @@ const sidebars = {
       ],
     },
-    {
-      type: "category",
-      label: "Guides",
-      link: {
-        type: "generated-index",
-        title: "Chat Completions",
-        description: "Details on the completion() function",
-        slug: "/completion",
-      },
-      items: [
-        "completion/input",
-        "completion/provider_specific_params",
-        "completion/json_mode",
-        "completion/prompt_caching",
-        "completion/audio",
-        "completion/vision",
-        "completion/predict_outputs",
-        "completion/prefix",
-        "completion/drop_params",
-        "completion/prompt_formatting",
-        "completion/output",
-        "completion/usage",
-        "exception_mapping",
-        "completion/stream",
-        "completion/message_trimming",
-        "completion/function_call",
-        "completion/model_alias",
-        "completion/batching",
-        "completion/mock_requests",
-        "completion/reliable_completions",
-      ],
-    },
     {
       type: "category",
       label: "Supported Endpoints",
       items: [
+        {
+          type: "category",
+          label: "Chat",
+          link: {
+            type: "generated-index",
+            title: "Chat Completions",
+            description: "Details on the completion() function",
+            slug: "/completion",
+          },
+          items: [
+            "completion/input",
+            "completion/provider_specific_params",
+            "completion/json_mode",
+            "completion/prompt_caching",
+            "completion/audio",
+            "completion/vision",
+            "completion/predict_outputs",
+            "completion/prefix",
+            "completion/drop_params",
+            "completion/prompt_formatting",
+            "completion/output",
+            "completion/usage",
+            "exception_mapping",
+            "completion/stream",
+            "completion/message_trimming",
+            "completion/function_call",
+            "completion/model_alias",
+            "completion/batching",
+            "completion/mock_requests",
+            "completion/reliable_completions",
+          ],
+        },
         "embedding/supported_embedding",
         "image_generation",
-        "audio_transcription",
-        "text_to_speech",
+        {
+          type: "category",
+          label: "Audio",
+          "items": [
+            "audio_transcription",
+            "text_to_speech",
+          ]
+        },
         "rerank",
         "assistants",
         "batches",

View file

@@ -133,7 +133,7 @@ use_client: bool = False
 ssl_verify: Union[str, bool] = True
 ssl_certificate: Optional[str] = None
 disable_streaming_logging: bool = False
-in_memory_llm_clients_cache: dict = {}
+in_memory_llm_clients_cache: InMemoryCache = InMemoryCache()
 safe_memory_mode: bool = False
 enable_azure_ad_token_refresh: Optional[bool] = False
 ### DEFAULT AZURE API VERSION ###
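Replacing the bare module-level `dict` with an `InMemoryCache` gives every cached client a TTL instead of a process-lifetime entry. A minimal sketch of the `get_cache`/`set_cache` interface the call sites in this commit rely on (illustrative only, not litellm's actual `InMemoryCache` implementation):

```python
import time
from typing import Any, Dict, Optional, Tuple

class TTLCache:
    """Illustrative stand-in for the InMemoryCache interface used above."""

    def __init__(self) -> None:
        self._store: Dict[str, Tuple[Any, Optional[float]]] = {}

    def set_cache(self, key: str, value: Any, ttl: Optional[int] = None) -> None:
        # ttl is in seconds; None means the entry never expires
        expires_at = time.monotonic() + ttl if ttl is not None else None
        self._store[key] = (value, expires_at)

    def get_cache(self, key: str) -> Optional[Any]:
        entry = self._store.get(key)
        if entry is None:
            return None
        value, expires_at = entry
        if expires_at is not None and time.monotonic() > expires_at:
            del self._store[key]  # expired entry: evict and report a miss
            return None
        return value
```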

View file

@@ -1793,7 +1793,7 @@ class CustomStreamWrapper:
                 or self.custom_llm_provider == "bedrock"
                 or self.custom_llm_provider == "triton"
                 or self.custom_llm_provider == "watsonx"
-                or self.custom_llm_provider in litellm.openai_compatible_endpoints
+                or self.custom_llm_provider in litellm.openai_compatible_providers
                 or self.custom_llm_provider in litellm._custom_providers
             ):
                 async for chunk in self.completion_stream:
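The one-token change above fixes a membership test that could never succeed: `custom_llm_provider` holds a provider name, while `openai_compatible_endpoints` holds API base URLs. A sketch of the distinction, with illustrative values only (the real lists live in `litellm/__init__.py`):

```python
# Illustrative values, assuming the usual shape of these two lists.
openai_compatible_endpoints = ["https://api.groq.com/openai/v1"]  # base URLs
openai_compatible_providers = ["groq"]                            # provider names

custom_llm_provider = "groq"
assert custom_llm_provider not in openai_compatible_endpoints  # old check: never true
assert custom_llm_provider in openai_compatible_providers      # fixed check
```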

View file

@@ -12,7 +12,11 @@ from typing_extensions import overload
 import litellm
 from litellm.caching.caching import DualCache
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.types.utils import EmbeddingResponse
 from litellm.utils import (
     CustomStreamWrapper,
@@ -977,7 +981,10 @@ class AzureChatCompletion(BaseLLM):
             else:
                 _params["timeout"] = httpx.Timeout(timeout=600.0, connect=5.0)
-            async_handler = AsyncHTTPHandler(**_params)  # type: ignore
+            async_handler = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.AZURE,
+                params=_params,
+            )
         else:
             async_handler = client  # type: ignore

View file

@@ -17,22 +17,6 @@ from litellm.utils import CustomStreamWrapper
 class OpenAIO1ChatCompletion(OpenAIChatCompletion):
-    async def mock_async_streaming(
-        self,
-        response: Any,
-        model: Optional[str],
-        logging_obj: Any,
-    ):
-        model_response = await response
-        completion_stream = MockResponseIterator(model_response=model_response)
-        streaming_response = CustomStreamWrapper(
-            completion_stream=completion_stream,
-            model=model,
-            custom_llm_provider="openai",
-            logging_obj=logging_obj,
-        )
-        return streaming_response
     def completion(
         self,
         model_response: ModelResponse,
@@ -54,7 +38,7 @@ class OpenAIO1ChatCompletion(OpenAIChatCompletion):
         custom_llm_provider: Optional[str] = None,
         drop_params: Optional[bool] = None,
     ):
-        stream: Optional[bool] = optional_params.pop("stream", False)
+        # stream: Optional[bool] = optional_params.pop("stream", False)
         response = super().completion(
             model_response,
             timeout,
@@ -76,20 +60,4 @@ class OpenAIO1ChatCompletion(OpenAIChatCompletion):
             drop_params,
         )
-        if stream is True:
-            if asyncio.iscoroutine(response):
-                return self.mock_async_streaming(
-                    response=response, model=model, logging_obj=logging_obj  # type: ignore
-                )
-            completion_stream = MockResponseIterator(model_response=response)
-            streaming_response = CustomStreamWrapper(
-                completion_stream=completion_stream,
-                model=model,
-                custom_llm_provider="openai",
-                logging_obj=logging_obj,
-            )
-            return streaming_response
-        else:
-            return response
+        return response

View file

@@ -18,6 +18,7 @@ import litellm
 from litellm import LlmProviders
 from litellm._logging import verbose_logger
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.utils import ProviderField
 from litellm.utils import (
@@ -562,8 +563,9 @@ class OpenAIChatCompletion(BaseLLM):
         _cache_key = f"hashed_api_key={hashed_api_key},api_base={api_base},timeout={timeout},max_retries={max_retries},organization={organization},is_async={is_async}"
-        if _cache_key in litellm.in_memory_llm_clients_cache:
-            return litellm.in_memory_llm_clients_cache[_cache_key]
+        _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key)
+        if _cached_client:
+            return _cached_client
         if is_async:
             _new_client: Union[OpenAI, AsyncOpenAI] = AsyncOpenAI(
                 api_key=api_key,
@@ -584,7 +586,11 @@ class OpenAIChatCompletion(BaseLLM):
             )
         ## SAVE CACHE KEY
-        litellm.in_memory_llm_clients_cache[_cache_key] = _new_client
+        litellm.in_memory_llm_clients_cache.set_cache(
+            key=_cache_key,
+            value=_new_client,
+            ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
+        )
         return _new_client
     else:

View file

@@ -13,7 +13,11 @@ import httpx
 import requests
 import litellm
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.utils import CustomStreamWrapper, ModelResponse, Usage
 from ..base import BaseLLM
@@ -162,7 +166,10 @@ class AnthropicTextCompletion(BaseLLM):
         client=None,
     ):
         if client is None:
-            client = AsyncHTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))
+            client = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.ANTHROPIC,
+                params={"timeout": httpx.Timeout(timeout=600.0, connect=5.0)},
+            )
         response = await client.post(api_base, headers=headers, data=json.dumps(data))
@@ -198,7 +205,10 @@ class AnthropicTextCompletion(BaseLLM):
         client=None,
     ):
         if client is None:
-            client = AsyncHTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))
+            client = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.ANTHROPIC,
+                params={"timeout": httpx.Timeout(timeout=600.0, connect=5.0)},
+            )
         response = await client.post(api_base, headers=headers, data=json.dumps(data))

View file

@@ -74,7 +74,10 @@ class AzureAIEmbedding(OpenAIChatCompletion):
         client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
     ) -> EmbeddingResponse:
         if client is None or not isinstance(client, AsyncHTTPHandler):
-            client = AsyncHTTPHandler(timeout=timeout, concurrent_limit=1)
+            client = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.AZURE_AI,
+                params={"timeout": timeout},
+            )
         url = "{}/images/embeddings".format(api_base)

View file

@@ -9,7 +9,10 @@ import httpx
 import requests
 import litellm
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.utils import Choices, CustomStreamWrapper, Message, ModelResponse, Usage
 from .prompt_templates.factory import custom_prompt, prompt_factory
@@ -185,7 +188,10 @@ async def async_completion(
     headers={},
 ):
-    async_handler = AsyncHTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))
+    async_handler = get_async_httpx_client(
+        llm_provider=litellm.LlmProviders.CLARIFAI,
+        params={"timeout": 600.0},
+    )
     response = await async_handler.post(
         url=model, headers=headers, data=json.dumps(data)
     )

View file

@@ -11,7 +11,11 @@ import requests  # type: ignore
 import litellm
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.types.llms.bedrock import CohereEmbeddingRequest
 from litellm.utils import Choices, Message, ModelResponse, Usage
@@ -71,7 +75,10 @@ async def async_embedding(
     )
     ## COMPLETION CALL
     if client is None:
-        client = AsyncHTTPHandler(concurrent_limit=1, timeout=timeout)
+        client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.COHERE,
+            params={"timeout": timeout},
+        )
     try:
         response = await client.post(api_base, headers=headers, data=json.dumps(data))

View file

@@ -7,6 +7,7 @@ import httpx
 from httpx import USE_CLIENT_DEFAULT, AsyncHTTPTransport, HTTPTransport
 import litellm
+from litellm.caching import InMemoryCache
 from .types import httpxSpecialProvider
@@ -26,6 +27,7 @@ headers = {
 # https://www.python-httpx.org/advanced/timeouts
 _DEFAULT_TIMEOUT = httpx.Timeout(timeout=5.0, connect=5.0)
+_DEFAULT_TTL_FOR_HTTPX_CLIENTS = 3600  # 1 hour, re-use the same httpx client for 1 hour
 class AsyncHTTPHandler:
@@ -476,8 +478,9 @@ def get_async_httpx_client(
         pass
     _cache_key_name = "async_httpx_client" + _params_key_name + llm_provider
-    if _cache_key_name in litellm.in_memory_llm_clients_cache:
-        return litellm.in_memory_llm_clients_cache[_cache_key_name]
+    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key_name)
+    if _cached_client:
+        return _cached_client
     if params is not None:
         _new_client = AsyncHTTPHandler(**params)
@@ -485,7 +488,11 @@ def get_async_httpx_client(
         _new_client = AsyncHTTPHandler(
             timeout=httpx.Timeout(timeout=600.0, connect=5.0)
         )
-    litellm.in_memory_llm_clients_cache[_cache_key_name] = _new_client
+    litellm.in_memory_llm_clients_cache.set_cache(
+        key=_cache_key_name,
+        value=_new_client,
+        ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
+    )
     return _new_client
@@ -505,13 +512,18 @@ def _get_httpx_client(params: Optional[dict] = None) -> HTTPHandler:
         pass
     _cache_key_name = "httpx_client" + _params_key_name
-    if _cache_key_name in litellm.in_memory_llm_clients_cache:
-        return litellm.in_memory_llm_clients_cache[_cache_key_name]
+    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key_name)
+    if _cached_client:
+        return _cached_client
     if params is not None:
         _new_client = HTTPHandler(**params)
     else:
         _new_client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))
-    litellm.in_memory_llm_clients_cache[_cache_key_name] = _new_client
+    litellm.in_memory_llm_clients_cache.set_cache(
+        key=_cache_key_name,
+        value=_new_client,
+        ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
+    )
    return _new_client
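The get-or-create pattern above is what every provider in this commit now funnels through: one cached client per provider-plus-params key, expiring after an hour, so the underlying httpx connection pool stays warm across calls instead of being rebuilt per request. A minimal sketch of the flow, assuming a cache object with the `get_cache`/`set_cache` interface (pairs with the `TTLCache` sketch earlier; not litellm's code):

```python
from typing import Any, Optional

import httpx

def get_or_create_async_client(cache: Any, llm_provider: str, params: Optional[dict] = None):
    """Illustrative get-or-create flow for a pooled async HTTP client."""
    key = f"async_httpx_client,{llm_provider},{sorted((params or {}).items())!r}"
    client = cache.get_cache(key)
    if client is not None:
        return client  # reuse the warm connection pool
    timeout = (params or {}).get("timeout", httpx.Timeout(600.0, connect=5.0))
    client = httpx.AsyncClient(timeout=timeout)
    cache.set_cache(key=key, value=client, ttl=3600)  # 1 hour, matching _DEFAULT_TTL_FOR_HTTPX_CLIENTS
    return client
```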

View file

@@ -393,7 +393,10 @@ class DatabricksChatCompletion(BaseLLM):
         if timeout is None:
             timeout = httpx.Timeout(timeout=600.0, connect=5.0)
-        self.async_handler = AsyncHTTPHandler(timeout=timeout)
+        self.async_handler = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.DATABRICKS,
+            params={"timeout": timeout},
+        )
         try:
             response = await self.async_handler.post(
@@ -610,7 +613,10 @@ class DatabricksChatCompletion(BaseLLM):
         response = None
         try:
             if client is None or isinstance(client, AsyncHTTPHandler):
-                self.async_client = AsyncHTTPHandler(timeout=timeout)  # type: ignore
+                self.async_client = get_async_httpx_client(
+                    llm_provider=litellm.LlmProviders.DATABRICKS,
+                    params={"timeout": timeout},
+                )
             else:
                 self.async_client = client

View file

@@ -5,9 +5,14 @@ from typing import Any, Coroutine, Literal, Optional, Union
 import httpx
 from openai.types.fine_tuning.fine_tuning_job import FineTuningJob, Hyperparameters
+import litellm
 from litellm._logging import verbose_logger
 from litellm.llms.base import BaseLLM
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
@@ -26,8 +31,9 @@ class VertexFineTuningAPI(VertexLLM):
     def __init__(self) -> None:
         super().__init__()
-        self.async_handler = AsyncHTTPHandler(
-            timeout=httpx.Timeout(timeout=600.0, connect=5.0)
+        self.async_handler = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.VERTEX_AI,
+            params={"timeout": 600.0},
         )
     def convert_response_created_at(self, response: ResponseTuningJob):

View file

@@ -6,55 +6,68 @@ from typing import Any, Callable, Optional, Union
 from httpx._config import Timeout
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.types.utils import CustomStreamingDecoder
 from litellm.utils import ModelResponse
 from ...groq.chat.transformation import GroqChatConfig
-from ...OpenAI.openai import OpenAIChatCompletion
+from ...openai_like.chat.handler import OpenAILikeChatHandler
-class GroqChatCompletion(OpenAIChatCompletion):
+class GroqChatCompletion(OpenAILikeChatHandler):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
     def completion(
         self,
+        *,
+        model: str,
+        messages: list,
+        api_base: str,
+        custom_llm_provider: str,
+        custom_prompt_dict: dict,
         model_response: ModelResponse,
-        timeout: Union[float, Timeout],
+        print_verbose: Callable,
+        encoding,
+        api_key: Optional[str],
+        logging_obj,
         optional_params: dict,
-        logging_obj: Any,
-        model: Optional[str] = None,
-        messages: Optional[list] = None,
-        print_verbose: Optional[Callable[..., Any]] = None,
-        api_key: Optional[str] = None,
-        api_base: Optional[str] = None,
-        acompletion: bool = False,
+        acompletion=None,
         litellm_params=None,
         logger_fn=None,
         headers: Optional[dict] = None,
-        custom_prompt_dict: dict = {},
-        client=None,
-        organization: Optional[str] = None,
-        custom_llm_provider: Optional[str] = None,
-        drop_params: Optional[bool] = None,
+        timeout: Optional[Union[float, Timeout]] = None,
+        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+        custom_endpoint: Optional[bool] = None,
+        streaming_decoder: Optional[CustomStreamingDecoder] = None,
+        fake_stream: bool = False
     ):
         messages = GroqChatConfig()._transform_messages(messages)  # type: ignore
+        if optional_params.get("stream") is True:
+            fake_stream = GroqChatConfig()._should_fake_stream(optional_params)
+        else:
+            fake_stream = False
         return super().completion(
-            model_response,
-            timeout,
-            optional_params,
-            logging_obj,
-            model,
-            messages,
-            print_verbose,
-            api_key,
-            api_base,
-            acompletion,
-            litellm_params,
-            logger_fn,
-            headers,
-            custom_prompt_dict,
-            client,
-            organization,
-            custom_llm_provider,
-            drop_params,
+            model=model,
+            messages=messages,
+            api_base=api_base,
+            custom_llm_provider=custom_llm_provider,
+            custom_prompt_dict=custom_prompt_dict,
+            model_response=model_response,
+            print_verbose=print_verbose,
+            encoding=encoding,
+            api_key=api_key,
+            logging_obj=logging_obj,
+            optional_params=optional_params,
+            acompletion=acompletion,
+            litellm_params=litellm_params,
+            logger_fn=logger_fn,
+            headers=headers,
+            timeout=timeout,
+            client=client,
+            custom_endpoint=custom_endpoint,
+            streaming_decoder=streaming_decoder,
+            fake_stream=fake_stream,
         )
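The `fake_stream` decision above works around Groq rejecting `response_format` on streaming requests: the handler issues a normal non-streaming request and replays the complete response as a single stream chunk. A minimal sketch of that replay idea, mimicking what `MockResponseIterator` does (illustrative, plain dicts in place of litellm's response types):

```python
from typing import Dict, Iterator

def fake_stream(full_response: Dict) -> Iterator[Dict]:
    """Replay a complete chat response as a one-chunk stream."""
    choice = full_response["choices"][0]
    yield {
        "choices": [
            {
                "index": choice.get("index", 0),
                "delta": {"content": choice["message"]["content"]},
                "finish_reason": choice.get("finish_reason"),
            }
        ]
    }

# Usage: the caller iterates it exactly like a real stream.
for chunk in fake_stream(
    {"choices": [{"index": 0, "message": {"content": "{}"}, "finish_reason": "stop"}]}
):
    print(chunk["choices"][0]["delta"]["content"])
```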

View file

@@ -2,6 +2,7 @@
 Translate from OpenAI's `/v1/chat/completions` to Groq's `/v1/chat/completions`
 """
+import json
 import types
 from typing import List, Optional, Tuple, Union
@@ -9,7 +10,12 @@ from pydantic import BaseModel
 import litellm
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    ChatCompletionAssistantMessage,
+    ChatCompletionToolParam,
+    ChatCompletionToolParamFunctionChunk,
+)
 from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
@@ -99,3 +105,69 @@ class GroqChatConfig(OpenAIGPTConfig):
         )  # type: ignore
         dynamic_api_key = api_key or get_secret_str("GROQ_API_KEY")
         return api_base, dynamic_api_key
+
+    def _should_fake_stream(self, optional_params: dict) -> bool:
+        """
+        Groq doesn't support 'response_format' while streaming
+        """
+        if optional_params.get("response_format") is not None:
+            return True
+        return False
+
+    def _create_json_tool_call_for_response_format(
+        self,
+        json_schema: dict,
+    ):
+        """
+        Handles creating a tool call for getting responses in JSON format.
+
+        Args:
+            json_schema (Optional[dict]): The JSON schema the response should be in
+
+        Returns:
+            AnthropicMessagesTool: The tool call to send to Anthropic API to get responses in JSON format
+        """
+        return ChatCompletionToolParam(
+            type="function",
+            function=ChatCompletionToolParamFunctionChunk(
+                name="json_tool_call",
+                parameters=json_schema,
+            ),
+        )
+
+    def map_openai_params(
+        self,
+        non_default_params: dict,
+        optional_params: dict,
+        model: str,
+        drop_params: bool = False,
+    ) -> dict:
+        _response_format = non_default_params.get("response_format")
+        if _response_format is not None and isinstance(_response_format, dict):
+            json_schema: Optional[dict] = None
+            if "response_schema" in _response_format:
+                json_schema = _response_format["response_schema"]
+            elif "json_schema" in _response_format:
+                json_schema = _response_format["json_schema"]["schema"]
+            """
+            When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode
+            - You usually want to provide a single tool
+            - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
+            - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model's perspective.
+            """
+            if json_schema is not None:
+                _tool_choice = {
+                    "type": "function",
+                    "function": {"name": "json_tool_call"},
+                }
+                _tool = self._create_json_tool_call_for_response_format(
+                    json_schema=json_schema,
+                )
+                optional_params["tools"] = [_tool]
+                optional_params["tool_choice"] = _tool_choice
+                optional_params["json_mode"] = True
+                non_default_params.pop("response_format", None)
+        return super().map_openai_params(
+            non_default_params, optional_params, model, drop_params
+        )
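For a concrete picture of what `map_openai_params` produces, here is an illustrative input/output pair based directly on the code above (plain dicts for clarity): a `response_format` with a JSON schema is rewritten into a forced `json_tool_call` tool plus a `json_mode` flag that the handler later uses to unwrap the tool call back into content.

```python
# Illustrative input:
non_default_params = {
    "response_format": {
        "type": "json_schema",
        "json_schema": {"schema": {"type": "object", "properties": {"key": {"type": "string"}}}},
    }
}

# Expected optional_params after GroqChatConfig.map_openai_params:
expected = {
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "json_tool_call",
                "parameters": {"type": "object", "properties": {"key": {"type": "string"}}},
            },
        }
    ],
    "tool_choice": {"type": "function", "function": {"name": "json_tool_call"}},
    "json_mode": True,
}
```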

View file

@@ -263,7 +263,11 @@ def get_hf_task_for_model(model: str) -> Tuple[hf_tasks, str]:
     return "text-generation-inference", model  # default to tgi
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 def get_hf_task_embedding_for_model(
@@ -301,7 +305,9 @@ async def async_get_hf_task_embedding_for_model(
             task_type, hf_tasks_embeddings
         )
     )
-    http_client = AsyncHTTPHandler(concurrent_limit=1)
+    http_client = get_async_httpx_client(
+        llm_provider=litellm.LlmProviders.HUGGINGFACE,
+    )
     model_info = await http_client.get(url=api_base)
@@ -1067,7 +1073,9 @@ class Huggingface(BaseLLM):
         )
         ## COMPLETION CALL
         if client is None:
-            client = AsyncHTTPHandler(concurrent_limit=1)
+            client = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.HUGGINGFACE,
+            )
         response = await client.post(api_base, headers=headers, data=json.dumps(data))

View file

@@ -164,6 +164,30 @@ class OllamaConfig:
         "response_format",
     ]
+
+    def map_openai_params(
+        self, optional_params: dict, non_default_params: dict
+    ) -> dict:
+        for param, value in non_default_params.items():
+            if param == "max_tokens":
+                optional_params["num_predict"] = value
+            if param == "stream":
+                optional_params["stream"] = value
+            if param == "temperature":
+                optional_params["temperature"] = value
+            if param == "seed":
+                optional_params["seed"] = value
+            if param == "top_p":
+                optional_params["top_p"] = value
+            if param == "frequency_penalty":
+                optional_params["repeat_penalty"] = value
+            if param == "stop":
+                optional_params["stop"] = value
+            if param == "response_format" and isinstance(value, dict):
+                if value["type"] == "json_object":
+                    optional_params["format"] = "json"
+        return optional_params
+
     def _supports_function_calling(self, ollama_model_info: dict) -> bool:
         """
         Check if the 'template' field in the ollama_model_info contains a 'tools' or 'function' key.
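An illustrative round-trip through the mapping above, showing how OpenAI-style parameter names are renamed to their Ollama equivalents:

```python
# Input non_default_params:
config_input = {
    "max_tokens": 256,                            # -> num_predict
    "frequency_penalty": 1.1,                     # -> repeat_penalty
    "response_format": {"type": "json_object"},   # -> format: "json"
    "temperature": 0.2,                           # passed through unchanged
}

# Expected optional_params after OllamaConfig.map_openai_params:
expected = {
    "num_predict": 256,
    "repeat_penalty": 1.1,
    "format": "json",
    "temperature": 0.2,
}
```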

View file

@@ -17,7 +17,9 @@ import httpx  # type: ignore
 import requests  # type: ignore
 import litellm
+from litellm import LlmProviders
 from litellm.litellm_core_utils.core_helpers import map_finish_reason
+from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator
 from litellm.llms.custom_httpx.http_handler import (
     AsyncHTTPHandler,
     HTTPHandler,
@@ -25,9 +27,19 @@ from litellm.llms.custom_httpx.http_handler import (
 )
 from litellm.llms.databricks.streaming_utils import ModelResponseIterator
 from litellm.types.utils import CustomStreamingDecoder, ModelResponse
-from litellm.utils import CustomStreamWrapper, EmbeddingResponse
+from litellm.utils import (
+    Choices,
+    CustomStreamWrapper,
+    EmbeddingResponse,
+    Message,
+    ProviderConfigManager,
+    TextCompletionResponse,
+    Usage,
+    convert_to_model_response_object,
+)
 from ..common_utils import OpenAILikeBase, OpenAILikeError
+from .transformation import OpenAILikeChatConfig
@@ -39,16 +51,22 @@ async def make_call(
     messages: list,
     logging_obj,
     streaming_decoder: Optional[CustomStreamingDecoder] = None,
+    fake_stream: bool = False,
 ):
     if client is None:
         client = litellm.module_level_aclient
-    response = await client.post(api_base, headers=headers, data=data, stream=True)
+    response = await client.post(
+        api_base, headers=headers, data=data, stream=not fake_stream
+    )
     if streaming_decoder is not None:
         completion_stream: Any = streaming_decoder.aiter_bytes(
             response.aiter_bytes(chunk_size=1024)
         )
+    elif fake_stream:
+        model_response = ModelResponse(**response.json())
+        completion_stream = MockResponseIterator(model_response=model_response)
     else:
         completion_stream = ModelResponseIterator(
             streaming_response=response.aiter_lines(), sync_stream=False
@@ -73,11 +91,12 @@ def make_sync_call(
     messages: list,
     logging_obj,
     streaming_decoder: Optional[CustomStreamingDecoder] = None,
+    fake_stream: bool = False,
 ):
     if client is None:
         client = litellm.module_level_client  # Create a new client if none provided
-    response = client.post(api_base, headers=headers, data=data, stream=True)
+    response = client.post(api_base, headers=headers, data=data, stream=not fake_stream)
     if response.status_code != 200:
         raise OpenAILikeError(status_code=response.status_code, message=response.read())
@@ -86,6 +105,9 @@ def make_sync_call(
         completion_stream = streaming_decoder.iter_bytes(
             response.iter_bytes(chunk_size=1024)
         )
+    elif fake_stream:
+        model_response = ModelResponse(**response.json())
+        completion_stream = MockResponseIterator(model_response=model_response)
     else:
         completion_stream = ModelResponseIterator(
             streaming_response=response.iter_lines(), sync_stream=True
@@ -126,8 +148,8 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         headers={},
         client: Optional[AsyncHTTPHandler] = None,
         streaming_decoder: Optional[CustomStreamingDecoder] = None,
+        fake_stream: bool = False,
     ) -> CustomStreamWrapper:
-        data["stream"] = True
         completion_stream = await make_call(
             client=client,
@@ -169,6 +191,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         logger_fn=None,
         headers={},
         timeout: Optional[Union[float, httpx.Timeout]] = None,
+        json_mode: bool = False,
     ) -> ModelResponse:
         if timeout is None:
             timeout = httpx.Timeout(timeout=600.0, connect=5.0)
@@ -181,8 +204,6 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                 api_base, headers=headers, data=json.dumps(data), timeout=timeout
             )
             response.raise_for_status()
-            response_json = response.json()
         except httpx.HTTPStatusError as e:
             raise OpenAILikeError(
                 status_code=e.response.status_code,
@@ -193,22 +214,26 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         except Exception as e:
             raise OpenAILikeError(status_code=500, message=str(e))
-        logging_obj.post_call(
-            input=messages,
-            api_key="",
-            original_response=response_json,
-            additional_args={"complete_input_dict": data},
-        )
-        response = ModelResponse(**response_json)
-        response.model = custom_llm_provider + "/" + (response.model or "")
-        if base_model is not None:
-            response._hidden_params["model"] = base_model
-        return response
+        return OpenAILikeChatConfig._transform_response(
+            model=model,
+            response=response,
+            model_response=model_response,
+            stream=stream,
+            logging_obj=logging_obj,
+            optional_params=optional_params,
+            api_key=api_key,
+            data=data,
+            messages=messages,
+            print_verbose=print_verbose,
+            encoding=encoding,
+            json_mode=json_mode,
+            custom_llm_provider=custom_llm_provider,
+            base_model=base_model,
+        )
     def completion(
         self,
+        *,
         model: str,
         messages: list,
         api_base: str,
@@ -230,6 +255,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         streaming_decoder: Optional[
             CustomStreamingDecoder
         ] = None,  # if openai-compatible api needs custom stream decoder - e.g. sagemaker
+        fake_stream: bool = False,
     ):
         custom_endpoint = custom_endpoint or optional_params.pop(
             "custom_endpoint", None
@@ -243,13 +269,24 @@ class OpenAILikeChatHandler(OpenAILikeBase):
             headers=headers,
         )
-        stream: bool = optional_params.get("stream", None) or False
-        optional_params["stream"] = stream
+        stream: bool = optional_params.pop("stream", None) or False
+        extra_body = optional_params.pop("extra_body", {})
+        json_mode = optional_params.pop("json_mode", None)
+        optional_params.pop("max_retries", None)
+        if not fake_stream:
+            optional_params["stream"] = stream
+
+        if messages is not None and custom_llm_provider is not None:
+            provider_config = ProviderConfigManager.get_provider_config(
+                model=model, provider=LlmProviders(custom_llm_provider)
+            )
+            messages = provider_config._transform_messages(messages)
         data = {
             "model": model,
             "messages": messages,
             **optional_params,
+            **extra_body,
         }
         ## LOGGING
@@ -288,6 +325,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                     client=client,
                     custom_llm_provider=custom_llm_provider,
                     streaming_decoder=streaming_decoder,
+                    fake_stream=fake_stream,
                 )
             else:
                 return self.acompletion_function(
@@ -327,6 +365,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                     messages=messages,
                     logging_obj=logging_obj,
                     streaming_decoder=streaming_decoder,
+                    fake_stream=fake_stream,
                 )
                 # completion_stream.__iter__()
                 return CustomStreamWrapper(
@@ -344,7 +383,6 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                     )
                     response.raise_for_status()
-                    response_json = response.json()
                 except httpx.HTTPStatusError as e:
                     raise OpenAILikeError(
                         status_code=e.response.status_code,
@@ -356,17 +394,19 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                     )
                 except Exception as e:
                     raise OpenAILikeError(status_code=500, message=str(e))
-                logging_obj.post_call(
-                    input=messages,
-                    api_key="",
-                    original_response=response_json,
-                    additional_args={"complete_input_dict": data},
-                )
-                response = ModelResponse(**response_json)
-                response.model = custom_llm_provider + "/" + (response.model or "")
-                if base_model is not None:
-                    response._hidden_params["model"] = base_model
-                return response
+                return OpenAILikeChatConfig._transform_response(
+                    model=model,
+                    response=response,
+                    model_response=model_response,
+                    stream=stream,
+                    logging_obj=logging_obj,
+                    optional_params=optional_params,
+                    api_key=api_key,
+                    data=data,
+                    messages=messages,
+                    print_verbose=print_verbose,
+                    encoding=encoding,
+                    json_mode=json_mode,
+                    custom_llm_provider=custom_llm_provider,
+                    base_model=base_model,
+                )

View file

@@ -0,0 +1,98 @@
+"""
+OpenAI-like chat completion transformation
+"""
+
+import types
+from typing import List, Optional, Tuple, Union
+
+import httpx
+from pydantic import BaseModel
+
+import litellm
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
+from litellm.types.utils import ModelResponse
+
+from ....utils import _remove_additional_properties, _remove_strict_from_schema
+from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+
+
+class OpenAILikeChatConfig(OpenAIGPTConfig):
+    def _get_openai_compatible_provider_info(
+        self, api_base: Optional[str], api_key: Optional[str]
+    ) -> Tuple[Optional[str], Optional[str]]:
+        api_base = api_base or get_secret_str("OPENAI_LIKE_API_BASE")  # type: ignore
+        dynamic_api_key = (
+            api_key or get_secret_str("OPENAI_LIKE_API_KEY") or ""
+        )  # vllm does not require an api key
+        return api_base, dynamic_api_key
+
+    @staticmethod
+    def _convert_tool_response_to_message(
+        message: ChatCompletionAssistantMessage, json_mode: bool
+    ) -> ChatCompletionAssistantMessage:
+        """
+        if json_mode is true, convert the returned tool call response to a content with json str
+
+        e.g. input:
+
+        {"role": "assistant", "tool_calls": [{"id": "call_5ms4", "type": "function", "function": {"name": "json_tool_call", "arguments": "{\"key\": \"question\", \"value\": \"What is the capital of France?\"}"}}]}
+
+        output:
+
+        {"role": "assistant", "content": "{\"key\": \"question\", \"value\": \"What is the capital of France?\"}"}
+        """
+        if not json_mode:
+            return message
+        _tool_calls = message.get("tool_calls")
+        if _tool_calls is None or len(_tool_calls) != 1:
+            return message
+        message["content"] = _tool_calls[0]["function"].get("arguments") or ""
+        message["tool_calls"] = None
+        return message
+
+    @staticmethod
+    def _transform_response(
+        model: str,
+        response: httpx.Response,
+        model_response: ModelResponse,
+        stream: bool,
+        logging_obj: litellm.litellm_core_utils.litellm_logging.Logging,  # type: ignore
+        optional_params: dict,
+        api_key: Optional[str],
+        data: Union[dict, str],
+        messages: List,
+        print_verbose,
+        encoding,
+        json_mode: bool,
+        custom_llm_provider: str,
+        base_model: Optional[str],
+    ) -> ModelResponse:
+        response_json = response.json()
+        logging_obj.post_call(
+            input=messages,
+            api_key="",
+            original_response=response_json,
+            additional_args={"complete_input_dict": data},
+        )
+
+        if json_mode:
+            for choice in response_json["choices"]:
+                message = OpenAILikeChatConfig._convert_tool_response_to_message(
+                    choice.get("message"), json_mode
+                )
+                choice["message"] = message
+
+        returned_response = ModelResponse(**response_json)
+
+        returned_response.model = (
+            custom_llm_provider + "/" + (returned_response.model or "")
+        )
+
+        if base_model is not None:
+            returned_response._hidden_params["model"] = base_model
+        return returned_response
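An illustrative before/after for `_convert_tool_response_to_message` with `json_mode=True`, taken directly from the docstring above: the single forced tool call's `arguments` string becomes the message content, so the caller sees ordinary JSON-mode output.

```python
# Input assistant message (the model answered via the forced json_tool_call):
tool_message = {
    "role": "assistant",
    "tool_calls": [
        {
            "id": "call_5ms4",
            "type": "function",
            "function": {
                "name": "json_tool_call",
                "arguments": '{"key": "question", "value": "What is the capital of France?"}',
            },
        }
    ],
}

# Expected converted message:
converted = {
    "role": "assistant",
    "content": '{"key": "question", "value": "What is the capital of France?"}',
    "tool_calls": None,
}
```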

View file

@@ -45,7 +45,10 @@ class OpenAILikeEmbeddingHandler(OpenAILikeBase):
         response = None
         try:
             if client is None or isinstance(client, AsyncHTTPHandler):
-                self.async_client = AsyncHTTPHandler(timeout=timeout)  # type: ignore
+                self.async_client = get_async_httpx_client(
+                    llm_provider=litellm.LlmProviders.OPENAI,
+                    params={"timeout": timeout},
+                )
             else:
                 self.async_client = client
@@ -62,7 +65,7 @@ class OpenAILikeEmbeddingHandler(OpenAILikeBase):
         except httpx.HTTPStatusError as e:
             raise OpenAILikeError(
                 status_code=e.response.status_code,
-                message=response.text if response else str(e),
+                message=e.response.text if e.response else str(e),
             )
         except httpx.TimeoutException:
             raise OpenAILikeError(
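The `response.text` to `e.response.text` change fixes error reporting when the local `response` variable was never assigned: `httpx.HTTPStatusError` carries the failed response itself, so it is the safe object to read the body from. A minimal illustration of the pattern (not the handler's code):

```python
import httpx

def fetch_text(url: str) -> str:
    try:
        response = httpx.get(url)
        response.raise_for_status()
        return response.text
    except httpx.HTTPStatusError as e:
        # HTTPStatusError always carries the failed response on e.response,
        # so reading the body there avoids touching a possibly-unset local.
        raise RuntimeError(e.response.text) from e
```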

View file

@@ -19,7 +19,10 @@ import litellm.litellm_core_utils
 import litellm.litellm_core_utils.litellm_logging
 from litellm import verbose_logger
 from litellm.litellm_core_utils.core_helpers import map_finish_reason
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.utils import Choices, CustomStreamWrapper, Message, ModelResponse, Usage
 from .base import BaseLLM
@@ -549,7 +552,10 @@ class PredibaseChatCompletion(BaseLLM):
         headers={},
     ) -> ModelResponse:
-        async_handler = AsyncHTTPHandler(timeout=httpx.Timeout(timeout=timeout))
+        async_handler = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.PREDIBASE,
+            params={"timeout": timeout},
+        )
         try:
             response = await async_handler.post(
                 api_base, headers=headers, data=json.dumps(data)

View file

@@ -943,17 +943,10 @@ def _gemini_tool_call_invoke_helper(
     name = function_call_params.get("name", "") or ""
     arguments = function_call_params.get("arguments", "")
     arguments_dict = json.loads(arguments)
-    function_call: Optional[litellm.types.llms.vertex_ai.FunctionCall] = None
-    for k, v in arguments_dict.items():
-        inferred_protocol_value = infer_protocol_value(value=v)
-        _field = litellm.types.llms.vertex_ai.Field(
-            key=k, value={inferred_protocol_value: v}
-        )
-        _fields = litellm.types.llms.vertex_ai.FunctionCallArgs(fields=_field)
-        function_call = litellm.types.llms.vertex_ai.FunctionCall(
-            name=name,
-            args=_fields,
-        )
+    function_call = litellm.types.llms.vertex_ai.FunctionCall(
+        name=name,
+        args=arguments_dict,
+    )
     return function_call
@@ -978,54 +971,26 @@ def convert_to_gemini_tool_call_invoke(
     },
     """
     """
-    Gemini tool call invokes: - https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#submit-api-output
-    content {
-      role: "model"
-      parts [
-        {
-          function_call {
-            name: "get_current_weather"
-            args {
-              fields {
-                key: "unit"
-                value {
-                  string_value: "fahrenheit"
-                }
-              }
-              fields {
-                key: "predicted_temperature"
-                value {
-                  number_value: 45
-                }
-              }
-              fields {
-                key: "location"
-                value {
-                  string_value: "Boston, MA"
-                }
-              }
-            }
-          },
-          {
-            function_call {
-              name: "get_current_weather"
-              args {
-                fields {
-                  key: "location"
-                  value {
-                    string_value: "San Francisco"
-                  }
-                }
-              }
-            }
-          }
-      ]
-    }
+    Gemini tool call invokes:
+    {
+      "role": "model",
+      "parts": [
+        {
+          "functionCall": {
+            "name": "get_current_weather",
+            "args": {
+              "unit": "fahrenheit",
+              "predicted_temperature": 45,
+              "location": "Boston, MA",
+            }
+          }
+        }
+      ]
+    }
     """
     """
     - json.load the arguments
-    - iterate through arguments -> create a FunctionCallArgs for each field
     """
     try:
         _parts_list: List[litellm.types.llms.vertex_ai.PartType] = []
@@ -1128,16 +1093,8 @@ def convert_to_gemini_tool_call_result(
     # We can't determine from openai message format whether it's a successful or
     # error call result so default to the successful result template
-    inferred_content_value = infer_protocol_value(value=content_str)
-    _field = litellm.types.llms.vertex_ai.Field(
-        key="content", value={inferred_content_value: content_str}
-    )
-    _function_call_args = litellm.types.llms.vertex_ai.FunctionCallArgs(fields=_field)
     _function_response = litellm.types.llms.vertex_ai.FunctionResponse(
-        name=name, response=_function_call_args  # type: ignore
+        name=name, response={"content": content_str}  # type: ignore
     )
     _part = litellm.types.llms.vertex_ai.PartType(function_response=_function_response)
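The simplification above drops the per-field protobuf-style `Field`/`FunctionCallArgs` wrappers: the parsed arguments dict is now passed straight through as `args`. A minimal sketch of the new shape, under the assumption (implied by the diff) that the Gemini payload accepts a plain JSON object for `args`:

```python
import json

def gemini_function_call(function_call_params: dict) -> dict:
    """Illustrative version of the simplified helper:
    OpenAI-style function-call params -> Gemini functionCall payload."""
    name = function_call_params.get("name", "") or ""
    arguments_dict = json.loads(function_call_params.get("arguments", "{}"))
    # Plain dict args replace the old per-field protobuf-style wrappers.
    return {"name": name, "args": arguments_dict}

print(gemini_function_call(
    {"name": "get_current_weather", "arguments": '{"location": "Boston, MA"}'}
))
# {'name': 'get_current_weather', 'args': {'location': 'Boston, MA'}}
```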

View file

@@ -9,7 +9,10 @@ import httpx  # type: ignore
 import requests  # type: ignore
 import litellm
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.utils import CustomStreamWrapper, ModelResponse, Usage
 from .prompt_templates.factory import custom_prompt, prompt_factory
@@ -325,7 +328,7 @@ def handle_prediction_response_streaming(prediction_url, api_token, print_verbose):
 async def async_handle_prediction_response_streaming(
     prediction_url, api_token, print_verbose
 ):
-    http_handler = AsyncHTTPHandler(concurrent_limit=1)
+    http_handler = get_async_httpx_client(llm_provider=litellm.LlmProviders.REPLICATE)
     previous_output = ""
     output_string = ""
@@ -560,7 +563,9 @@ async def async_completion(
     logging_obj,
     print_verbose,
 ) -> Union[ModelResponse, CustomStreamWrapper]:
-    http_handler = AsyncHTTPHandler(concurrent_limit=1)
+    http_handler = get_async_httpx_client(
+        llm_provider=litellm.LlmProviders.REPLICATE,
+    )
     prediction_url = await async_start_prediction(
         version_id,
         input_data,

View file

@@ -18,7 +18,10 @@ import litellm
 from litellm import verbose_logger
 from litellm.litellm_core_utils.core_helpers import map_finish_reason
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.types.llms.databricks import GenericStreamingChunk
 from litellm.utils import (
     Choices,
@@ -479,8 +482,9 @@ class CodestralTextCompletion(BaseLLM):
         headers={},
     ) -> TextCompletionResponse:
-        async_handler = AsyncHTTPHandler(
-            timeout=httpx.Timeout(timeout=timeout), concurrent_limit=1
+        async_handler = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.TEXT_COMPLETION_CODESTRAL,
+            params={"timeout": timeout},
         )
         try:

View file

@@ -8,7 +8,11 @@ import httpx  # type: ignore
 import requests  # type: ignore
 import litellm
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.utils import (
     Choices,
     CustomStreamWrapper,
@@ -50,8 +54,8 @@ class TritonChatCompletion(BaseLLM):
         logging_obj: Any,
         api_key: Optional[str] = None,
     ) -> EmbeddingResponse:
-        async_handler = AsyncHTTPHandler(
-            timeout=httpx.Timeout(timeout=600.0, connect=5.0)
+        async_handler = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.TRITON, params={"timeout": 600.0}
         )
         response = await async_handler.post(url=api_base, data=json.dumps(data))
@@ -261,7 +265,9 @@ class TritonChatCompletion(BaseLLM):
         model_response,
         type_of_model,
     ) -> ModelResponse:
-        handler = AsyncHTTPHandler()
+        handler = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.TRITON, params={"timeout": 600.0}
+        )
         if stream:
             return self._ahandle_stream(  # type: ignore
                 handler, api_base, data_for_triton, model, logging_obj

View file

@@ -6,7 +6,11 @@ import httpx
 import litellm
 from litellm.caching.caching import Cache, LiteLLMCacheType
 from litellm.litellm_core_utils.litellm_logging import Logging
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.llms.OpenAI.openai import AllMessageValues
 from litellm.types.llms.vertex_ai import (
     CachedContentListAllResponseBody,
@@ -331,6 +335,13 @@ class ContextCachingEndpoints(VertexBase):
         if cached_content is not None:
             return messages, cached_content
+        cached_messages, non_cached_messages = separate_cached_messages(
+            messages=messages
+        )
+
+        if len(cached_messages) == 0:
+            return messages, None
+
         ## AUTHORIZATION ##
         token, url = self._get_token_and_url_context_caching(
             gemini_api_key=api_key,
@@ -347,22 +358,12 @@ class ContextCachingEndpoints(VertexBase):
             headers.update(extra_headers)
         if client is None or not isinstance(client, AsyncHTTPHandler):
-            _params = {}
-            if timeout is not None:
-                if isinstance(timeout, float) or isinstance(timeout, int):
-                    timeout = httpx.Timeout(timeout)
-                _params["timeout"] = timeout
-            client = AsyncHTTPHandler(**_params)  # type: ignore
+            client = get_async_httpx_client(
+                params={"timeout": timeout}, llm_provider=litellm.LlmProviders.VERTEX_AI
+            )
         else:
             client = client
-        cached_messages, non_cached_messages = separate_cached_messages(
-            messages=messages
-        )
-
-        if len(cached_messages) == 0:
-            return messages, None
-
         ## CHECK IF CACHED ALREADY
         generated_cache_key = local_cache_obj.get_cache_key(messages=cached_messages)
         google_cache_name = await self.async_check_cache(

View file

@@ -1026,7 +1026,9 @@ async def make_call(
     logging_obj,
 ):
     if client is None:
-        client = AsyncHTTPHandler()  # Create a new client if none provided
+        client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.VERTEX_AI,
+        )
     try:
         response = await client.post(api_base, headers=headers, data=data, stream=True)

View file

@@ -7,8 +7,13 @@ from typing import Any, List, Literal, Optional, Union
 import httpx
+import litellm
 from litellm import EmbeddingResponse
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.types.llms.openai import EmbeddingInput
 from litellm.types.llms.vertex_ai import (
     VertexAIBatchEmbeddingsRequestBody,
@@ -150,7 +155,10 @@ class GoogleBatchEmbeddings(VertexLLM):
             else:
                 _params["timeout"] = httpx.Timeout(timeout=600.0, connect=5.0)
-            async_handler: AsyncHTTPHandler = AsyncHTTPHandler(**_params)  # type: ignore
+            async_handler: AsyncHTTPHandler = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.VERTEX_AI,
+                params={"timeout": timeout},
+            )
         else:
             async_handler = client  # type: ignore

View file

@@ -5,7 +5,11 @@ import httpx
 from openai.types.image import Image
 import litellm
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
@@ -156,7 +160,10 @@ class VertexImageGeneration(VertexLLM):
             else:
                 _params["timeout"] = httpx.Timeout(timeout=600.0, connect=5.0)
-            self.async_handler = AsyncHTTPHandler(**_params)  # type: ignore
+            self.async_handler = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.VERTEX_AI,
+                params={"timeout": timeout},
+            )
         else:
             self.async_handler = client  # type: ignore

View file

@@ -5,7 +5,11 @@ import httpx
 import litellm
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
     VertexAIError,
     VertexLLM,
@@ -172,7 +176,10 @@ class VertexMultimodalEmbedding(VertexLLM):
             if isinstance(timeout, float) or isinstance(timeout, int):
                 timeout = httpx.Timeout(timeout)
             _params["timeout"] = timeout
-            client = AsyncHTTPHandler(**_params)  # type: ignore
+            client = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.VERTEX_AI,
+                params={"timeout": timeout},
+            )
         else:
             client = client  # type: ignore

View file

@@ -14,6 +14,7 @@ from pydantic import BaseModel
 import litellm
 from litellm._logging import verbose_logger
 from litellm.litellm_core_utils.core_helpers import map_finish_reason
+from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
 from litellm.llms.prompt_templates.factory import (
     convert_to_anthropic_image_obj,
     convert_to_gemini_tool_call_invoke,
@@ -93,11 +94,15 @@ def _get_client_cache_key(

 def _get_client_from_cache(client_cache_key: str):
-    return litellm.in_memory_llm_clients_cache.get(client_cache_key, None)
+    return litellm.in_memory_llm_clients_cache.get_cache(client_cache_key)


 def _set_client_in_cache(client_cache_key: str, vertex_llm_model: Any):
-    litellm.in_memory_llm_clients_cache[client_cache_key] = vertex_llm_model
+    litellm.in_memory_llm_clients_cache.set_cache(
+        key=client_cache_key,
+        value=vertex_llm_model,
+        ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
+    )


 def completion(  # noqa: PLR0915

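The dict-style cache access becomes a `get_cache`/`set_cache` pair with a TTL, so cached Vertex clients expire instead of living for the whole process lifetime. A toy sketch of that cache contract (illustrative only, not litellm's actual `InMemoryCache`):

```python
import time
from typing import Any, Optional


class TTLCache:
    def __init__(self) -> None:
        self._store: dict[str, tuple[Any, Optional[float]]] = {}

    def set_cache(self, key: str, value: Any, ttl: Optional[float] = None) -> None:
        # entries with no ttl never expire
        expires_at = time.monotonic() + ttl if ttl is not None else None
        self._store[key] = (value, expires_at)

    def get_cache(self, key: str) -> Optional[Any]:
        item = self._store.get(key)
        if item is None:
            return None
        value, expires_at = item
        if expires_at is not None and time.monotonic() > expires_at:
            del self._store[key]  # expired entries are evicted lazily on read
            return None
        return value
```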
View file

@@ -57,6 +57,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler):
     def completion(
         self,
+        *,
         model: str,
         messages: list,
         api_base: str,
@@ -75,9 +76,8 @@ class WatsonXChatHandler(OpenAILikeChatHandler):
         timeout: Optional[Union[float, httpx.Timeout]] = None,
         client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
         custom_endpoint: Optional[bool] = None,
-        streaming_decoder: Optional[
-            CustomStreamingDecoder
-        ] = None,  # if openai-compatible api needs custom stream decoder - e.g. sagemaker
+        streaming_decoder: Optional[CustomStreamingDecoder] = None,
+        fake_stream: bool = False,
     ):
         api_params = _get_api_params(optional_params, print_verbose=print_verbose)

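The bare `*` added to `WatsonXChatHandler.completion` makes every following parameter keyword-only, which protects a signature this wide from silently misordered positional arguments. A tiny illustration of the mechanism:

```python
def completion(*, model: str, messages: list, fake_stream: bool = False):
    return model, len(messages), fake_stream


completion(model="watsonx/foo", messages=[{"role": "user", "content": "hi"}])  # ok
# completion("watsonx/foo", [])  # TypeError: takes 0 positional arguments
```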
View file

@@ -24,7 +24,10 @@ import httpx  # type: ignore
 import requests  # type: ignore

 import litellm
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.watsonx import WatsonXAIEndpoint
 from litellm.utils import EmbeddingResponse, ModelResponse, Usage, map_finish_reason
@@ -710,10 +713,13 @@ class RequestManager:
         if stream:
             request_params["stream"] = stream
         try:
-            self.async_handler = AsyncHTTPHandler(
-                timeout=httpx.Timeout(
-                    timeout=request_params.pop("timeout", 600.0), connect=5.0
-                ),
+            self.async_handler = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.WATSONX,
+                params={
+                    "timeout": httpx.Timeout(
+                        timeout=request_params.pop("timeout", 600.0), connect=5.0
+                    ),
+                },
             )
             if "json" in request_params:
                 request_params["data"] = json.dumps(request_params.pop("json", {}))

View file

@@ -1495,8 +1495,8 @@ def completion(  # type: ignore # noqa: PLR0915
             timeout=timeout,  # type: ignore
             custom_prompt_dict=custom_prompt_dict,
             client=client,  # pass AsyncOpenAI, OpenAI client
-            organization=organization,
             custom_llm_provider=custom_llm_provider,
+            encoding=encoding,
         )
     elif (
         model in litellm.open_ai_chat_completion_models
@@ -3182,6 +3182,7 @@ async def aembedding(*args, **kwargs) -> EmbeddingResponse:
         or custom_llm_provider == "azure_ai"
         or custom_llm_provider == "together_ai"
        or custom_llm_provider == "openai_like"
+        or custom_llm_provider == "jina_ai"
    ):  # currently implemented aiohttp calls for just azure and openai, soon all.
        # Await normally
        init_response = await loop.run_in_executor(None, func_with_context)

View file

@@ -1745,7 +1745,8 @@
         "output_cost_per_token": 0.00000080,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-8b-8192": {
         "max_tokens": 8192,
@@ -1755,7 +1756,74 @@
         "output_cost_per_token": 0.00000008,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-1b-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000004,
+        "output_cost_per_token": 0.00000004,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-3b-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000006,
+        "output_cost_per_token": 0.00000006,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-11b-text-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000018,
+        "output_cost_per_token": 0.00000018,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-11b-vision-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000018,
+        "output_cost_per_token": 0.00000018,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-90b-text-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0000009,
+        "output_cost_per_token": 0.0000009,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-90b-vision-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0000009,
+        "output_cost_per_token": 0.0000009,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-70b-8192": {
         "max_tokens": 8192,
@@ -1765,7 +1833,8 @@
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.1-8b-instant": {
         "max_tokens": 8192,
@@ -1775,7 +1844,8 @@
         "output_cost_per_token": 0.00000008,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.1-70b-versatile": {
         "max_tokens": 8192,
@@ -1785,7 +1855,8 @@
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.1-405b-reasoning": {
         "max_tokens": 8192,
@@ -1795,7 +1866,8 @@
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/mixtral-8x7b-32768": {
         "max_tokens": 32768,
@@ -1805,7 +1877,8 @@
         "output_cost_per_token": 0.00000024,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/gemma-7b-it": {
         "max_tokens": 8192,
@@ -1815,7 +1888,8 @@
         "output_cost_per_token": 0.00000007,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/gemma2-9b-it": {
         "max_tokens": 8192,
@@ -1825,7 +1899,8 @@
         "output_cost_per_token": 0.00000020,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-groq-70b-8192-tool-use-preview": {
         "max_tokens": 8192,
@@ -1835,7 +1910,8 @@
         "output_cost_per_token": 0.00000089,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-groq-8b-8192-tool-use-preview": {
         "max_tokens": 8192,
@@ -1845,7 +1921,8 @@
         "output_cost_per_token": 0.00000019,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "cerebras/llama3.1-8b": {
         "max_tokens": 128000,

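The per-token prices in these entries drive cost tracking; a back-of-the-envelope check of what they imply (plain arithmetic, not litellm's cost API):

```python
# groq/llama-3.2-1b-preview, taken from the table above
input_cost_per_token = 0.00000004
output_cost_per_token = 0.00000004

prompt_tokens, completion_tokens = 1_000, 500
cost = prompt_tokens * input_cost_per_token + completion_tokens * output_cost_per_token
print(f"${cost:.8f}")  # $0.00006000
```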
View file

@@ -12,7 +12,6 @@ model_list:
       vertex_ai_project: "adroit-crow-413218"
       vertex_ai_location: "us-east5"

 router_settings:
-  model_group_alias:
     "gpt-4-turbo": # Aliased model name

View file

@@ -192,6 +192,10 @@ class RouteChecks:
             return True
         if "/langfuse/" in route:
             return True
+        if "/anthropic/" in route:
+            return True
+        if "/azure/" in route:
+            return True
         return False

     @staticmethod

View file

@@ -2,10 +2,8 @@
 What is this?

 Provider-specific Pass-Through Endpoints
-"""
-
-"""
-1. Create pass-through endpoints for any LITELLM_BASE_URL/gemini/<endpoint> map to https://generativelanguage.googleapis.com/<endpoint>
+
+Use litellm with Anthropic SDK, Vertex AI SDK, Cohere SDK, etc.
 """

 import ast

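Per the docstring above, a pass-through endpoint forwards `LITELLM_BASE_URL/gemini/<endpoint>` to the upstream Google API unchanged. A minimal sketch of that shape (the FastAPI wiring and names here are illustrative, not litellm's implementation):

```python
import httpx
from fastapi import APIRouter, Request, Response

router = APIRouter()


@router.api_route("/gemini/{endpoint:path}", methods=["GET", "POST"])
async def gemini_passthrough(endpoint: str, request: Request) -> Response:
    upstream = f"https://generativelanguage.googleapis.com/{endpoint}"
    async with httpx.AsyncClient() as client:
        resp = await client.request(
            request.method,
            upstream,
            params=dict(request.query_params),
            content=await request.body(),  # forward the raw body unchanged
        )
    return Response(content=resp.content, status_code=resp.status_code)
```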
View file

@@ -203,6 +203,9 @@ from litellm.proxy.openai_files_endpoints.files_endpoints import (
     router as openai_files_router,
 )
 from litellm.proxy.openai_files_endpoints.files_endpoints import set_files_config
+from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import (
+    router as llm_passthrough_router,
+)
 from litellm.proxy.pass_through_endpoints.pass_through_endpoints import (
     initialize_pass_through_endpoints,
 )
@@ -233,9 +236,6 @@ from litellm.proxy.utils import (
     reset_budget,
     update_spend,
 )
-from litellm.proxy.vertex_ai_endpoints.google_ai_studio_endpoints import (
-    router as gemini_router,
-)
 from litellm.proxy.vertex_ai_endpoints.langfuse_endpoints import (
     router as langfuse_router,
 )
@@ -9128,7 +9128,7 @@ app.include_router(router)
 app.include_router(rerank_router)
 app.include_router(fine_tuning_router)
 app.include_router(vertex_router)
-app.include_router(gemini_router)
+app.include_router(llm_passthrough_router)
 app.include_router(langfuse_router)
 app.include_router(pass_through_router)
 app.include_router(health_router)

View file

@@ -13,23 +13,14 @@ from typing_extensions import (
 )


-class Field(TypedDict):
-    key: str
-    value: Dict[str, Any]
-
-
-class FunctionCallArgs(TypedDict):
-    fields: Field
-
-
 class FunctionResponse(TypedDict):
     name: str
-    response: FunctionCallArgs
+    response: Optional[dict]


 class FunctionCall(TypedDict):
     name: str
-    args: FunctionCallArgs
+    args: Optional[dict]


 class FileDataType(TypedDict):

View file

@@ -1739,15 +1739,15 @@ def supports_response_schema(model: str, custom_llm_provider: Optional[str]) ->

     Does not raise error. Defaults to 'False'. Outputs logging.error.
     """
-    ## GET LLM PROVIDER ##
-    model, custom_llm_provider, _, _ = get_llm_provider(
-        model=model, custom_llm_provider=custom_llm_provider
-    )
-
-    if custom_llm_provider == "predibase":  # predibase supports this globally
-        return True
-
     try:
+        ## GET LLM PROVIDER ##
+        model, custom_llm_provider, _, _ = get_llm_provider(
+            model=model, custom_llm_provider=custom_llm_provider
+        )
+
+        if custom_llm_provider == "predibase":  # predibase supports this globally
+            return True
+
         ## GET MODEL INFO
         model_info = litellm.get_model_info(
             model=model, custom_llm_provider=custom_llm_provider
@@ -1755,12 +1755,17 @@ def supports_response_schema(model: str, custom_llm_provider: Optional[str]) ->
         if model_info.get("supports_response_schema", False) is True:
             return True
-        return False
     except Exception:
-        verbose_logger.error(
-            f"Model not supports response_schema. You passed model={model}, custom_llm_provider={custom_llm_provider}."
-        )
-        return False
+        ## check if provider supports response schema globally
+        supported_params = get_supported_openai_params(
+            model=model,
+            custom_llm_provider=custom_llm_provider,
+            request_type="chat_completion",
+        )
+        if supported_params is not None and "response_schema" in supported_params:
+            return True
+
+    return False


 def supports_function_calling(
@@ -2710,6 +2715,7 @@ def get_optional_params(  # noqa: PLR0915
         non_default_params["response_format"] = type_to_response_format_param(
             response_format=non_default_params["response_format"]
         )
+
     if "tools" in non_default_params and isinstance(
         non_default_params, list
     ):  # fixes https://github.com/BerriAI/litellm/issues/4933
@@ -3259,24 +3265,14 @@ def get_optional_params(  # noqa: PLR0915
         )
         _check_valid_arg(supported_params=supported_params)

-        if max_tokens is not None:
-            optional_params["num_predict"] = max_tokens
-        if stream:
-            optional_params["stream"] = stream
-        if temperature is not None:
-            optional_params["temperature"] = temperature
-        if seed is not None:
-            optional_params["seed"] = seed
-        if top_p is not None:
-            optional_params["top_p"] = top_p
-        if frequency_penalty is not None:
-            optional_params["repeat_penalty"] = frequency_penalty
-        if stop is not None:
-            optional_params["stop"] = stop
-        if response_format is not None and response_format["type"] == "json_object":
-            optional_params["format"] = "json"
+        optional_params = litellm.OllamaConfig().map_openai_params(
+            non_default_params=non_default_params,
+            optional_params=optional_params,
+        )
     elif custom_llm_provider == "ollama_chat":
-        supported_params = litellm.OllamaChatConfig().get_supported_openai_params()
+        supported_params = get_supported_openai_params(
+            model=model, custom_llm_provider=custom_llm_provider
+        )

         _check_valid_arg(supported_params=supported_params)
@@ -3494,24 +3490,16 @@ def get_optional_params(  # noqa: PLR0915
         )
         _check_valid_arg(supported_params=supported_params)

-        if temperature is not None:
-            optional_params["temperature"] = temperature
-        if max_tokens is not None:
-            optional_params["max_tokens"] = max_tokens
-        if top_p is not None:
-            optional_params["top_p"] = top_p
-        if stream is not None:
-            optional_params["stream"] = stream
-        if stop is not None:
-            optional_params["stop"] = stop
-        if tools is not None:
-            optional_params["tools"] = tools
-        if tool_choice is not None:
-            optional_params["tool_choice"] = tool_choice
-        if response_format is not None:
-            optional_params["response_format"] = response_format
-        if seed is not None:
-            optional_params["seed"] = seed
+        optional_params = litellm.GroqChatConfig().map_openai_params(
+            non_default_params=non_default_params,
+            optional_params=optional_params,
+            model=model,
+            drop_params=(
+                drop_params
+                if drop_params is not None and isinstance(drop_params, bool)
+                else False
+            ),
+        )
     elif custom_llm_provider == "deepseek":
         supported_params = get_supported_openai_params(
             model=model, custom_llm_provider=custom_llm_provider
@@ -6178,5 +6166,7 @@ class ProviderConfigManager:
             return litellm.OpenAIO1Config()
         elif litellm.LlmProviders.DEEPSEEK == provider:
             return litellm.DeepSeekChatConfig()
+        elif litellm.LlmProviders.GROQ == provider:
+            return litellm.GroqChatConfig()

         return OpenAIGPTConfig()

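Both blocks above replace inline `if` chains with per-provider config classes exposing a common `map_openai_params` method. A stripped-down sketch of that pattern (`MiniOllamaConfig` is illustrative; the real `OllamaConfig`/`GroqChatConfig` cover far more parameters):

```python
class MiniOllamaConfig:
    def map_openai_params(self, non_default_params: dict, optional_params: dict) -> dict:
        mapping = {
            "max_tokens": "num_predict",        # OpenAI name -> Ollama name
            "frequency_penalty": "repeat_penalty",
        }
        for openai_key, value in non_default_params.items():
            # translate known names, pass the rest through unchanged
            optional_params[mapping.get(openai_key, openai_key)] = value
        return optional_params


params = MiniOllamaConfig().map_openai_params(
    non_default_params={"max_tokens": 100, "temperature": 0.5},
    optional_params={},
)
print(params)  # {'num_predict': 100, 'temperature': 0.5}
```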
View file

@@ -1745,7 +1745,8 @@
         "output_cost_per_token": 0.00000080,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-8b-8192": {
         "max_tokens": 8192,
@@ -1755,7 +1756,74 @@
         "output_cost_per_token": 0.00000008,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-1b-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000004,
+        "output_cost_per_token": 0.00000004,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-3b-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000006,
+        "output_cost_per_token": 0.00000006,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-11b-text-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000018,
+        "output_cost_per_token": 0.00000018,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-11b-vision-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000018,
+        "output_cost_per_token": 0.00000018,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-90b-text-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0000009,
+        "output_cost_per_token": 0.0000009,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
+    },
+    "groq/llama-3.2-90b-vision-preview": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0000009,
+        "output_cost_per_token": 0.0000009,
+        "litellm_provider": "groq",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-70b-8192": {
         "max_tokens": 8192,
@@ -1765,7 +1833,8 @@
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.1-8b-instant": {
         "max_tokens": 8192,
@@ -1775,7 +1844,8 @@
         "output_cost_per_token": 0.00000008,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.1-70b-versatile": {
         "max_tokens": 8192,
@@ -1785,7 +1855,8 @@
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.1-405b-reasoning": {
         "max_tokens": 8192,
@@ -1795,7 +1866,8 @@
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/mixtral-8x7b-32768": {
         "max_tokens": 32768,
@@ -1805,7 +1877,8 @@
         "output_cost_per_token": 0.00000024,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/gemma-7b-it": {
         "max_tokens": 8192,
@@ -1815,7 +1888,8 @@
         "output_cost_per_token": 0.00000007,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/gemma2-9b-it": {
         "max_tokens": 8192,
@@ -1825,7 +1899,8 @@
         "output_cost_per_token": 0.00000020,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-groq-70b-8192-tool-use-preview": {
         "max_tokens": 8192,
@@ -1835,7 +1910,8 @@
         "output_cost_per_token": 0.00000089,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama3-groq-8b-8192-tool-use-preview": {
         "max_tokens": 8192,
@@ -1845,7 +1921,8 @@
         "output_cost_per_token": 0.00000019,
         "litellm_provider": "groq",
         "mode": "chat",
-        "supports_function_calling": true
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "cerebras/llama3.1-8b": {
         "max_tokens": 128000,

View file

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "1.52.12"
+version = "1.52.13"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT"
@@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"]
 build-backend = "poetry.core.masonry.api"

 [tool.commitizen]
-version = "1.52.12"
+version = "1.52.13"
 version_files = [
     "pyproject.toml:^version"
 ]

View file

@@ -0,0 +1,88 @@
import ast
import os

ALLOWED_FILES = [
    # local files
    "../../litellm/__init__.py",
    "../../litellm/llms/custom_httpx/http_handler.py",
    # when running on ci/cd
    "./litellm/__init__.py",
    "./litellm/llms/custom_httpx/http_handler.py",
]

warning_msg = "this is a serious violation that can impact latency. Creating Async clients per request can add +500ms per request"


def check_for_async_http_handler(file_path):
    """
    Checks if an async HTTP client is instantiated directly in the given file.
    Raises ValueError at the first violation; returns an empty list otherwise.
    """
    print("..checking file=", file_path)
    if file_path in ALLOWED_FILES:
        return []
    with open(file_path, "r") as file:
        try:
            tree = ast.parse(file.read())
        except SyntaxError:
            print(f"Warning: Syntax error in file {file_path}")
            return []
        violations = []
        target_names = [
            "asynchttphandler",
            "asyncclient",
        ]  # lowercased constructor names to flag; add variations here
        for node in ast.walk(tree):
            if not isinstance(node, ast.Call):
                continue
            # match both bare calls (AsyncHTTPHandler(...)) and attribute
            # calls (httpx.AsyncClient(...))
            if isinstance(node.func, ast.Name):
                called = node.func.id
            elif isinstance(node.func, ast.Attribute):
                called = node.func.attr
            else:
                continue
            if called.lower() in target_names:
                raise ValueError(
                    f"found violation in file {file_path} line: {node.lineno}. Please use `get_async_httpx_client` instead. {warning_msg}"
                )
    return violations


def scan_directory_for_async_handler(base_dir):
    """
    Scans all Python files in the directory tree for direct async client usage.
    Returns a dict of files and line numbers where violations were found.
    """
    violations = {}
    for root, _, files in os.walk(base_dir):
        for file in files:
            if file.endswith(".py"):
                file_path = os.path.join(root, file)
                file_violations = check_for_async_http_handler(file_path)
                if file_violations:
                    violations[file_path] = file_violations
    return violations


def test_no_async_http_handler_usage():
    """
    Test to ensure AsyncHttpHandler is not used anywhere in the codebase.
    """
    base_dir = "./litellm"  # Adjust this path as needed
    # base_dir = "../../litellm"  # LOCAL TESTING
    violations = scan_directory_for_async_handler(base_dir)

    if violations:
        violation_messages = []
        for file_path, line_numbers in violations.items():
            violation_messages.append(
                f"Found AsyncHttpHandler in {file_path} at lines: {line_numbers}"
            )
        raise AssertionError(
            "AsyncHttpHandler usage detected:\n" + "\n".join(violation_messages)
        )


if __name__ == "__main__":
    test_no_async_http_handler_usage()

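For reference, this is the kind of call the AST walk above flags; parsing a one-line offender shows the `ast.Call`/`ast.Name` shape the check matches on:

```python
import ast

snippet = "client = AsyncHTTPHandler(timeout=600)"
for node in ast.walk(ast.parse(snippet)):
    if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
        print(node.func.id, "called on line", node.lineno)
        # -> AsyncHTTPHandler called on line 1
```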
View file

@@ -8,6 +8,7 @@ import traceback
 from dotenv import load_dotenv
 from openai.types.image import Image

+from litellm.caching import InMemoryCache

 logging.basicConfig(level=logging.DEBUG)
 load_dotenv()
@@ -107,7 +108,7 @@ class TestVertexImageGeneration(BaseImageGenTest):
         # comment this when running locally
         load_vertex_ai_credentials()

-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {
             "model": "vertex_ai/imagegeneration@006",
             "vertex_ai_project": "adroit-crow-413218",
@@ -118,13 +119,13 @@ class TestVertexImageGeneration(BaseImageGenTest):

 class TestBedrockSd3(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}


 class TestBedrockSd1(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
@@ -181,7 +182,7 @@ def test_image_generation_azure_dall_e_3():
 @pytest.mark.asyncio
 async def test_aimage_generation_bedrock_with_optional_params():
     try:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         response = await litellm.aimage_generation(
             prompt="A cute baby sea otter",
             model="bedrock/stability.stable-diffusion-xl-v1",

View file

@@ -49,7 +49,7 @@ class BaseLLMChatTest(ABC):
             )
             assert response is not None
         except litellm.InternalServerError:
-            pass
+            pytest.skip("Model is overloaded")

         # for OpenAI the content contains the JSON schema, so we need to assert that the content is not None
         assert response.choices[0].message.content is not None
@@ -92,7 +92,9 @@ class BaseLLMChatTest(ABC):
         # relevant issue: https://github.com/BerriAI/litellm/issues/6741
         assert response.choices[0].message.content is not None

+    @pytest.mark.flaky(retries=6, delay=1)
     def test_json_response_pydantic_obj(self):
+        litellm.set_verbose = True
         from pydantic import BaseModel
         from litellm.utils import supports_response_schema
@@ -119,6 +121,11 @@ class BaseLLMChatTest(ABC):
                 response_format=TestModel,
             )
             assert res is not None
+
+            print(res.choices[0].message)
+
+            assert res.choices[0].message.content is not None
+            assert res.choices[0].message.tool_calls is None
         except litellm.InternalServerError:
             pytest.skip("Model is overloaded")
@@ -140,12 +147,15 @@ class BaseLLMChatTest(ABC):
             },
         ]

-        response = litellm.completion(
-            **base_completion_call_args,
-            messages=messages,
-            response_format={"type": "json_object"},
-            stream=True,
-        )
+        try:
+            response = litellm.completion(
+                **base_completion_call_args,
+                messages=messages,
+                response_format={"type": "json_object"},
+                stream=True,
+            )
+        except litellm.InternalServerError:
+            pytest.skip("Model is overloaded")

         print(response)
@@ -161,6 +171,25 @@ class BaseLLMChatTest(ABC):
         assert content is not None
         assert len(content) > 0

+    @pytest.fixture
+    def tool_call_no_arguments(self):
+        return {
+            "role": "assistant",
+            "content": "",
+            "tool_calls": [
+                {
+                    "id": "call_2c384bc6-de46-4f29-8adc-60dd5805d305",
+                    "function": {"name": "Get-FAQ", "arguments": "{}"},
+                    "type": "function",
+                }
+            ],
+        }
+
+    @abstractmethod
+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        pass
+
     @pytest.fixture
     def pdf_messages(self):
         import base64

View file

@@ -697,6 +697,15 @@ class TestAnthropicCompletion(BaseLLMChatTest):
         assert _document_validation["source"]["media_type"] == "application/pdf"
         assert _document_validation["source"]["type"] == "base64"

+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        from litellm.llms.prompt_templates.factory import (
+            convert_to_anthropic_tool_invoke,
+        )
+
+        result = convert_to_anthropic_tool_invoke([tool_call_no_arguments])
+        print(result)
+

 def test_convert_tool_response_to_message_with_values():
     """Test converting a tool response with 'values' key to a message"""

View file

@@ -7,3 +7,7 @@ class TestDeepSeekChatCompletion(BaseLLMChatTest):
         return {
             "model": "deepseek/deepseek-chat",
         }
+
+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        pass

View file

@@ -0,0 +1,12 @@
from base_llm_unit_tests import BaseLLMChatTest


class TestGroq(BaseLLMChatTest):
    def get_base_completion_call_args(self) -> dict:
        return {
            "model": "groq/llama-3.1-70b-versatile",
        }

    def test_tool_call_no_arguments(self, tool_call_no_arguments):
        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
        pass

View file

@@ -32,3 +32,7 @@ class TestMistralCompletion(BaseLLMChatTest):
     def get_base_completion_call_args(self) -> dict:
         litellm.set_verbose = True
         return {"model": "mistral/mistral-small-latest"}
+
+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        pass

View file

@@ -952,3 +952,17 @@ def test_lm_studio_embedding_params():
         drop_params=True,
     )
     assert len(optional_params) == 0
+
+
+def test_ollama_pydantic_obj():
+    from pydantic import BaseModel
+
+    class ResponseFormat(BaseModel):
+        x: str
+        y: str
+
+    get_optional_params(
+        model="qwen2:0.5b",
+        custom_llm_provider="ollama",
+        response_format=ResponseFormat,
+    )

View file

@@ -306,6 +306,8 @@ def test_multiple_function_call():
         )
         assert len(r.choices) > 0

+        print(mock_post.call_args.kwargs["json"])
+
         assert mock_post.call_args.kwargs["json"] == {
             "contents": [
                 {"role": "user", "parts": [{"text": "do test"}]},
@@ -313,28 +315,8 @@ def test_multiple_function_call():
                     "role": "model",
                     "parts": [
                         {"text": "test"},
-                        {
-                            "function_call": {
-                                "name": "test",
-                                "args": {
-                                    "fields": {
-                                        "key": "arg",
-                                        "value": {"string_value": "test"},
-                                    }
-                                },
-                            }
-                        },
-                        {
-                            "function_call": {
-                                "name": "test2",
-                                "args": {
-                                    "fields": {
-                                        "key": "arg",
-                                        "value": {"string_value": "test2"},
-                                    }
-                                },
-                            }
-                        },
+                        {"function_call": {"name": "test", "args": {"arg": "test"}}},
+                        {"function_call": {"name": "test2", "args": {"arg": "test2"}}},
                     ],
                 },
                 {
@@ -342,23 +324,13 @@ def test_multiple_function_call():
                     {
                         "function_response": {
                             "name": "test",
-                            "response": {
-                                "fields": {
-                                    "key": "content",
-                                    "value": {"string_value": "42"},
-                                }
-                            },
+                            "response": {"content": "42"},
                         }
                     },
                     {
                         "function_response": {
                             "name": "test2",
-                            "response": {
-                                "fields": {
-                                    "key": "content",
-                                    "value": {"string_value": "15"},
-                                }
-                            },
+                            "response": {"content": "15"},
                         }
                    },
                ]
@@ -441,34 +413,16 @@ def test_multiple_function_call_changed_text_pos():
         assert len(resp.choices) > 0
         mock_post.assert_called_once()

+        print(mock_post.call_args.kwargs["json"]["contents"])
+
         assert mock_post.call_args.kwargs["json"]["contents"] == [
             {"role": "user", "parts": [{"text": "do test"}]},
             {
                 "role": "model",
                 "parts": [
                     {"text": "test"},
-                    {
-                        "function_call": {
-                            "name": "test",
-                            "args": {
-                                "fields": {
-                                    "key": "arg",
-                                    "value": {"string_value": "test"},
-                                }
-                            },
-                        }
-                    },
-                    {
-                        "function_call": {
-                            "name": "test2",
-                            "args": {
-                                "fields": {
-                                    "key": "arg",
-                                    "value": {"string_value": "test2"},
-                                }
-                            },
-                        }
-                    },
+                    {"function_call": {"name": "test", "args": {"arg": "test"}}},
+                    {"function_call": {"name": "test2", "args": {"arg": "test2"}}},
                 ],
             },
             {
@@ -476,23 +430,13 @@ def test_multiple_function_call_changed_text_pos():
                 {
                     "function_response": {
                         "name": "test2",
-                        "response": {
-                            "fields": {
-                                "key": "content",
-                                "value": {"string_value": "15"},
-                            }
-                        },
+                        "response": {"content": "15"},
                     }
                },
                {
                    "function_response": {
                        "name": "test",
-                        "response": {
-                            "fields": {
-                                "key": "content",
-                                "value": {"string_value": "42"},
-                            }
-                        },
+                        "response": {"content": "42"},
                    }
                },
            ]
@@ -1354,3 +1298,20 @@ def test_vertex_embedding_url(model, expected_url):
     assert url == expected_url
     assert endpoint == "predict"
+
+
+from base_llm_unit_tests import BaseLLMChatTest
+
+
+class TestVertexGemini(BaseLLMChatTest):
+    def get_base_completion_call_args(self) -> dict:
+        return {"model": "gemini/gemini-1.5-flash"}
+
+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        from litellm.llms.prompt_templates.factory import (
+            convert_to_gemini_tool_call_invoke,
+        )
+
+        result = convert_to_gemini_tool_call_invoke(tool_call_no_arguments)
+        print(result)

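These expectations encode the move from protobuf `Struct`-style args (`{"fields": {"key": ..., "value": ...}}`) to plain JSON dicts. A small sketch of the translation the tests now assert, with hypothetical variable names:

```python
import json

# an OpenAI-style assistant tool call, arguments serialized as a JSON string
openai_tool_call = {
    "id": "call_123",
    "type": "function",
    "function": {"name": "test", "arguments": json.dumps({"arg": "test"})},
}

# the Gemini content part carries the deserialized arguments as a plain dict
gemini_part = {
    "function_call": {
        "name": openai_tool_call["function"]["name"],
        "args": json.loads(openai_tool_call["function"]["arguments"]),
    }
}
print(gemini_part)  # {'function_call': {'name': 'test', 'args': {'arg': 'test'}}}
```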
View file

@@ -12,6 +12,7 @@ sys.path.insert(0, os.path.abspath("../.."))

 import litellm
 from litellm import completion
+from litellm.caching import InMemoryCache

 litellm.num_retries = 3
 litellm.success_callback = ["langfuse"]
@@ -29,15 +30,20 @@ def langfuse_client():
         f"{os.environ['LANGFUSE_PUBLIC_KEY']}-{os.environ['LANGFUSE_SECRET_KEY']}"
     )
     # use a in memory langfuse client for testing, RAM util on ci/cd gets too high when we init many langfuse clients
-    if _langfuse_cache_key in litellm.in_memory_llm_clients_cache:
-        langfuse_client = litellm.in_memory_llm_clients_cache[_langfuse_cache_key]
+
+    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_langfuse_cache_key)
+    if _cached_client:
+        langfuse_client = _cached_client
     else:
         langfuse_client = langfuse.Langfuse(
             public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
             secret_key=os.environ["LANGFUSE_SECRET_KEY"],
             host=None,
         )
-        litellm.in_memory_llm_clients_cache[_langfuse_cache_key] = langfuse_client
+        litellm.in_memory_llm_clients_cache.set_cache(
+            key=_langfuse_cache_key,
+            value=langfuse_client,
+        )

     print("NEW LANGFUSE CLIENT")

View file

@@ -2867,6 +2867,7 @@ def test_gemini_function_call_parameter_in_messages():
             print(e)

         # mock_client.assert_any_call()
+
         assert {
             "contents": [
                 {
@@ -2879,12 +2880,7 @@ def test_gemini_function_call_parameter_in_messages():
                     {
                         "function_call": {
                             "name": "search",
-                            "args": {
-                                "fields": {
-                                    "key": "queries",
-                                    "value": {"list_value": ["weather in boston"]},
-                                }
-                            },
+                            "args": {"queries": ["weather in boston"]},
                         }
                     }
                 ],
@@ -2895,12 +2891,7 @@ def test_gemini_function_call_parameter_in_messages():
                     "function_response": {
                         "name": "search",
                         "response": {
-                            "fields": {
-                                "key": "content",
-                                "value": {
-                                    "string_value": "The current weather in Boston is 22°F."
-                                },
-                            }
+                            "content": "The current weather in Boston is 22°F."
                         },
                    }
                }
@@ -2935,6 +2926,7 @@ def test_gemini_function_call_parameter_in_messages():

 def test_gemini_function_call_parameter_in_messages_2():
+    litellm.set_verbose = True
     from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
         _gemini_convert_messages_with_history,
     )
@@ -2958,6 +2950,7 @@ def test_gemini_function_call_parameter_in_messages_2():

     returned_contents = _gemini_convert_messages_with_history(messages=messages)

+    print(f"returned_contents: {returned_contents}")
+
     assert returned_contents == [
         {
             "role": "user",
@@ -2970,12 +2963,7 @@ def test_gemini_function_call_parameter_in_messages_2():
                 {
                     "function_call": {
                         "name": "search",
-                        "args": {
-                            "fields": {
-                                "key": "queries",
-                                "value": {"list_value": ["weather in boston"]},
-                            }
-                        },
+                        "args": {"queries": ["weather in boston"]},
                    }
                },
            ],
@@ -2986,12 +2974,7 @@ def test_gemini_function_call_parameter_in_messages_2():
                "function_response": {
                    "name": "search",
                    "response": {
-                        "fields": {
-                            "key": "content",
-                            "value": {
-                                "string_value": "The weather in Boston is 100 degrees."
-                            },
-                        }
+                        "content": "The weather in Boston is 100 degrees."
                    },
                }
            }

View file

@@ -67,7 +67,8 @@ def test_ollama_json_mode():
         assert converted_params == {
             "temperature": 0.5,
             "format": "json",
-        }, f"{converted_params} != {'temperature': 0.5, 'format': 'json'}"
+            "stream": False,
+        }, f"{converted_params} != {'temperature': 0.5, 'format': 'json', 'stream': False}"
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

View file

@@ -1450,7 +1450,7 @@ async def test_mistral_on_router():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "mistral/mistral-medium",
+                "model": "mistral/mistral-small-latest",
             },
         },
     ]

View file

@@ -64,6 +64,7 @@ async def test_batch_completion_multiple_models(mode):
     models_in_responses = []
     print(f"response: {response}")
     for individual_response in response:
+        print(f"individual_response: {individual_response}")
         _model = individual_response["model"]
         models_in_responses.append(_model)

View file

@@ -683,7 +683,7 @@ def test_completion_ollama_hosted_stream():
     [
         # "claude-3-5-haiku-20241022",
         # "claude-2",
-        # "mistral/mistral-medium",
+        # "mistral/mistral-small-latest",
         "openrouter/openai/gpt-4o-mini",
     ],
 )

View file

@@ -749,6 +749,7 @@ def test_convert_model_response_object():
         ("gemini/gemini-1.5-pro", True),
         ("predibase/llama3-8b-instruct", True),
         ("gpt-3.5-turbo", False),
+        ("groq/llama3-70b-8192", True),
     ],
 )
 def test_supports_response_schema(model, expected_bool):

View file

@@ -27,6 +27,9 @@ from fastapi import HTTPException, Request
 import pytest

 from litellm.proxy.auth.route_checks import RouteChecks
 from litellm.proxy._types import LiteLLM_UserTable, LitellmUserRoles, UserAPIKeyAuth
+from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import (
+    router as llm_passthrough_router,
+)

 # Replace the actual hash_token function with our mock
 import litellm.proxy.auth.route_checks
@@ -56,12 +59,21 @@ def test_is_llm_api_route():
     assert RouteChecks.is_llm_api_route("/vertex-ai/text") is True
     assert RouteChecks.is_llm_api_route("/gemini/generate") is True
     assert RouteChecks.is_llm_api_route("/cohere/generate") is True
+    assert RouteChecks.is_llm_api_route("/anthropic/messages") is True
+    assert RouteChecks.is_llm_api_route("/anthropic/v1/messages") is True
+    assert RouteChecks.is_llm_api_route("/azure/endpoint") is True

     # check non-matching routes
     assert RouteChecks.is_llm_api_route("/some/random/route") is False
     assert RouteChecks.is_llm_api_route("/key/regenerate/82akk800000000jjsk") is False
     assert RouteChecks.is_llm_api_route("/key/82akk800000000jjsk/delete") is False

+    # check all routes in llm_passthrough_router, ensure they are considered llm api routes
+    for route in llm_passthrough_router.routes:
+        route_path = str(route.path)
+        print("route_path", route_path)
+        assert RouteChecks.is_llm_api_route(route_path) is True
+

 # Test _route_matches_pattern
 def test_route_matches_pattern():

View file

@@ -1794,7 +1794,7 @@ async def test_add_callback_via_key_litellm_pre_call_utils_langsmith(
 async def test_gemini_pass_through_endpoint():
     from starlette.datastructures import URL

-    from litellm.proxy.vertex_ai_endpoints.google_ai_studio_endpoints import (
+    from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import (
         Request,
         Response,
         gemini_proxy_route,