Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 03:34:10 +00:00)
Litellm dev 11 21 2024 (#6837)
* Fix Vertex AI function calling invoke: use JSON format instead of protobuf text format. (#6702)
* test: test tool_call conversion when arguments is an empty dict. Fixes https://github.com/BerriAI/litellm/issues/6833
* fix(openai_like/handler.py): return a more descriptive error message. Fixes https://github.com/BerriAI/litellm/issues/6812
* test: skip overloaded model
* docs(anthropic.md): update anthropic docs to show how to route to any new model
* feat(groq/): fake stream when the 'response_format' param is passed, since Groq doesn't support streaming when response_format is set (usage sketch below)
* feat(groq/): add response_format support for groq. Closes https://github.com/BerriAI/litellm/issues/6845
* fix(o1_handler.py): remove fake streaming for o1. Closes https://github.com/BerriAI/litellm/issues/6801
* build(model_prices_and_context_window.json): add groq llama3.2b model pricing. Closes https://github.com/BerriAI/litellm/issues/6807
* fix(utils.py): fix handling of the ollama response format param. Fixes https://github.com/BerriAI/litellm/issues/6848#issuecomment-2491215485
* docs(sidebars.js): refactor chat endpoint placement
* fix: fix linting errors
* test: fix test
* test: fix test
* fix(openai_like/handler): handle max retries
* fix(streaming_handler.py): fix streaming check for openai-compatible providers
* test: update test
* test: correctly handle model is overloaded error
* test: update test
* test: fix test
* test: mark flaky test

---------

Co-authored-by: Guowang Li <Guowang@users.noreply.github.com>
This commit is contained in:
parent 9ef254ff35
commit 4eca6ede4e

31 changed files with 747 additions and 403 deletions
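A minimal usage sketch of the groq fake-stream change referenced in the commit message above. This example is illustrative, not part of the commit: the model name and prompt are assumptions, and it expects GROQ_API_KEY in the environment. Because Groq rejects streaming when response_format is set, litellm now sends the request non-streaming and replays the parsed response as a single mock chunk, so caller code written against the streaming interface keeps working unchanged:

import litellm

# With response_format set, the groq handler forces fake streaming:
# the HTTP request goes out non-streaming and the full response is
# replayed through a one-chunk iterator.
response = litellm.completion(
    model="groq/llama3-70b-8192",  # assumed model name, for illustration
    messages=[{"role": "user", "content": "Return three primes as JSON."}],
    response_format={"type": "json_object"},
    stream=True,
)

for chunk in response:
    print(chunk.choices[0].delta.content or "", end="")

The relevant hunks of the openai_like chat handler follow.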
@@ -17,7 +17,9 @@ import httpx  # type: ignore
 import requests  # type: ignore
 
 import litellm
+from litellm import LlmProviders
 from litellm.litellm_core_utils.core_helpers import map_finish_reason
+from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator
 from litellm.llms.custom_httpx.http_handler import (
     AsyncHTTPHandler,
     HTTPHandler,
@@ -25,9 +27,19 @@ from litellm.llms.custom_httpx.http_handler import (
 )
 from litellm.llms.databricks.streaming_utils import ModelResponseIterator
 from litellm.types.utils import CustomStreamingDecoder, ModelResponse
-from litellm.utils import CustomStreamWrapper, EmbeddingResponse
+from litellm.utils import (
+    Choices,
+    CustomStreamWrapper,
+    EmbeddingResponse,
+    Message,
+    ProviderConfigManager,
+    TextCompletionResponse,
+    Usage,
+    convert_to_model_response_object,
+)
 
 from ..common_utils import OpenAILikeBase, OpenAILikeError
+from .transformation import OpenAILikeChatConfig
 
 
 async def make_call(
@@ -39,16 +51,22 @@ async def make_call(
     messages: list,
     logging_obj,
     streaming_decoder: Optional[CustomStreamingDecoder] = None,
+    fake_stream: bool = False,
 ):
     if client is None:
         client = litellm.module_level_aclient
 
-    response = await client.post(api_base, headers=headers, data=data, stream=True)
+    response = await client.post(
+        api_base, headers=headers, data=data, stream=not fake_stream
+    )
 
     if streaming_decoder is not None:
         completion_stream: Any = streaming_decoder.aiter_bytes(
             response.aiter_bytes(chunk_size=1024)
         )
+    elif fake_stream:
+        model_response = ModelResponse(**response.json())
+        completion_stream = MockResponseIterator(model_response=model_response)
     else:
         completion_stream = ModelResponseIterator(
             streaming_response=response.aiter_lines(), sync_stream=False
@@ -73,11 +91,12 @@ def make_sync_call(
     messages: list,
     logging_obj,
     streaming_decoder: Optional[CustomStreamingDecoder] = None,
+    fake_stream: bool = False,
 ):
     if client is None:
         client = litellm.module_level_client  # Create a new client if none provided
 
-    response = client.post(api_base, headers=headers, data=data, stream=True)
+    response = client.post(api_base, headers=headers, data=data, stream=not fake_stream)
 
     if response.status_code != 200:
         raise OpenAILikeError(status_code=response.status_code, message=response.read())
@@ -86,6 +105,9 @@ def make_sync_call(
         completion_stream = streaming_decoder.iter_bytes(
             response.iter_bytes(chunk_size=1024)
         )
+    elif fake_stream:
+        model_response = ModelResponse(**response.json())
+        completion_stream = MockResponseIterator(model_response=model_response)
     else:
         completion_stream = ModelResponseIterator(
             streaming_response=response.iter_lines(), sync_stream=True
@@ -126,8 +148,8 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         headers={},
         client: Optional[AsyncHTTPHandler] = None,
         streaming_decoder: Optional[CustomStreamingDecoder] = None,
+        fake_stream: bool = False,
     ) -> CustomStreamWrapper:
-
         data["stream"] = True
         completion_stream = await make_call(
             client=client,
@@ -169,6 +191,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         logger_fn=None,
         headers={},
         timeout: Optional[Union[float, httpx.Timeout]] = None,
+        json_mode: bool = False,
     ) -> ModelResponse:
         if timeout is None:
             timeout = httpx.Timeout(timeout=600.0, connect=5.0)
@@ -181,8 +204,6 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                 api_base, headers=headers, data=json.dumps(data), timeout=timeout
             )
             response.raise_for_status()
-
-            response_json = response.json()
         except httpx.HTTPStatusError as e:
             raise OpenAILikeError(
                 status_code=e.response.status_code,
@@ -193,22 +214,26 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         except Exception as e:
             raise OpenAILikeError(status_code=500, message=str(e))
 
-        logging_obj.post_call(
-            input=messages,
-            api_key="",
-            original_response=response_json,
-            additional_args={"complete_input_dict": data},
+        return OpenAILikeChatConfig._transform_response(
+            model=model,
+            response=response,
+            model_response=model_response,
+            stream=stream,
+            logging_obj=logging_obj,
+            optional_params=optional_params,
+            api_key=api_key,
+            data=data,
+            messages=messages,
+            print_verbose=print_verbose,
+            encoding=encoding,
+            json_mode=json_mode,
+            custom_llm_provider=custom_llm_provider,
+            base_model=base_model,
         )
-        response = ModelResponse(**response_json)
-
-        response.model = custom_llm_provider + "/" + (response.model or "")
-
-        if base_model is not None:
-            response._hidden_params["model"] = base_model
-        return response
 
     def completion(
         self,
         *,
         model: str,
         messages: list,
         api_base: str,
@@ -230,6 +255,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
         streaming_decoder: Optional[
             CustomStreamingDecoder
         ] = None,  # if openai-compatible api needs custom stream decoder - e.g. sagemaker
+        fake_stream: bool = False,
     ):
         custom_endpoint = custom_endpoint or optional_params.pop(
             "custom_endpoint", None
@@ -243,13 +269,24 @@ class OpenAILikeChatHandler(OpenAILikeBase):
             headers=headers,
         )
 
-        stream: bool = optional_params.get("stream", None) or False
-        optional_params["stream"] = stream
+        stream: bool = optional_params.pop("stream", None) or False
+        extra_body = optional_params.pop("extra_body", {})
+        json_mode = optional_params.pop("json_mode", None)
+        optional_params.pop("max_retries", None)
+        if not fake_stream:
+            optional_params["stream"] = stream
+
+        if messages is not None and custom_llm_provider is not None:
+            provider_config = ProviderConfigManager.get_provider_config(
+                model=model, provider=LlmProviders(custom_llm_provider)
+            )
+            messages = provider_config._transform_messages(messages)
 
         data = {
             "model": model,
             "messages": messages,
             **optional_params,
+            **extra_body,
         }
 
         ## LOGGING
@@ -288,6 +325,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                    client=client,
                    custom_llm_provider=custom_llm_provider,
                    streaming_decoder=streaming_decoder,
+                    fake_stream=fake_stream,
                )
            else:
                return self.acompletion_function(
@@ -327,6 +365,7 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                    messages=messages,
                    logging_obj=logging_obj,
                    streaming_decoder=streaming_decoder,
+                    fake_stream=fake_stream,
                )
                # completion_stream.__iter__()
                return CustomStreamWrapper(
@@ -344,7 +383,6 @@ class OpenAILikeChatHandler(OpenAILikeBase):
                )
                response.raise_for_status()
 
-                response_json = response.json()
            except httpx.HTTPStatusError as e:
                raise OpenAILikeError(
                    status_code=e.response.status_code,
@@ -356,17 +394,19 @@ class OpenAILikeChatHandler(OpenAILikeBase):
            )
        except Exception as e:
            raise OpenAILikeError(status_code=500, message=str(e))
-        logging_obj.post_call(
-            input=messages,
-            api_key="",
-            original_response=response_json,
-            additional_args={"complete_input_dict": data},
+        return OpenAILikeChatConfig._transform_response(
+            model=model,
+            response=response,
+            model_response=model_response,
+            stream=stream,
+            logging_obj=logging_obj,
+            optional_params=optional_params,
+            api_key=api_key,
+            data=data,
+            messages=messages,
+            print_verbose=print_verbose,
+            encoding=encoding,
+            json_mode=json_mode,
+            custom_llm_provider=custom_llm_provider,
+            base_model=base_model,
        )
-        response = ModelResponse(**response_json)
-
-        response.model = custom_llm_provider + "/" + (response.model or "")
-
-        if base_model is not None:
-            response._hidden_params["model"] = base_model
-
-        return response
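The elif fake_stream branches in make_call and make_sync_call above parse the whole JSON body into a ModelResponse and hand it to MockResponseIterator (imported from the bedrock invoke handler), which exposes it through the same iterator interface as a real stream. A self-contained sketch of that pattern, under assumed names (FakeChunk and SingleResponseIterator are illustrative, not litellm types):

from dataclasses import dataclass
from typing import Optional

@dataclass
class FakeChunk:
    # Stand-in for the parsed response; the diff uses ModelResponse here.
    content: str
    finish_reason: Optional[str] = None

class SingleResponseIterator:
    """Replays one fully materialized response as a one-chunk stream."""

    def __init__(self, content: str) -> None:
        self._content = content
        self._sent = False

    def __iter__(self) -> "SingleResponseIterator":
        return self

    def __next__(self) -> FakeChunk:
        if self._sent:
            raise StopIteration  # the single chunk was already emitted
        self._sent = True
        return FakeChunk(content=self._content, finish_reason="stop")

# Consumer code is identical for real and faked streams.
for chunk in SingleResponseIterator('{"primes": [2, 3, 5]}'):
    print(chunk.content, chunk.finish_reason)

Replaying the whole completion as one chunk keeps the streaming contract that CustomStreamWrapper expects, without teaching every provider branch about non-streaming responses.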
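Both the async and sync return paths now delegate to OpenAILikeChatConfig._transform_response instead of inlining the post-processing. The removed lines show the minimum that helper must reproduce; a simplified sketch of that logic as a free function (hedged: litellm's actual method takes many more parameters, as the added call sites show):

from typing import Any, Dict, Optional

def transform_response_sketch(
    response_json: Dict[str, Any],
    custom_llm_provider: str,
    base_model: Optional[str] = None,
) -> Dict[str, Any]:
    # Namespace the returned model under its provider, mirroring the
    # removed inline code: "llama3-70b-8192" -> "groq/llama3-70b-8192".
    response_json["model"] = custom_llm_provider + "/" + (response_json.get("model") or "")
    # The real code stores base_model in response._hidden_params["model"];
    # a plain dict stands in for those hidden params here.
    hidden_params: Dict[str, Any] = {}
    if base_model is not None:
        hidden_params["model"] = base_model
    response_json["_hidden_params"] = hidden_params
    return response_json

Consolidating this in one helper also puts the json_mode flag and the logging_obj.post_call bookkeeping, both dropped from the handler in this diff, next to the response transformation they belong with.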