Merge branch 'main' into litellm_gemini_stream_tool_calling

Repository: https://github.com/BerriAI/litellm.git
Commit: 65bee737c5
70 changed files with 1844 additions and 984 deletions
@@ -2555,6 +2555,24 @@ def get_optional_params(
                     message=f"Function calling is not supported by {custom_llm_provider}.",
                 )
 
+    if "tools" in non_default_params:
+        tools = non_default_params["tools"]
+        for (
+            tool
+        ) in (
+            tools
+        ):  # clean out 'additionalProperties = False'. Causes vertexai/gemini OpenAI API Schema errors - https://github.com/langchain-ai/langchainjs/issues/5240
+            tool_function = tool.get("function", {})
+            parameters = tool_function.get("parameters", None)
+            if parameters is not None:
+                new_parameters = copy.deepcopy(parameters)
+                if (
+                    "additionalProperties" in new_parameters
+                    and new_parameters["additionalProperties"] is False
+                ):
+                    new_parameters.pop("additionalProperties", None)
+                tool_function["parameters"] = new_parameters
+
     def _check_valid_arg(supported_params):
         verbose_logger.debug(
             f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}"
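For reference, a minimal self-contained sketch of what this new cleanup pass does to a single OpenAI-style tool definition (the tool schema below is illustrative, not taken from the diff):

```python
import copy

# Illustrative OpenAI-style tool definition. Some clients (e.g. langchainjs)
# emit "additionalProperties": False, which vertexai/gemini rejects.
tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "additionalProperties": False,
        },
    },
}

tool_function = tool.get("function", {})
parameters = tool_function.get("parameters", None)
if parameters is not None:
    # deepcopy so the caller's original parameters dict is never mutated
    new_parameters = copy.deepcopy(parameters)
    if new_parameters.get("additionalProperties") is False:
        new_parameters.pop("additionalProperties", None)
    tool_function["parameters"] = new_parameters

print("additionalProperties" in tool["function"]["parameters"])  # False
```

Note that only the top-level `additionalProperties` key is stripped; nested object schemas are left untouched.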
@@ -4707,7 +4725,9 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> Mod
             )
     except Exception:
         raise Exception(
-            "This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json"
+            "This model isn't mapped yet. model={}, custom_llm_provider={}. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json".format(
+                model, custom_llm_provider
+            )
         )
 
 
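This change threads the failing `model` / `custom_llm_provider` pair into the error text. A minimal sketch of the resulting behavior, assuming a plain dict-backed lookup (the map entry and function name below are hypothetical):

```python
# Hypothetical sketch; assumes the price map is a dict keyed by model name.
model_cost_map = {"gpt-3.5-turbo": {"max_tokens": 4097}}  # illustrative entry

def get_model_info_sketch(model, custom_llm_provider=None):
    try:
        return model_cost_map[model]
    except Exception:
        raise Exception(
            "This model isn't mapped yet. model={}, custom_llm_provider={}. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json".format(
                model, custom_llm_provider
            )
        )

# get_model_info_sketch("unknown-model", "vertex_ai") now names the exact
# model/provider pair instead of only the generic "This model isn't mapped yet."
```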
@@ -7519,7 +7539,7 @@ def exception_type(
                 if original_exception.status_code == 400:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"{exception_provider} - {error_str}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
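The same one-line substitution repeats for every status code below, presumably because `error_str` is built directly from the caught provider exception and is therefore always populated, whereas `message` may not reflect the caught exception at this point. A tiny runnable illustration (all names are stand-ins):

```python
exception_provider = "OpenAIException"  # stand-in provider label
original_exception = ValueError("invalid request payload")  # stand-in error

error_str = str(original_exception)  # always reflects the caught exception
print(f"{exception_provider} - {error_str}")
# -> OpenAIException - invalid request payload
```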
@@ -7528,7 +7548,7 @@ def exception_type(
                 elif original_exception.status_code == 401:
                     exception_mapping_worked = True
                     raise AuthenticationError(
-                        message=f"AuthenticationError: {exception_provider} - {message}",
+                        message=f"AuthenticationError: {exception_provider} - {error_str}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
@@ -7537,7 +7557,7 @@ def exception_type(
                 elif original_exception.status_code == 404:
                     exception_mapping_worked = True
                     raise NotFoundError(
-                        message=f"NotFoundError: {exception_provider} - {message}",
+                        message=f"NotFoundError: {exception_provider} - {error_str}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -7546,7 +7566,7 @@ def exception_type(
                 elif original_exception.status_code == 408:
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"Timeout Error: {exception_provider} - {message}",
+                        message=f"Timeout Error: {exception_provider} - {error_str}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         litellm_debug_info=extra_information,
@@ -7554,7 +7574,7 @@ def exception_type(
                 elif original_exception.status_code == 422:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"BadRequestError: {exception_provider} - {message}",
+                        message=f"BadRequestError: {exception_provider} - {error_str}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -7563,7 +7583,7 @@ def exception_type(
                 elif original_exception.status_code == 429:
                     exception_mapping_worked = True
                     raise RateLimitError(
-                        message=f"RateLimitError: {exception_provider} - {message}",
+                        message=f"RateLimitError: {exception_provider} - {error_str}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -7572,7 +7592,7 @@ def exception_type(
                 elif original_exception.status_code == 503:
                     exception_mapping_worked = True
                     raise ServiceUnavailableError(
-                        message=f"ServiceUnavailableError: {exception_provider} - {message}",
+                        message=f"ServiceUnavailableError: {exception_provider} - {error_str}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -7581,7 +7601,7 @@ def exception_type(
                 elif original_exception.status_code == 504:  # gateway timeout error
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"Timeout Error: {exception_provider} - {message}",
+                        message=f"Timeout Error: {exception_provider} - {error_str}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         litellm_debug_info=extra_information,
@@ -7590,7 +7610,7 @@ def exception_type(
                     exception_mapping_worked = True
                     raise APIError(
                         status_code=original_exception.status_code,
-                        message=f"APIError: {exception_provider} - {message}",
+                        message=f"APIError: {exception_provider} - {error_str}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         request=original_exception.request,
@@ -7599,7 +7619,7 @@ def exception_type(
             else:
                 # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
                 raise APIConnectionError(
-                    message=f"APIConnectionError: {exception_provider} - {message}",
+                    message=f"APIConnectionError: {exception_provider} - {error_str}",
                     llm_provider=custom_llm_provider,
                     model=model,
                     litellm_debug_info=extra_information,
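Taken together, the hunks at 7539–7619 make every mapped exception embed `error_str` rather than `message`. A condensed, hypothetical sketch of the overall status-code-to-typed-exception pattern (the exception classes and the `map_exception` helper below are stand-ins, not litellm's actual definitions, which also carry `model`, `llm_provider`, `response`, and so on):

```python
# Condensed, hypothetical sketch of the status-code -> typed-exception pattern.
class BadRequestError(Exception): ...
class AuthenticationError(Exception): ...
class NotFoundError(Exception): ...
class RateLimitError(Exception): ...
class Timeout(Exception): ...
class ServiceUnavailableError(Exception): ...
class APIError(Exception): ...
class APIConnectionError(Exception): ...

STATUS_TO_EXC = {
    400: BadRequestError,
    401: AuthenticationError,
    404: NotFoundError,
    408: Timeout,
    422: BadRequestError,
    429: RateLimitError,
    503: ServiceUnavailableError,
    504: Timeout,
}

def map_exception(original_exception, exception_provider):
    error_str = str(original_exception)  # always defined, unlike `message`
    status_code = getattr(original_exception, "status_code", None)
    if status_code is None:
        # no status code -> treat as a connection-level failure
        raise APIConnectionError(
            f"APIConnectionError: {exception_provider} - {error_str}"
        )
    exc_cls = STATUS_TO_EXC.get(status_code, APIError)
    raise exc_cls(f"{exc_cls.__name__}: {exception_provider} - {error_str}")

# map_exception(some_provider_error, "OpenAIException") raises the typed
# error with the provider's original message embedded in its text.
```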