import types
from typing import List, Optional, Type, Union

import litellm

from ....exceptions import UnsupportedParamsError
from ....types.llms.openai import (
    AllMessageValues,
    ChatCompletionToolChoiceFunctionParam,
    ChatCompletionToolChoiceObjectParam,
    ChatCompletionToolParam,
    ChatCompletionToolParamFunctionChunk,
)
from ...prompt_templates.factory import convert_to_azure_openai_messages

class AzureOpenAIConfig:
    """
    Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions

    The class `AzureOpenAIConfig` provides configuration for OpenAI's chat completions API when used with Azure. Below are the parameters:

    - `frequency_penalty` (number or null): Defaults to 0. Accepts a value between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, reducing repetition.

    - `function_call` (string or object): This optional parameter controls how the model calls functions.

    - `functions` (array): An optional parameter. It is a list of functions for which the model may generate JSON inputs.

    - `logit_bias` (map): This optional parameter modifies the likelihood of specified tokens appearing in the completion.

    - `max_tokens` (integer or null): This optional parameter sets the maximum number of tokens to generate in the chat completion.

    - `n` (integer or null): This optional parameter sets how many chat completion choices to generate for each input message.

    - `presence_penalty` (number or null): Defaults to 0. Penalizes new tokens based on whether they appear in the text so far, increasing the model's likelihood of talking about new topics.

    - `stop` (string / array / null): Specifies up to 4 sequences where the API will stop generating further tokens.

    - `temperature` (number or null): Defines the sampling temperature to use, varying between 0 and 2.

    - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling.
    """

    def __init__(
        self,
        frequency_penalty: Optional[int] = None,
        function_call: Optional[Union[str, dict]] = None,
        functions: Optional[list] = None,
        logit_bias: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[int] = None,
        stop: Optional[Union[str, list]] = None,
        temperature: Optional[int] = None,
        top_p: Optional[int] = None,
    ) -> None:
        locals_ = locals().copy()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }

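    # Illustrative sketch (assumed usage, following the pattern of litellm's other
    # provider configs): instantiating the config stores non-None values as class
    # attributes via __init__ above, and get_config() returns them so they can be
    # merged into a request's optional params, e.g.:
    #
    #   litellm.AzureOpenAIConfig(temperature=0.7)
    #   AzureOpenAIConfig.get_config()  # expected to include {"temperature": 0.7}
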
    def get_supported_openai_params(self):
        return [
            "temperature",
            "n",
            "stream",
            "stream_options",
            "stop",
            "max_tokens",
            "max_completion_tokens",
            "tools",
            "tool_choice",
            "presence_penalty",
            "frequency_penalty",
            "logit_bias",
            "user",
            "function_call",
            "functions",
            "top_p",
            "logprobs",
            "top_logprobs",
            "response_format",
            "seed",
            "extra_headers",
        ]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        api_version: str,  # Y-M-D-{optional}
        drop_params,
    ) -> dict:
        supported_openai_params = self.get_supported_openai_params()

        api_version_times = api_version.split("-")
        api_version_year = api_version_times[0]
        api_version_month = api_version_times[1]
        api_version_day = api_version_times[2]
        for param, value in non_default_params.items():
            if param == "tool_choice":
                """
                This parameter requires API version 2023-12-01-preview or later

                tool_choice='required' is not supported as of 2024-05-01-preview
                """
                ## check if api version supports this param ##
                if (
                    api_version_year < "2023"
                    or (api_version_year == "2023" and api_version_month < "12")
                    or (
                        api_version_year == "2023"
                        and api_version_month == "12"
                        and api_version_day < "01"
                    )
                ):
                    if litellm.drop_params is True or (
                        drop_params is not None and drop_params is True
                    ):
                        pass
                    else:
                        raise UnsupportedParamsError(
                            status_code=400,
                            message=f"""Azure does not support 'tool_choice', for api_version={api_version}. Bump your API version to '2023-12-01-preview' or later. This parameter requires 'api_version="2023-12-01-preview"' or later. Azure API Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions""",
                        )
                elif value == "required" and (
                    api_version_year == "2024" and api_version_month <= "05"
                ):  ## check if tool_choice value is supported ##
                    if litellm.drop_params is True or (
                        drop_params is not None and drop_params is True
                    ):
                        pass
                    else:
                        raise UnsupportedParamsError(
                            status_code=400,
                            message=f"Azure does not support '{value}' as a {param} param, for api_version={api_version}. To drop 'tool_choice=required' for calls with this Azure API version, set `litellm.drop_params=True` or for proxy:\n\n`litellm_settings:\n drop_params: true`\nAzure API Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions",
                        )
                else:
                    optional_params["tool_choice"] = value
            elif param == "response_format" and isinstance(value, dict):
                json_schema: Optional[dict] = None
                schema_name: str = ""
                if "response_schema" in value:
                    json_schema = value["response_schema"]
                    schema_name = "json_tool_call"
                elif "json_schema" in value:
                    json_schema = value["json_schema"]["schema"]
                    schema_name = value["json_schema"]["name"]
                """
                Follow similar approach to anthropic - translate to a single tool call.

                When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode
                - You usually want to provide a single tool
                - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
                - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model's perspective.
                """
                if json_schema is not None and (
                    (api_version_year <= "2024" and api_version_month < "08")
                    or "gpt-4o" not in model
                ):  # azure api version "2024-08-01-preview" onwards supports 'json_schema' only for gpt-4o
                    _tool_choice = ChatCompletionToolChoiceObjectParam(
                        type="function",
                        function=ChatCompletionToolChoiceFunctionParam(
                            name=schema_name
                        ),
                    )

                    _tool = ChatCompletionToolParam(
                        type="function",
                        function=ChatCompletionToolParamFunctionChunk(
                            name=schema_name, parameters=json_schema
                        ),
                    )

                    optional_params["tools"] = [_tool]
                    optional_params["tool_choice"] = _tool_choice
                    optional_params["json_mode"] = True
                else:
                    optional_params["response_format"] = value
            elif param == "max_completion_tokens":
                # TODO - Azure OpenAI will probably add support for this, we should pass it through when Azure adds support
                optional_params["max_tokens"] = value
            elif param in supported_openai_params:
                optional_params[param] = value

        return optional_params

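    # Illustrative example of the mapping above (values are hypothetical): with an
    # API version that predates native structured outputs, a json_schema response
    # format is translated into a single forced tool call instead of being passed
    # through as-is:
    #
    #   AzureOpenAIConfig().map_openai_params(
    #       non_default_params={
    #           "response_format": {
    #               "type": "json_schema",
    #               "json_schema": {"name": "my_schema", "schema": {"type": "object"}},
    #           }
    #       },
    #       optional_params={},
    #       model="gpt-4o",
    #       api_version="2024-05-01-preview",
    #       drop_params=False,
    #   )
    #   # expected to set optional_params["tools"], ["tool_choice"], and ["json_mode"]
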
    @classmethod
    def transform_request(
        cls, model: str, messages: List[AllMessageValues], optional_params: dict
    ) -> dict:
        messages = convert_to_azure_openai_messages(messages)
        return {
            "model": model,
            "messages": messages,
            **optional_params,
        }

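    # Illustrative example (hypothetical values): transform_request converts the
    # messages to the Azure OpenAI format and merges in the mapped params, so a
    # call like the following is expected to yield a plain request dict:
    #
    #   AzureOpenAIConfig.transform_request(
    #       model="gpt-4o",
    #       messages=[{"role": "user", "content": "Hello"}],
    #       optional_params={"temperature": 0.2},
    #   )
    #   # -> {"model": "gpt-4o", "messages": [...], "temperature": 0.2}
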
    def get_mapped_special_auth_params(self) -> dict:
        return {"token": "azure_ad_token"}

    def map_special_auth_params(self, non_default_params: dict, optional_params: dict):
        for param, value in non_default_params.items():
            if param == "token":
                optional_params["azure_ad_token"] = value
        return optional_params

    def get_eu_regions(self) -> List[str]:
        """
        Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability
        """
        return ["europe", "sweden", "switzerland", "france", "uk"]

    def get_us_regions(self) -> List[str]:
        """
        Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability
        """
        return [
            "us",
            "eastus",
            "eastus2",
            "eastus2euap",
            "eastus3",
            "southcentralus",
            "westus",
            "westus2",
            "westus3",
            "westus4",
        ]
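

# Rough illustration (not part of the library) of how these region lists could back
# region-pinned routing; `deployment_region` is a hypothetical value read from a
# deployment's Azure settings:
#
#   config = AzureOpenAIConfig()
#   deployment_region = "eastus2"
#   allowed = deployment_region.lower() in config.get_us_regions()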