Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
* fix(types/utils.py): support returning 'reasoning_content' for deepseek models. Fixes https://github.com/BerriAI/litellm/issues/7877#issuecomment-2603813218
* fix(convert_dict_to_response.py): return the deepseek response in provider_specific_field, allowing openai and non-openai params to be separated in the model response
* fix(utils.py): support 'provider_specific_field' in the delta chunk as well, so deepseek reasoning content chunks can be streamed back to the user. Fixes https://github.com/BerriAI/litellm/issues/7877#issuecomment-2603813218
* fix(watsonx/chat/handler.py): fix passing the space id to watsonx on the chat route
* fix(watsonx/): fix the watsonx_text/ route with a space id
* fix(watsonx/): QA item; also adds better unit testing for watsonx embedding calls
* fix(utils.py): rename to '..fields'
* fix: fix linting errors
* fix(utils.py): fix typing; don't include the provider-specific field when it is None or empty, preventing the default response from being non-OpenAI-compatible
* fix: clean up unused imports
* docs(deepseek.md): add docs for the deepseek reasoning model
109 lines · 3.8 KiB · Python
"""
|
|
Translation from OpenAI's `/chat/completions` endpoint to IBM WatsonX's `/text/chat` endpoint.
|
|
|
|
Docs: https://cloud.ibm.com/apidocs/watsonx-ai#text-chat
|
|
"""

from typing import List, Optional, Tuple, Union

from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.watsonx import WatsonXAIEndpoint

from ....utils import _remove_additional_properties, _remove_strict_from_schema
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
from ..common_utils import IBMWatsonXMixin


class IBMWatsonXChatConfig(IBMWatsonXMixin, OpenAIGPTConfig):
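    """Maps OpenAI `/chat/completions` parameters onto IBM watsonx.ai's `/text/chat` endpoint."""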

    def get_supported_openai_params(self, model: str) -> List:
        return [
            "temperature",  # equivalent to temperature
            "max_tokens",  # equivalent to max_new_tokens
            "top_p",  # equivalent to top_p
            "frequency_penalty",  # equivalent to repetition_penalty
            "stop",  # equivalent to stop_sequences
            "seed",  # equivalent to random_seed
            "stream",  # equivalent to stream
            "tools",
            "tool_choice",  # equivalent to tool_choice + tool_choice_options
            "logprobs",
            "top_logprobs",
            "n",
            "presence_penalty",
            "response_format",
        ]
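
    # Illustrative check (a sketch, not from the source; the model id below is
    # a hypothetical placeholder):
    # >>> config = IBMWatsonXChatConfig()
    # >>> "max_tokens" in config.get_supported_openai_params("ibm/granite-13b-chat-v2")
    # True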

    def is_tool_choice_option(self, tool_choice: Optional[Union[str, dict]]) -> bool:
        if tool_choice is None:
            return False
        if isinstance(tool_choice, str):
            return tool_choice in ["auto", "none", "required"]
        return False
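
    # Expected behavior (a sketch derived from the method above; the function
    # name "get_weather" is a hypothetical placeholder):
    # >>> config = IBMWatsonXChatConfig()
    # >>> config.is_tool_choice_option("auto")
    # True
    # >>> config.is_tool_choice_option({"type": "function", "function": {"name": "get_weather"}})
    # False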

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        ## TOOLS ##
        _tools = non_default_params.pop("tools", None)
        if _tools is not None:
            # remove 'additionalProperties' from tools
            _tools = _remove_additional_properties(_tools)
            # remove 'strict' from tools
            _tools = _remove_strict_from_schema(_tools)
        if _tools is not None:
            non_default_params["tools"] = _tools

        ## TOOL CHOICE ##
        _tool_choice = non_default_params.pop("tool_choice", None)
        if self.is_tool_choice_option(_tool_choice):
            optional_params["tool_choice_options"] = _tool_choice
        elif _tool_choice is not None:
            optional_params["tool_choice"] = _tool_choice
        return super().map_openai_params(
            non_default_params, optional_params, model, drop_params
        )
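
    # Walkthrough (a sketch of the routing above): with
    # non_default_params={"tools": [...], "tool_choice": "auto"}, the tools are
    # cleaned of 'additionalProperties' and 'strict' and left for the parent
    # OpenAIGPTConfig to map, while the string "auto" is routed to
    # optional_params["tool_choice_options"]; a dict tool_choice would instead
    # pass through as optional_params["tool_choice"].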

    def _get_openai_compatible_provider_info(
        self, api_base: Optional[str], api_key: Optional[str]
    ) -> Tuple[Optional[str], Optional[str]]:
        api_base = api_base or get_secret_str("HOSTED_VLLM_API_BASE")  # type: ignore
        dynamic_api_key = (
            api_key or get_secret_str("HOSTED_VLLM_API_KEY") or ""
        )  # vllm does not require an api key
        return api_base, dynamic_api_key
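
    # Note on the fallbacks above: when api_base / api_key are not supplied,
    # they are read from the HOSTED_VLLM_API_BASE / HOSTED_VLLM_API_KEY
    # secrets, and the key defaults to "" if neither source provides one.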

    def get_complete_url(
        self,
        api_base: str,
        model: str,
        optional_params: dict,
        stream: Optional[bool] = None,
    ) -> str:
        url = self._get_base_url(api_base=api_base)
        if model.startswith("deployment/"):
            deployment_id = "/".join(model.split("/")[1:])
            endpoint = (
                WatsonXAIEndpoint.DEPLOYMENT_CHAT_STREAM.value
                if stream
                else WatsonXAIEndpoint.DEPLOYMENT_CHAT.value
            )
            endpoint = endpoint.format(deployment_id=deployment_id)
        else:
            endpoint = (
                WatsonXAIEndpoint.CHAT_STREAM.value
                if stream
                else WatsonXAIEndpoint.CHAT.value
            )
        url = url.rstrip("/") + endpoint

        ## add api version
        url = self._add_api_version_to_url(
            url=url, api_version=optional_params.pop("api_version", None)
        )
        return url
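
# A minimal usage sketch (not part of the original module). The api_base and
# model id are hypothetical placeholders, and the expected URL assumes that
# WatsonXAIEndpoint.CHAT resolves to "/ml/v1/text/chat" and that
# _add_api_version_to_url appends the version as a query parameter.
# >>> config = IBMWatsonXChatConfig()
# >>> config.get_complete_url(
# ...     api_base="https://us-south.ml.cloud.ibm.com",
# ...     model="ibm/granite-13b-chat-v2",
# ...     optional_params={"api_version": "2024-03-14"},
# ...     stream=False,
# ... )
# 'https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2024-03-14'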