* fix(caching_handler.py): handle positional arguments in add cache logic. Fixes https://github.com/BerriAI/litellm/issues/6264
* feat(litellm_pre_call_utils.py): allow forwarding openai org id to backend client (see the sketch below). https://github.com/BerriAI/litellm/issues/6237
* docs(configs.md): add 'forward_openai_org_id' to docs
* fix(proxy_server.py): return model info if user_model is set. Fixes https://github.com/BerriAI/litellm/issues/6233
* fix(hosted_vllm/chat/transformation.py): don't set tools unless non-none
* fix(openai.py): improve debug log for openai 'str' error. Addresses https://github.com/BerriAI/litellm/issues/6272
* fix(proxy_server.py): fix linting error
* fix(proxy_server.py): fix linting errors
* test: skip WIP test
* docs(openai.md): add docs on passing openai org id from client to openai
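
Before the file itself, a quick illustration of the org-id change called out above: a minimal client-side sketch, assuming a LiteLLM proxy running locally with 'forward_openai_org_id' enabled in its config. The base URL, API key, organization id, and model name are placeholders, not values from the commit.

from openai import OpenAI

# Point the standard OpenAI client at the LiteLLM proxy. With
# 'forward_openai_org_id' enabled, the organization header set here is
# forwarded by the proxy to its upstream OpenAI client.
client = OpenAI(
    base_url="http://localhost:4000",  # placeholder proxy address
    api_key="sk-1234",                 # placeholder proxy key
    organization="org-placeholder",    # org id to forward upstream
)

response = client.chat.completions.create(
    model="gpt-4o",  # placeholder model name configured on the proxy
    messages=[{"role": "user", "content": "Hello"}],
)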
"""
|
|
Translate from OpenAI's `/v1/chat/completions` to VLLM's `/v1/chat/completions`
|
|
"""

import types
from typing import List, Optional, Union

from pydantic import BaseModel

import litellm
from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage

from ....utils import _remove_additional_properties, _remove_strict_from_schema
from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig


class HostedVLLMChatConfig(OpenAIGPTConfig):
    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        _tools = non_default_params.pop("tools", None)
        if _tools is not None:
            # remove 'additionalProperties' from tools
            _tools = _remove_additional_properties(_tools)
            # remove 'strict' from tools
            _tools = _remove_strict_from_schema(_tools)
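        # per the commit message above ("don't set tools unless non-none"):
        # only re-attach the cleaned tools list when one was actually passed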
        if _tools is not None:
            non_default_params["tools"] = _tools
        return super().map_openai_params(
            non_default_params, optional_params, model, drop_params
        )
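
What follows is not part of the file: a minimal usage sketch of map_openai_params, showing how a tool definition is cleaned before being handed to OpenAIGPTConfig. The tool schema and model name are hypothetical, and the helpers are assumed to strip 'strict' and 'additionalProperties' recursively, as their names suggest.

config = HostedVLLMChatConfig()

# Hypothetical OpenAI-style tool definition using strict structured outputs.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "strict": True,  # assumed removed by _remove_strict_from_schema
            "parameters": {
                "type": "object",
                "additionalProperties": False,  # assumed removed by _remove_additional_properties
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

optional_params = config.map_openai_params(
    non_default_params={"tools": tools},
    optional_params={},
    model="my-vllm-model",  # placeholder
    drop_params=False,
)
# optional_params["tools"] should now carry the same tool definition minus
# the 'strict' and 'additionalProperties' keys.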