* fix(lm_studio/chat/transformation.py): Fix https://github.com/BerriAI/litellm/issues/7811
* fix(router.py): fix mock timeout check
* fix: drop model name from fallback args since it causes a conflict with the model=model that is provided later on (#7806). This error happens if you provide multiple fallback models to the completion function with a model name defined in each one.
* fix(router.py): remove mock_timeout before sending the request; prevents reuse in fallbacks
* test: update test
* test: revert test change - wrong pr

Co-authored-by: Dudu Lasry <david1542@users.noreply.github.com>
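To make the fallback conflict concrete, here is a minimal sketch of the kind of call the commit message describes. The model names are hypothetical, and it assumes litellm's completion accepts a fallbacks list whose dict entries carry their own "model" key; per the commit message, those per-entry model names used to collide with the model=model argument re-applied later in the fallback path.

import litellm

# Hypothetical model names for illustration. Each fallback entry defines its
# own "model"; before the fix above, these entries conflicted with the
# model=model argument that the fallback path passes again internally.
response = litellm.completion(
    model="lm_studio/llama-3-8b",              # hypothetical primary model
    messages=[{"role": "user", "content": "hello"}],
    fallbacks=[
        {"model": "gpt-4o-mini"},              # hypothetical fallback 1
        {"model": "claude-3-haiku-20240307"},  # hypothetical fallback 2
    ],
)
print(response.choices[0].message.content)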
20 lines · 710 B · Python
"""
|
|
Translate from OpenAI's `/v1/chat/completions` to LM Studio's `/chat/completions`
|
|
"""
|
|
|
|
from typing import Optional, Tuple
|
|
|
|
from litellm.secret_managers.main import get_secret_str
|
|
|
|
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
|
|
|
|
|
|
class LMStudioChatConfig(OpenAIGPTConfig):
|
|
def _get_openai_compatible_provider_info(
|
|
self, api_base: Optional[str], api_key: Optional[str]
|
|
) -> Tuple[Optional[str], Optional[str]]:
|
|
api_base = api_base or get_secret_str("LM_STUDIO_API_BASE") # type: ignore
|
|
dynamic_api_key = (
|
|
api_key or get_secret_str("LM_STUDIO_API_KEY") or " "
|
|
) # vllm does not require an api key
|
|
return api_base, dynamic_api_key
|
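For context, a minimal usage sketch of the config above, assuming the module lives at litellm.llms.lm_studio.chat.transformation (as its relative imports suggest) and using an illustrative LM Studio base URL:

import os

from litellm.llms.lm_studio.chat.transformation import LMStudioChatConfig

# Illustrative value; LM Studio's local server commonly listens on
# http://localhost:1234/v1, but your setup may differ.
os.environ["LM_STUDIO_API_BASE"] = "http://localhost:1234/v1"

config = LMStudioChatConfig()

# With no explicit arguments, api_base falls back to LM_STUDIO_API_BASE and
# the key falls back to LM_STUDIO_API_KEY or a single-space placeholder.
api_base, api_key = config._get_openai_compatible_provider_info(
    api_base=None, api_key=None
)
print(api_base)       # http://localhost:1234/v1
print(repr(api_key))  # ' '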