Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
Support checking provider-specific /models endpoints for available models based on key (#7538)
* test(test_utils.py): initial test for valid models. Addresses https://github.com/BerriAI/litellm/issues/7525
* fix: test
* feat(fireworks_ai/transformation.py): support retrieving valid models from fireworks ai endpoint
* refactor(fireworks_ai/): support checking model info on `/v1/models` route
* docs(set_keys.md): update docs to clarify check llm provider api usage
* fix(watsonx/common_utils.py): support 'WATSONX_ZENAPIKEY' for iam auth
* fix(watsonx): read in watsonx token from env var
* fix: fix linting errors
* fix(utils.py): fix provider config check
* style: cleanup unused imports
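As a rough illustration of the feature this commit adds (a sketch only: the exact call shape is inferred from the commit title and the set_keys.md docs bullet above, not shown in this diff), checking a provider's live /models endpoint for key-specific models might look like:

    import os

    from litellm import get_valid_models

    # Placeholder key value; FIREWORKS_AI_API_KEY is assumed to be the env var
    # litellm reads for Fireworks AI, based on the fireworks_ai bullets above.
    os.environ["FIREWORKS_AI_API_KEY"] = "fw-..."

    # With check_provider_endpoint=True, litellm queries each configured
    # provider's /models route for the models this key can actually access,
    # rather than returning only its static model list.
    valid_models = get_valid_models(check_provider_endpoint=True)
    print(valid_models)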
This commit is contained in:
parent cac06a32b8 · commit f770dd0c95
12 changed files with 350 additions and 42 deletions
litellm/llms/litellm_proxy/chat/transformation.py (new file, 29 additions)
@@ -0,0 +1,29 @@
"""
Translate from OpenAI's `/v1/chat/completions` to the LiteLLM Proxy's `/v1/chat/completions`
"""

from typing import List, Optional, Tuple

from litellm.secret_managers.main import get_secret_str

from ...openai.chat.gpt_transformation import OpenAIGPTConfig


class LiteLLMProxyChatConfig(OpenAIGPTConfig):
    def _get_openai_compatible_provider_info(
        self, api_base: Optional[str], api_key: Optional[str]
    ) -> Tuple[Optional[str], Optional[str]]:
        api_base = api_base or get_secret_str("LITELLM_PROXY_API_BASE")  # type: ignore
        dynamic_api_key = api_key or get_secret_str("LITELLM_PROXY_API_KEY")
        return api_base, dynamic_api_key

    def get_models(
        self, api_key: Optional[str] = None, api_base: Optional[str] = None
    ) -> List[str]:
        api_base, api_key = self._get_openai_compatible_provider_info(api_base, api_key)
        if api_base is None:
            raise ValueError(
                "api_base not set for LiteLLM Proxy route. Set in env via `LITELLM_PROXY_API_BASE`"
            )
        models = super().get_models(api_key=api_key, api_base=api_base)
        return [f"litellm_proxy/{model}" for model in models]
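A hedged usage sketch for the class above (the proxy URL and key are placeholders; this assumes a LiteLLM proxy is running at that address and that the inherited OpenAIGPTConfig.get_models hits the OpenAI-compatible /v1/models route, as the super() call suggests):

    import os

    from litellm.llms.litellm_proxy.chat.transformation import LiteLLMProxyChatConfig

    # Point the config at a (hypothetical) running LiteLLM proxy instance.
    os.environ["LITELLM_PROXY_API_BASE"] = "http://localhost:4000"
    os.environ["LITELLM_PROXY_API_KEY"] = "sk-..."

    config = LiteLLMProxyChatConfig()
    # get_models() resolves api_base/api_key from the env vars above, queries
    # the proxy's /v1/models endpoint via the parent class, and prefixes each
    # returned model id with "litellm_proxy/".
    models = config.get_models()
    print(models)  # e.g. ["litellm_proxy/gpt-4o", ...]

The "litellm_proxy/" prefix on each returned id lets callers route subsequent completion calls for those models back through the proxy provider.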