Mirror of https://github.com/BerriAI/litellm.git
Support checking provider-specific /models endpoints for available models based on key (#7538)
* test(test_utils.py): initial test for valid models. Addresses https://github.com/BerriAI/litellm/issues/7525
* fix: test
* feat(fireworks_ai/transformation.py): support retrieving valid models from fireworks ai endpoint
* refactor(fireworks_ai/): support checking model info on `/v1/models` route
* docs(set_keys.md): update docs to clarify check llm provider api usage
* fix(watsonx/common_utils.py): support 'WATSONX_ZENAPIKEY' for iam auth
* fix(watsonx): read in watsonx token from env var
* fix: fix linting errors
* fix(utils.py): fix provider config check
* style: cleanup unused imports
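The headline feature is easiest to see from the caller's side: the docs bullet above points at `get_valid_models`, which can now query a provider's live models endpoint with the configured key instead of relying only on the static model list. A minimal usage sketch, assuming `get_valid_models` is importable from the package root and that the live check is opted into via a `check_provider_endpoint` flag; that flag name and the `FIREWORKS_AI_API_KEY` env var name are assumptions on my part, not confirmed by this page:

    import os

    from litellm import get_valid_models

    # get_valid_models() infers which providers to check from the API keys
    # present in the environment; the key value below is a placeholder.
    os.environ["FIREWORKS_AI_API_KEY"] = "fw_..."

    # Assumed flag: with check_provider_endpoint=True, providers that expose
    # a models endpoint (e.g. Fireworks AI's /v1/models) are queried live
    # with the key; others fall back to the static model list.
    valid_models = get_valid_models(check_provider_endpoint=True)
    print(valid_models)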
parent 9772583f2f
commit fb1272b46b
12 changed files with 350 additions and 42 deletions
@@ -488,11 +488,10 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915
     elif custom_llm_provider == "fireworks_ai":
         # fireworks is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.fireworks.ai/inference/v1
         (
-            model,
             api_base,
             dynamic_api_key,
         ) = litellm.FireworksAIConfig()._get_openai_compatible_provider_info(
-            model=model, api_base=api_base, api_key=api_key
+            api_base=api_base, api_key=api_key
         )
     elif custom_llm_provider == "azure_ai":
         (
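Read as a signature change, the hunk shrinks by one line (11 to 10) because `model` drops out of both the returned tuple and the keyword arguments: Fireworks AI's provider-info helper only resolves the API base and dynamic key, and the model name passes through untouched. The commit's `/v1/models` support amounts to hitting the OpenAI-compatible models route with that key. A rough, self-contained sketch of such a check, assuming the standard OpenAI-style response shape ({"data": [{"id": ...}, ...]}); the function name, env var fallback, and error handling are illustrative, not litellm's actual implementation:

    import os
    from typing import List, Optional

    import httpx


    def list_fireworks_models(api_key: Optional[str] = None) -> List[str]:
        """Illustrative: return the model IDs visible to this Fireworks AI key."""
        api_key = api_key or os.environ.get("FIREWORKS_AI_API_KEY")
        if not api_key:
            raise ValueError("set FIREWORKS_AI_API_KEY or pass api_key")
        resp = httpx.get(
            "https://api.fireworks.ai/inference/v1/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=30.0,
        )
        resp.raise_for_status()
        # OpenAI-compatible list shape: {"object": "list", "data": [{"id": ...}]}
        return [m["id"] for m in resp.json()["data"]]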