Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
Litellm dev 01 06 2025 p2 (#7597)
* test(test_amazing_vertex_completion.py): fix test
* test: initial working code gecko test
* fix(vertex_ai_non_gemini.py): support vertex ai code gecko fake streaming
  Fixes https://github.com/BerriAI/litellm/issues/7360
* test(test_get_model_info.py): add test for getting custom provider model info
  Covers https://github.com/BerriAI/litellm/issues/7575
* fix(utils.py): fix get_provider_model_info check
  Handle custom llm provider scenario
  Fixes https://github.com/BerriAI/litellm/issues/7575
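Per the third bullet, code gecko's `stream=True` path is served by "fake streaming": presumably the model is called once without streaming and the full response is replayed to the caller as an iterator of chunks. A minimal sketch of that general pattern; the function name and chunk size are illustrative, not litellm's actual internals:

```python
# Minimal sketch of "fake streaming", assuming the usual pattern: make one
# blocking completion call, then replay the full text as an iterator of
# chunks so stream=True callers keep working. fake_stream and chunk_size
# are hypothetical names, not litellm's real internals.
from typing import Iterator


def fake_stream(full_text: str, chunk_size: int = 20) -> Iterator[str]:
    """Yield a non-streaming response in fixed-size slices."""
    for i in range(0, len(full_text), chunk_size):
        yield full_text[i : i + chunk_size]


if __name__ == "__main__":
    # Pretend this came back from a single non-streaming completion call.
    response_text = "def add(a, b):\n    return a + b\n"
    for chunk in fake_stream(response_text):
        print(repr(chunk))
```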
This commit is contained in:
parent b397dc1497
commit 0c3fef24cd

4 changed files with 76 additions and 12 deletions
utils.py

```diff
@@ -4224,6 +4224,7 @@ def _get_model_info_helper(  # noqa: PLR0915
     _model_info: Optional[Dict[str, Any]] = None
     key: Optional[str] = None
+    provider_config: Optional[BaseLLMModelInfo] = None

     if combined_model_name in litellm.model_cost:
         key = combined_model_name
         _model_info = _get_model_info_from_model_cost(key=key)
@@ -4263,7 +4264,10 @@ def _get_model_info_helper(  # noqa: PLR0915
     ):
         _model_info = None

-    if custom_llm_provider:
+    if custom_llm_provider and custom_llm_provider in [
+        provider.value for provider in LlmProviders
+    ]:
+        # Check if the provider string exists in LlmProviders enum
         provider_config = ProviderConfigManager.get_provider_model_info(
             model=model, provider=LlmProviders(custom_llm_provider)
         )
```
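The substantive change is the membership guard: previously any truthy `custom_llm_provider` reached `LlmProviders(custom_llm_provider)`, and an unrecognized custom provider string would raise `ValueError` (the scenario in issue 7575). A self-contained sketch of the pattern, using a stand-in enum rather than litellm's real `LlmProviders`:

```python
# Standalone illustration of the guard added above: coerce the provider
# string into the enum only after a membership check, so an unknown custom
# provider no longer raises ValueError. The two-member enum is a stand-in,
# not litellm's full LlmProviders.
from enum import Enum
from typing import Optional


class LlmProviders(str, Enum):
    OPENAI = "openai"
    VERTEX_AI = "vertex_ai"


def get_provider_model_info(custom_llm_provider: Optional[str]) -> Optional[str]:
    if custom_llm_provider and custom_llm_provider in [
        provider.value for provider in LlmProviders
    ]:
        # Membership was checked above, so this construction cannot raise.
        return f"config for {LlmProviders(custom_llm_provider).value}"
    return None  # unknown/custom provider: fall through instead of crashing


print(get_provider_model_info("openai"))          # config for openai
print(get_provider_model_info("my-custom-llm"))   # None
```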