mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
Litellm dev 12 30 2024 p1 (#7480)
* test(azure_openai_o1.py): initial commit with testing for the Azure OpenAI o1-preview model
* fix(base_llm_unit_tests.py): skip the response-format tests for Azure o1-preview, since o1 on Azure does not support tool calling yet
* fix: initial commit of the Azure o1 handler using the OpenAI caller — simplifies calling and lets the fake-streaming logic already implemented for OpenAI just work
* feat(azure/o1_handler.py): fake o1 streaming for Azure o1 models; Azure does not currently support streaming for o1
* feat(o1_transformation.py): support overriding 'should_fake_stream' on azure/o1 via the 'supports_native_streaming' param in model info, enabling users to toggle it on once Azure allows o1 streaming, without needing to bump versions
* style(router.py): remove the 'give feedback / get help' messaging when the router is used, to prevent noisy output. Closes https://github.com/BerriAI/litellm/issues/5942
* test: fix the Azure o1 test
* test: fix tests
* fix: fix test
This commit is contained in:
parent
60bdfb437f
commit
347779b813
17 changed files with 273 additions and 141 deletions
|
@@ -1893,7 +1893,6 @@ def register_model(model_cost: Union[str, dict]): # noqa: PLR0915
|
|||
},
|
||||
}
|
||||
"""
|
||||
|
||||
loaded_model_cost = {}
|
||||
if isinstance(model_cost, dict):
|
||||
loaded_model_cost = model_cost
|
||||
|
@@ -4353,6 +4352,9 @@ def _get_model_info_helper( # noqa: PLR0915
|
|||
supports_embedding_image_input=_model_info.get(
|
||||
"supports_embedding_image_input", False
|
||||
),
|
||||
supports_native_streaming=_model_info.get(
|
||||
"supports_native_streaming", None
|
||||
),
|
||||
tpm=_model_info.get("tpm", None),
|
||||
rpm=_model_info.get("rpm", None),
|
||||
)
|
||||
|
@@ -6050,7 +6052,10 @@ class ProviderConfigManager:
|
|||
"""
|
||||
Returns the provider config for a given provider.
|
||||
"""
|
||||
if litellm.openAIO1Config.is_model_o1_reasoning_model(model=model):
|
||||
if (
|
||||
provider == LlmProviders.OPENAI
|
||||
and litellm.openAIO1Config.is_model_o1_reasoning_model(model=model)
|
||||
):
|
||||
return litellm.OpenAIO1Config()
|
||||
elif litellm.LlmProviders.DEEPSEEK == provider:
|
||||
return litellm.DeepSeekChatConfig()
|
||||
|
@@ -6122,6 +6127,8 @@ class ProviderConfigManager:
|
|||
):
|
||||
return litellm.AI21ChatConfig()
|
||||
elif litellm.LlmProviders.AZURE == provider:
|
||||
if litellm.AzureOpenAIO1Config().is_o1_model(model=model):
|
||||
return litellm.AzureOpenAIO1Config()
|
||||
return litellm.AzureOpenAIConfig()
|
||||
elif litellm.LlmProviders.AZURE_AI == provider:
|
||||
return litellm.AzureAIStudioConfig()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue