litellm-mirror/litellm/llms/azure/chat/o1_transformation.py
Krish Dholakia 142662a504 build(pyproject.toml): bump uvicorn dependency requirement (#7773)
* build(pyproject.toml): bump uvicorn dependency requirement

Fixes https://github.com/BerriAI/litellm/issues/7768

* fix(anthropic/chat/transformation.py): fix is_vertex_request check to actually use optional param passed in

Fixes https://github.com/BerriAI/litellm/issues/6898#issuecomment-2590860695

* fix(o1_transformation.py): fix azure o1 'is_o1_model' check to just check for o1 in model string

https://github.com/BerriAI/litellm/issues/7743

* test: load vertex creds
2025-01-14 21:47:11 -08:00


"""
Support for o1 model family
https://platform.openai.com/docs/guides/reasoning
Translations handled by LiteLLM:
- modalities: image => drop param (if user opts in to dropping param)
- role: system ==> translate to role 'user'
- streaming => faked by LiteLLM
- Tools, response_format => drop param (if user opts in to dropping param)
- Logprobs => drop param (if user opts in to dropping param)
- Temperature => drop param (if user opts in to dropping param)
"""
from typing import Optional

from litellm import verbose_logger
from litellm.utils import get_model_info

from ...openai.chat.o1_transformation import OpenAIO1Config


class AzureOpenAIO1Config(OpenAIO1Config):
    def should_fake_stream(
        self,
        model: Optional[str],
        stream: Optional[bool],
        custom_llm_provider: Optional[str] = None,
    ) -> bool:
        """
        Currently no Azure OpenAI models support native streaming.
        """
        if stream is not True:
            return False

        if model is not None:
            try:
                model_info = get_model_info(
                    model=model, custom_llm_provider=custom_llm_provider
                )
                if model_info.get("supports_native_streaming") is True:
                    return False
            except Exception as e:
                verbose_logger.debug(
                    f"Error getting model info in AzureOpenAIO1Config: {e}"
                )
        return True

    def is_o1_model(self, model: str) -> bool:
        return "o1" in model