(feat) Support audio, modalities params (#6304)

* add audio, modalities param

* add test for gpt audio models

* add get_supported_openai_params for GPT audio models

* add supported params for audio

* test_audio_output_from_model

* bump openai to openai==1.52.0

* bump openai on pyproject

* fix audio test

* fix test mock_chat_response

* handle audio for Message

* fix handling audio for OAI compatible API endpoints

* fix linting

* fix mock dbrx test
This commit is contained in:
Ishaan Jaff 2024-10-18 19:14:25 +05:30 committed by GitHub
parent e35fc3203e
commit 13e0b3f626
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 290 additions and 23 deletions

View file

@@ -303,10 +303,25 @@ class OpenAIConfig:
}
def get_supported_openai_params(self, model: str) -> list:
if litellm.OpenAIO1Config().is_model_o1_reasoning_model(model=model):
return litellm.OpenAIO1Config().get_supported_openai_params(model=model)
"""
This function returns the list of supported openai parameters for a given OpenAI Model
- If O1 model, returns O1 supported params
- If gpt-audio model, returns gpt-audio supported params
- Else, returns gpt supported params
Args:
model (str): OpenAI model
Returns:
list: List of supported openai parameters
"""
if litellm.openAIO1Config.is_model_o1_reasoning_model(model=model):
return litellm.openAIO1Config.get_supported_openai_params(model=model)
elif litellm.openAIGPTAudioConfig.is_model_gpt_audio_model(model=model):
return litellm.openAIGPTAudioConfig.get_supported_openai_params(model=model)
else:
return litellm.OpenAIGPTConfig().get_supported_openai_params(model=model)
return litellm.openAIGPTConfig.get_supported_openai_params(model=model)
def _map_openai_params(
self, non_default_params: dict, optional_params: dict, model: str
@@ -325,14 +340,22 @@
drop_params: bool,
) -> dict:
""" """
if litellm.OpenAIO1Config().is_model_o1_reasoning_model(model=model):
return litellm.OpenAIO1Config().map_openai_params(
if litellm.openAIO1Config.is_model_o1_reasoning_model(model=model):
return litellm.openAIO1Config.map_openai_params(
non_default_params=non_default_params,
optional_params=optional_params,
model=model,
drop_params=drop_params,
)
return litellm.OpenAIGPTConfig().map_openai_params(
elif litellm.openAIGPTAudioConfig.is_model_gpt_audio_model(model=model):
return litellm.openAIGPTAudioConfig.map_openai_params(
non_default_params=non_default_params,
optional_params=optional_params,
model=model,
drop_params=drop_params,
)
return litellm.openAIGPTConfig.map_openai_params(
non_default_params=non_default_params,
optional_params=optional_params,
model=model,
@@ -666,10 +689,10 @@ class OpenAIChatCompletion(BaseLLM):
custom_llm_provider=custom_llm_provider,
)
if (
litellm.OpenAIO1Config().is_model_o1_reasoning_model(model=model)
litellm.openAIO1Config.is_model_o1_reasoning_model(model=model)
and messages is not None
):
messages = litellm.OpenAIO1Config().o1_prompt_factory(
messages = litellm.openAIO1Config.o1_prompt_factory(
messages=messages,
)