Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00
o1 - add image param handling (#7312)
* fix(openai.py): fix returning o1 non-streaming requests; fixes issue where fake stream was always true for o1
* build(model_prices_and_context_window.json): add 'supports_vision' for o1 models
* fix: add internal server error exception mapping
* fix(base_llm_unit_tests.py): drop temperature from test
* test: mark prompt caching as a flaky test
This commit is contained in:
parent a101c1fff4
commit 62b00cf28d
9 changed files with 68 additions and 79 deletions
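The 'supports_vision' flag mentioned in the commit message lives in model_prices_and_context_window.json and marks which models accept image input. As a rough illustration of how such a flag can be looked up, here is a minimal, self-contained Python sketch; the map contents and the helper name model_supports_vision are made up for this example and are not litellm's real pricing data or API.

# Hypothetical, minimal lookup mirroring how a "supports_vision" flag in a
# model-cost map could be consulted; the entries below are illustrative only.
from typing import Any, Dict

MODEL_COST_MAP: Dict[str, Dict[str, Any]] = {
    "o1": {
        "litellm_provider": "openai",
        "mode": "chat",
        "supports_vision": True,  # the flag this commit adds for o1 models
    },
    "o1-mini": {
        "litellm_provider": "openai",
        "mode": "chat",
        "supports_vision": False,
    },
}


def model_supports_vision(model: str) -> bool:
    """Return True when the cost map marks the model as vision-capable."""
    return bool(MODEL_COST_MAP.get(model, {}).get("supports_vision", False))


if __name__ == "__main__":
    print(model_supports_vision("o1"))       # True
    print(model_supports_vision("o1-mini"))  # False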
@@ -37,8 +37,13 @@ class OpenAIO1Config(OpenAIGPTConfig):
         return super().get_config()
 
     def should_fake_stream(
-        self, model: str, custom_llm_provider: Optional[str] = None
+        self,
+        model: str,
+        stream: Optional[bool],
+        custom_llm_provider: Optional[str] = None,
     ) -> bool:
+        if stream is not True:
+            return False
         supported_stream_models = ["o1-mini", "o1-preview"]
         for supported_model in supported_stream_models:
             if supported_model in model:
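To make the behaviour change concrete: previously should_fake_stream only looked at the model name, so a non-streaming o1 request could still be routed through the fake-stream path. With the new stream parameter the method bails out immediately when the caller did not ask for streaming. Below is a standalone sketch of that decision; the hunk above is truncated after the "if supported_model in model:" line, so the trailing return False / return True pair is an assumption about the untouched remainder of the method, and the free function here is an illustration, not litellm's actual class method.

# Standalone sketch of the decision implemented above (assumed tail included).
from typing import Optional


def should_fake_stream(model: str, stream: Optional[bool]) -> bool:
    """Fake streaming only when the caller asked to stream AND the model
    cannot stream natively."""
    if stream is not True:
        # Non-streaming request: nothing to fake (this is the fix).
        return False
    supported_stream_models = ["o1-mini", "o1-preview"]
    for supported_model in supported_stream_models:
        if supported_model in model:
            # Model streams natively, so no fake stream is needed (assumed).
            return False
    return True


print(should_fake_stream("o1", stream=None))       # False: not a streaming call
print(should_fake_stream("o1", stream=True))       # True: stream must be faked
print(should_fake_stream("o1-mini", stream=True))  # False: streams natively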
@@ -142,17 +147,4 @@ class OpenAIO1Config(OpenAIGPTConfig):
                 )
                 messages[i] = new_message  # Replace the old message with the new one
 
-            if "content" in message and isinstance(message["content"], list):
-                new_content = []
-                for content_item in message["content"]:
-                    if content_item.get("type") == "image_url":
-                        if litellm.drop_params is not True:
-                            raise ValueError(
-                                "Image content is not supported for O-1 models. Set litellm.drop_param to True to drop image content."
-                            )
-                        # If drop_param is True, we simply don't add the image content to new_content
-                    else:
-                        new_content.append(content_item)
-                message["content"] = new_content
-
         return messages