Compare commits

...

5 commits

Author       SHA1        Message                                                                Date
Ishaan Jaff  64f105ea23  ci/cd run again                                                        2024-11-14 08:13:05 -08:00
Ishaan Jaff  fccced7530  fix test_supports_response_schema                                      2024-11-13 22:00:24 -08:00
Ishaan Jaff  eb4786c378  Merge branch 'main' into litellm_add_stability.stable-image-ultra-v1  2024-11-13 19:59:06 -08:00
Ishaan Jaff  d096a7c64c  add pricing for stability.stable-image-ultra-v1:0                     2024-11-12 20:56:00 -08:00
Ishaan Jaff  49c31ecee6  add stability.stable-image-ultra-v1:0                                  2024-11-12 20:53:17 -08:00
5 changed files with 24 additions and 4 deletions

View file

@@ -53,9 +53,15 @@ class AmazonStability3Config:
         sd3-medium
         sd3.5-large
         sd3.5-large-turbo
+
+        Stability ultra models
+        stable-image-ultra-v1
         """
-        if model and ("sd3" in model or "sd3.5" in model):
-            return True
+        if model:
+            if "sd3" in model or "sd3.5" in model:
+                return True
+            if "stable-image-ultra-v1" in model:
+                return True
         return False

     @classmethod
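
For illustration, a minimal standalone sketch of the updated check; the enclosing classmethod on AmazonStability3Config is not named in this hunk, so the function name below is hypothetical:

def supports_stability_model(model: str) -> bool:
    # Hypothetical free-function version of the classmethod updated above.
    if model:
        # Pre-existing sd3 / sd3.5 detection; note "sd3" already matches "sd3.5".
        if "sd3" in model or "sd3.5" in model:
            return True
        # New in this PR: Stability ultra models.
        if "stable-image-ultra-v1" in model:
            return True
    return False

assert supports_stability_model("stability.stable-image-ultra-v1:0") is True
assert supports_stability_model("stability.stable-diffusion-xl-v1") is False

Restructuring the single boolean expression into nested ifs keeps each model family's check on its own line, so adding the next family is a one-line change.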

View file

@@ -5620,6 +5620,13 @@
         "litellm_provider": "bedrock",
         "mode": "image_generation"
     },
+    "stability.stable-image-ultra-v1:0": {
+        "max_tokens": 77,
+        "max_input_tokens": 77,
+        "output_cost_per_image": 0.14,
+        "litellm_provider": "bedrock",
+        "mode": "image_generation"
+    },
     "sagemaker/meta-textgeneration-llama-2-7b": {
         "max_tokens": 4096,
         "max_input_tokens": 4096,

View file

@@ -5620,6 +5620,13 @@
         "litellm_provider": "bedrock",
         "mode": "image_generation"
     },
+    "stability.stable-image-ultra-v1:0": {
+        "max_tokens": 77,
+        "max_input_tokens": 77,
+        "output_cost_per_image": 0.14,
+        "litellm_provider": "bedrock",
+        "mode": "image_generation"
+    },
     "sagemaker/meta-textgeneration-llama-2-7b": {
         "max_tokens": 4096,
         "max_input_tokens": 4096,

View file

@@ -10,7 +10,7 @@ import os
 sys.path.insert(
     0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
+)  # Adds the parent directory to the system-path
 from typing import Literal
 import pytest

View file

@@ -748,7 +748,7 @@ def test_convert_model_response_object():
         ("vertex_ai/gemini-1.5-pro", True),
         ("gemini/gemini-1.5-pro", True),
         ("predibase/llama3-8b-instruct", True),
-        ("gpt-4o", False),
+        ("gpt-3.5-turbo", False),
     ],
 )
 def test_supports_response_schema(model, expected_bool):
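
Swapping gpt-4o for gpt-3.5-turbo keeps a genuine negative case in the parametrized test, presumably because gpt-4o now reports response-schema support. A hedged usage sketch, assuming the helper under test is the one litellm exposes as litellm.supports_response_schema:

import litellm

# gpt-4o can no longer serve as the False case in the parametrization;
# gpt-3.5-turbo takes its place as a model without response-schema support.
assert litellm.supports_response_schema(model="gpt-3.5-turbo") is False
assert litellm.supports_response_schema(model="gemini/gemini-1.5-pro") is True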