Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
[BETA] Add OpenAI /images/variations + Topaz API support (#7700)
* feat(main.py): initial commit for `/image/variations` endpoint support
* refactor(base_llm/): introduce new base LLM config for image variation endpoints
* refactor(openai/image_variations/transformation.py): implement OpenAI image variation transformation handler
* fix: test
* feat(openai/): working OpenAI `/image/variation` endpoint calls via SDK
* feat(topaz/): Topaz sync image variation call support. Addresses https://github.com/BerriAI/litellm/issues/7593
* fix(topaz/transformation.py): fix linting errors
* fix(openai/image_variations/handler.py): fix passing JSON data
* fix(main.py): support the async image variation route - `aimage_variation`
* fix(test_get_model_info.py): fix test
* fix: clean up unused imports
* feat(openai/): add async `/image/variations` endpoint support
* feat(topaz/): support async `/image/variations` calls
* fix: test
* fix(utils.py): fix `get_model_info_helper` for no model info w/ provider config - handles the situation where model info is not known but a provider config exists
* test(test_router_fallbacks.py): mark flaky test
* fix: fix unused imports
* test: bump OTEL load test perf threshold - accounts for current load tests hitting the same server
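To give a sense of the surface area this commit adds, here is a minimal usage sketch of the new sync and async routes. It assumes `OPENAI_API_KEY` is set and that the parameters mirror OpenAI's `/images/variations` API (`image`, `n`, `size`); the model string and file name are illustrative, not taken from the commit.

```python
# Hedged sketch of the routes this commit introduces in main.py
# (`image_variation` / `aimage_variation`). Parameter names are assumed
# to mirror OpenAI's /images/variations API; not a definitive reference.
import asyncio
import litellm

with open("sample.png", "rb") as f:  # any local PNG; the name is illustrative
    image_bytes = f.read()

# Sync route
result = litellm.image_variation(
    image=image_bytes,  # raw bytes here; a file object may also work
    model="openai/dall-e-2",  # OpenAI's variations API supports dall-e-2
    n=1,
    size="1024x1024",
)
print(result.data[0].url)

# Async route (`aimage_variation`), added in the same commit
async def main() -> None:
    result = await litellm.aimage_variation(
        image=image_bytes,
        model="openai/dall-e-2",
        n=1,
        size="1024x1024",
    )
    print(result.data[0].url)

asyncio.run(main())
```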
Parent: a7c803edc5
Commit: ad2f66b3e3
25 changed files with 1254 additions and 20 deletions
```
@@ -1362,8 +1362,11 @@ def test_get_valid_models_fireworks_ai(monkeypatch):
    from litellm.utils import get_valid_models
    import litellm

    litellm._turn_on_debug()

    monkeypatch.setenv("FIREWORKS_API_KEY", "sk-1234")
    monkeypatch.setenv("FIREWORKS_ACCOUNT_ID", "1234")
    monkeypatch.setattr(litellm, "provider_list", ["fireworks_ai"])

    mock_response_data = {
        "models": [
@@ -1431,6 +1434,7 @@ def test_get_valid_models_fireworks_ai(monkeypatch):
        litellm.module_level_client, "get", return_value=mock_response
    ) as mock_post:
        valid_models = get_valid_models(check_provider_endpoint=True)
        mock_post.assert_called_once()
        assert (
            "fireworks_ai/accounts/fireworks/models/llama-3.1-8b-instruct"
            in valid_models
```
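For context on the hunk above: the test stubs the provider's model-listing endpoint by patching litellm's module-level HTTP client, so `get_valid_models(check_provider_endpoint=True)` never makes a real network call. A sketch of that pattern follows; the Fireworks payload schema (`"models"` mapping to objects with a `"name"` field) is an assumption inferred from the model id asserted in the test, not copied from the commit.

```python
# Sketch of the mocking pattern visible in the hunk. Names taken from the
# hunk: litellm.module_level_client, get_valid_models. The payload schema
# is an assumption based on the asserted model id.
import os
from unittest.mock import MagicMock, patch

import litellm
from litellm.utils import get_valid_models

os.environ["FIREWORKS_API_KEY"] = "sk-1234"
os.environ["FIREWORKS_ACCOUNT_ID"] = "1234"
litellm.provider_list = ["fireworks_ai"]  # limit which providers are checked

mock_response_data = {
    "models": [{"name": "accounts/fireworks/models/llama-3.1-8b-instruct"}]
}
mock_response = MagicMock()
mock_response.json.return_value = mock_response_data

with patch.object(
    litellm.module_level_client, "get", return_value=mock_response
) as mock_get:
    valid_models = get_valid_models(check_provider_endpoint=True)
    mock_get.assert_called_once()
    assert (
        "fireworks_ai/accounts/fireworks/models/llama-3.1-8b-instruct"
        in valid_models
    )
```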