# What this tests?
## This tests the litellm support for the OpenAI `/image/variations` endpoint

import asyncio
import json
import logging
import os
import sys
import tempfile
import traceback
from io import BytesIO

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import pytest
import requests
from dotenv import load_dotenv
from openai.types.image import Image

import litellm
from base_image_generation_test import BaseImageGenTest
from litellm._logging import verbose_logger
from litellm.caching import InMemoryCache

logging.basicConfig(level=logging.DEBUG)
verbose_logger.setLevel(logging.DEBUG)
load_dotenv()

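# Shared fixture: downloads the litellm logo over HTTP and hands it to the tests
# as an in-memory, file-like object (BytesIO), which is what the image variation
# calls below expect for the `image` argument.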
@pytest.fixture
def image_url():
    # URL of the image
    image_url = "https://litellm-listing.s3.amazonaws.com/litellm_logo.png"

    # Fetch the image from the URL
    response = requests.get(image_url)
    print(response)
    response.raise_for_status()  # Ensure the request was successful

    # Load the image into a file-like object
    image_file = BytesIO(response.content)

    return image_file

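# Baseline check: call the OpenAI SDK's image-variation endpoint directly, with no
# litellm involved. Assumes OPENAI_API_KEY is available in the environment (e.g. via .env).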
def test_openai_image_variation_openai_sdk(image_url):
    from openai import OpenAI

    client = OpenAI()

    response = client.images.create_variation(image=image_url, n=2, size="1024x1024")

    print(response)

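# Exercises litellm's own `/image/variations` support: the sync `image_variation`
# call and the async `aimage_variation` call, parametrized over both modes.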
@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.asyncio
async def test_openai_image_variation_litellm_sdk(image_url, sync_mode):
    from litellm import image_variation, aimage_variation

    if sync_mode:
        image_variation(image=image_url, n=2, size="1024x1024")
    else:
        await aimage_variation(image=image_url, n=2, size="1024x1024")

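# Same sync/async flows, but routed to the Topaz provider via the
# "topaz/Standard V2" model string; presumably this needs Topaz credentials
# configured in the environment for a live run.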
@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.asyncio
async def test_topaz_image_variation(image_url, sync_mode):
    from litellm import image_variation, aimage_variation

    if sync_mode:
        image_variation(
            model="topaz/Standard V2", image=image_url, n=2, size="1024x1024"
        )
    else:
        response = await aimage_variation(
            model="topaz/Standard V2", image=image_url, n=2, size="1024x1024"
        )