Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
* feat(main.py): initial commit for `/image/variations` endpoint support
* refactor(base_llm/): introduce new base LLM config for image variation endpoints
* refactor(openai/image_variations/transformation.py): implement OpenAI image variation transformation handler
* fix: test
* feat(openai/): working OpenAI `/image/variation` endpoint calls via SDK
* feat(topaz/): Topaz sync image variation call support. Addresses https://github.com/BerriAI/litellm/issues/7593
* fix(topaz/transformation.py): fix linting errors
* fix(openai/image_variations/handler.py): fix passing JSON data
* fix(main.py): support async image variation route - `aimage_variation`
* fix(test_get_model_info.py): fix test
* fix: clean up unused imports
* feat(openai/): add async `/image/variations` endpoint support
* feat(topaz/): support async `/image/variations` calls
* fix: test
* fix(utils.py): fix get_model_info_helper for no model info w/ provider config (handles the case where model info is unknown but a provider config exists)
* test(test_router_fallbacks.py): mark flaky test
* fix: fix unused imports
* test: bump OTel load test perf threshold to account for current load tests hitting the same server
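For context, here is a minimal, hypothetical sketch of calling the new route from the SDK. The async name `aimage_variation` comes from the commit message above; the sync name `image_variation` and the parameter names (`image`, `n`, `size`) are assumptions modeled on OpenAI's `images.create_variation` API, not confirmed by this commit.

# Hypothetical usage sketch for the new `/image/variations` route.
# `aimage_variation` is named in the commit; `image_variation` and the
# parameters below are assumptions modeled on OpenAI's create_variation API.
import litellm

with open("original.png", "rb") as f:
    response = litellm.image_variation(
        model="openai/dall-e-2",  # assumed provider/model naming
        image=f,                  # source image to generate variations of
        n=1,                      # number of variations to request
        size="1024x1024",
    )

# async variant added in this commit:
# response = await litellm.aimage_variation(...)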
33 lines
1.3 KiB
Python
"""
|
|
Translate from OpenAI's `/v1/chat/completions` to VLLM's `/v1/chat/completions`
|
|
"""
|
|
|
|
from typing import List, Optional, Tuple
|
|
|
|
from litellm.secret_managers.main import get_secret_str
|
|
|
|
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
|
|
|
|
|
|
class LiteLLMProxyChatConfig(OpenAIGPTConfig):
|
|
def _get_openai_compatible_provider_info(
|
|
self, api_base: Optional[str], api_key: Optional[str]
|
|
) -> Tuple[Optional[str], Optional[str]]:
|
|
api_base = api_base or get_secret_str("LITELLM_PROXY_API_BASE") # type: ignore
|
|
dynamic_api_key = api_key or get_secret_str("LITELLM_PROXY_API_KEY")
|
|
return api_base, dynamic_api_key
|
|
|
|
def get_models(
|
|
self, api_key: Optional[str] = None, api_base: Optional[str] = None
|
|
) -> List[str]:
|
|
api_base, api_key = self._get_openai_compatible_provider_info(api_base, api_key)
|
|
if api_base is None:
|
|
raise ValueError(
|
|
"api_base not set for LiteLLM Proxy route. Set in env via `LITELLM_PROXY_API_BASE`"
|
|
)
|
|
models = super().get_models(api_key=api_key, api_base=api_base)
|
|
return [f"litellm_proxy/{model}" for model in models]
|
|
|
|
@staticmethod
|
|
def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
|
|
return api_key or get_secret_str("LITELLM_PROXY_API_KEY")
|
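A minimal usage sketch of the config above, assuming a LiteLLM Proxy reachable at `http://localhost:4000`; the env values are placeholders and the direct instantiation is illustrative, not a pattern taken from the repo.

# Illustrative usage of LiteLLMProxyChatConfig; the proxy URL and key
# below are placeholder assumptions.
import os

os.environ["LITELLM_PROXY_API_BASE"] = "http://localhost:4000"
os.environ["LITELLM_PROXY_API_KEY"] = "sk-1234"

config = LiteLLMProxyChatConfig()

# Credentials resolve from the env vars when not passed explicitly.
api_base, api_key = config._get_openai_compatible_provider_info(None, None)
assert api_base == "http://localhost:4000"

# Model IDs come back prefixed with the provider route, e.g.
# ["litellm_proxy/gpt-4o", "litellm_proxy/claude-3-5-sonnet"].
models = config.get_models()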