Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 02:34:29 +00:00)
(Bug Fix) Using LiteLLM Python SDK with model=litellm_proxy/ for embedding, image_generation, transcription, speech, rerank (#8815)
* test_litellm_gateway_from_sdk
* fix embedding check for openai
* test litellm proxy provider
* fix image generation openai compatible models
* fix litellm.transcription
* test_litellm_gateway_from_sdk_rerank
* docs litellm python sdk
* docs litellm python sdk with proxy
* test_litellm_gateway_from_sdk_rerank
* ci/cd run again
* test_litellm_gateway_from_sdk_image_generation
* test_litellm_gateway_from_sdk_embedding
* test_litellm_gateway_from_sdk_embedding
This commit is contained in:
parent: ef22209a15
commit: f9cee4c46b

6 changed files with 466 additions and 83 deletions
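The change routes these endpoints through the OpenAI-compatible provider path whenever the model name is prefixed with litellm_proxy/, so the Python SDK can call a running LiteLLM proxy directly. A minimal sketch of the rerank case, which is covered by the tests above but not shown in the hunks below; the proxy URL, key, and model name are placeholders, not values from this commit:

import litellm

response = litellm.rerank(
    model="litellm_proxy/rerank-english-v3.0",  # placeholder rerank model exposed by the proxy
    query="What is the capital of France?",
    documents=["Paris is the capital of France.", "Berlin is in Germany."],
    top_n=1,
    api_base="http://0.0.0.0:4000",  # placeholder proxy URL
    api_key="sk-1234",  # placeholder proxy key
)
print(response.results)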
@@ -3409,6 +3409,7 @@ def embedding(  # noqa: PLR0915
         or custom_llm_provider == "openai"
         or custom_llm_provider == "together_ai"
         or custom_llm_provider == "nvidia_nim"
+        or custom_llm_provider == "litellm_proxy"
     ):
         api_base = (
             api_base
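Adding "litellm_proxy" to this provider check is what lets litellm.embedding() fall into the OpenAI-compatible code path. A hedged usage sketch; the proxy URL, key, and embedding model name are placeholders:

import litellm

response = litellm.embedding(
    model="litellm_proxy/text-embedding-ada-002",  # placeholder embedding model on the proxy
    input=["good morning from litellm"],
    api_base="http://0.0.0.0:4000",  # placeholder proxy URL
    api_key="sk-1234",  # placeholder proxy key
)
print(len(response.data))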
@@ -3485,7 +3486,8 @@ def embedding(  # noqa: PLR0915
             # set API KEY
             if api_key is None:
                 api_key = (
-                    litellm.api_key
+                    api_key
+                    or litellm.api_key
                     or litellm.openai_like_key
                     or get_secret_str("OPENAI_LIKE_API_KEY")
                 )
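The hunk above makes an explicitly passed api_key win over the module-level settings. A small illustrative sketch of that precedence, using a hypothetical helper and plain os.environ in place of litellm's get_secret_str:

import os

import litellm

def resolve_openai_like_key(api_key=None):
    # Precedence implied by the diff: call argument, then litellm.api_key,
    # then litellm.openai_like_key, then the OPENAI_LIKE_API_KEY env var.
    return (
        api_key
        or litellm.api_key
        or litellm.openai_like_key
        or os.environ.get("OPENAI_LIKE_API_KEY")
    )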
@@ -4596,7 +4598,10 @@ def image_generation(  # noqa: PLR0915
             client=client,
             headers=headers,
         )
-    elif custom_llm_provider == "openai":
+    elif (
+        custom_llm_provider == "openai"
+        or custom_llm_provider in litellm.openai_compatible_providers
+    ):
         model_response = openai_chat_completions.image_generation(
             model=model,
             prompt=prompt,
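Widening the branch from an exact "openai" match to any provider in litellm.openai_compatible_providers lets litellm_proxy/ models reach openai_chat_completions.image_generation. A hedged sketch of the SDK call; model name, URL, and key are placeholders:

import litellm

response = litellm.image_generation(
    model="litellm_proxy/dall-e-3",  # placeholder image model on the proxy
    prompt="A cute baby sea otter",
    api_base="http://0.0.0.0:4000",  # placeholder proxy URL
    api_key="sk-1234",  # placeholder proxy key
)
print(response.data)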
@@ -5042,8 +5047,7 @@ def transcription(
         )
     elif (
         custom_llm_provider == "openai"
-        or custom_llm_provider == "groq"
-        or custom_llm_provider == "fireworks_ai"
+        or custom_llm_provider in litellm.openai_compatible_providers
     ):
         api_base = (
             api_base
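The same generalization applies to litellm.transcription, which previously special-cased only "openai", "groq", and "fireworks_ai". A hedged sketch, assuming a local audio file and placeholder proxy settings:

import litellm

with open("sample.wav", "rb") as audio_file:  # placeholder audio file
    response = litellm.transcription(
        model="litellm_proxy/whisper-1",  # placeholder transcription model on the proxy
        file=audio_file,
        api_base="http://0.0.0.0:4000",  # placeholder proxy URL
        api_key="sk-1234",  # placeholder proxy key
    )
print(response.text)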
@@ -5201,7 +5205,10 @@ def speech(
         custom_llm_provider=custom_llm_provider,
     )
     response: Optional[HttpxBinaryResponseContent] = None
-    if custom_llm_provider == "openai":
+    if (
+        custom_llm_provider == "openai"
+        or custom_llm_provider in litellm.openai_compatible_providers
+    ):
         if voice is None or not (isinstance(voice, str)):
             raise litellm.BadRequestError(
                 message="'voice' is required to be passed as a string for OpenAI TTS",
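For text-to-speech, the widened check keeps the existing requirement that voice be passed as a string. A hedged sketch; model, URL, and key are placeholders:

from pathlib import Path

import litellm

response = litellm.speech(
    model="litellm_proxy/tts-1",  # placeholder TTS model on the proxy
    voice="alloy",  # must be a string, per the check above
    input="Hello from the LiteLLM proxy",
    api_base="http://0.0.0.0:4000",  # placeholder proxy URL
    api_key="sk-1234",  # placeholder proxy key
)
response.stream_to_file(Path("speech.mp3"))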