import asyncio
import copy
import json
import os
from copy import deepcopy
from typing import Any, Callable, List, Literal, Optional, Tuple, Union

import httpx
from openai import OpenAI

import litellm
from litellm.llms.cohere.embed.handler import embedding as cohere_embedding
from litellm.llms.custom_httpx.http_handler import (
    AsyncHTTPHandler,
    HTTPHandler,
    _get_httpx_client,
    get_async_httpx_client,
)
from litellm.llms.OpenAI.openai import OpenAIChatCompletion
from litellm.types.llms.azure_ai import ImageEmbeddingRequest
from litellm.types.utils import Embedding, EmbeddingResponse
from litellm.utils import convert_to_model_response_object, is_base64_encoded

from .cohere_transformation import AzureAICohereConfig
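
# Azure AI embedding handler. Subclasses the OpenAI handler so plain-text
# inputs can reuse the OpenAI-compatible `/v1/embeddings` path, while image
# inputs are routed to Azure AI's `/images/embeddings` endpoint (used by
# Cohere image-embedding models, per the `cohere_transformation` config).
# See `embedding()` below for the split-and-merge flow.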


class AzureAIEmbedding(OpenAIChatCompletion):
    def _process_response(
        self,
        image_embedding_responses: Optional[List],
        text_embedding_responses: Optional[List],
        image_embeddings_idx: List[int],
        model_response: EmbeddingResponse,
        input: List,
    ):
        combined_responses = []
        if (
            image_embedding_responses is not None
            and text_embedding_responses is not None
        ):
            # Combine and order the results
            text_idx = 0
            image_idx = 0

            for idx in range(len(input)):
                if idx in image_embeddings_idx:
                    combined_responses.append(image_embedding_responses[image_idx])
                    image_idx += 1
                else:
                    combined_responses.append(text_embedding_responses[text_idx])
                    text_idx += 1

            model_response.data = combined_responses
        elif image_embedding_responses is not None:
            model_response.data = image_embedding_responses
        elif text_embedding_responses is not None:
            model_response.data = text_embedding_responses

        response = AzureAICohereConfig()._transform_response(response=model_response)  # type: ignore

        return response
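
    # Illustration of the interleaving above (comment only, not executed):
    # for input=["a", <image-url>, "b"] with image_embeddings_idx=[1],
    # the text responses fill indices 0 and 2 and the image response fills
    # index 1, so `model_response.data` preserves the caller's input order.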

    async def async_image_embedding(
        self,
        model: str,
        data: ImageEmbeddingRequest,
        timeout: float,
        logging_obj,
        model_response: litellm.EmbeddingResponse,
        optional_params: dict,
        api_key: Optional[str],
        api_base: Optional[str],
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
    ) -> EmbeddingResponse:
        if client is None or not isinstance(client, AsyncHTTPHandler):
            client = get_async_httpx_client(
                llm_provider=litellm.LlmProviders.AZURE_AI,
                params={"timeout": timeout},
            )

        url = "{}/images/embeddings".format(api_base)

        response = await client.post(
            url=url,
            json=data,  # type: ignore
            headers={"Authorization": "Bearer {}".format(api_key)},
        )

        embedding_response = response.json()
        embedding_headers = dict(response.headers)
        returned_response: litellm.EmbeddingResponse = convert_to_model_response_object(  # type: ignore
            response_object=embedding_response,
            model_response_object=model_response,
            response_type="embedding",
            stream=False,
            _response_headers=embedding_headers,
        )
        return returned_response

    def image_embedding(
        self,
        model: str,
        data: ImageEmbeddingRequest,
        timeout: float,
        logging_obj,
        model_response: litellm.EmbeddingResponse,
        optional_params: dict,
        api_key: Optional[str],
        api_base: Optional[str],
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
    ):
        if api_base is None:
            raise ValueError(
                "api_base is None. Set the AZURE_AI_API_BASE environment variable, or pass `api_base` dynamically, to make the request."
            )
        if api_key is None:
            raise ValueError(
                "api_key is None. Set the AZURE_AI_API_KEY environment variable, or pass `api_key` dynamically, to make the request."
            )

        if client is None or not isinstance(client, HTTPHandler):
            client = HTTPHandler(timeout=timeout, concurrent_limit=1)

        url = "{}/images/embeddings".format(api_base)

        response = client.post(
            url=url,
            json=data,  # type: ignore
            headers={"Authorization": "Bearer {}".format(api_key)},
        )

        embedding_response = response.json()
        embedding_headers = dict(response.headers)
        returned_response: litellm.EmbeddingResponse = convert_to_model_response_object(  # type: ignore
            response_object=embedding_response,
            model_response_object=model_response,
            response_type="embedding",
            stream=False,
            _response_headers=embedding_headers,
        )
        return returned_response
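
    # Both image-embedding methods above POST the `ImageEmbeddingRequest`
    # payload unchanged to `{api_base}/images/embeddings` with bearer-token
    # auth; only the HTTP client differs (sync `HTTPHandler` vs. the shared
    # async client). Sketch of the wire request, assuming a Cohere image
    # model deployed behind Azure AI:
    #
    #   POST {AZURE_AI_API_BASE}/images/embeddings
    #   Authorization: Bearer {AZURE_AI_API_KEY}
    #   body: the ImageEmbeddingRequest dict (its "input" list is the only
    #   field this handler inspects)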

    async def async_embedding(
        self,
        model: str,
        input: List,
        timeout: float,
        logging_obj,
        model_response: litellm.EmbeddingResponse,
        optional_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        client=None,
    ) -> EmbeddingResponse:
        (
            image_embeddings_request,
            v1_embeddings_request,
            image_embeddings_idx,
        ) = AzureAICohereConfig()._transform_request(
            input=input, optional_params=optional_params, model=model
        )

        image_embedding_responses: Optional[List] = None
        text_embedding_responses: Optional[List] = None

        if image_embeddings_request["input"]:
            image_response = await self.async_image_embedding(
                model=model,
                data=image_embeddings_request,
                timeout=timeout,
                logging_obj=logging_obj,
                model_response=model_response,
                optional_params=optional_params,
                api_key=api_key,
                api_base=api_base,
                client=client,
            )

            image_embedding_responses = image_response.data
            if image_embedding_responses is None:
                raise Exception("/images/embeddings route returned None Embeddings.")

        if v1_embeddings_request["input"]:
            response: EmbeddingResponse = await super().embedding(  # type: ignore
                model=model,
                input=input,
                timeout=timeout,
                logging_obj=logging_obj,
                model_response=model_response,
                optional_params=optional_params,
                api_key=api_key,
                api_base=api_base,
                client=client,
                aembedding=True,
            )
            text_embedding_responses = response.data
            if text_embedding_responses is None:
                raise Exception("/v1/embeddings route returned None Embeddings.")

        return self._process_response(
            image_embedding_responses=image_embedding_responses,
            text_embedding_responses=text_embedding_responses,
            image_embeddings_idx=image_embeddings_idx,
            model_response=model_response,
            input=input,
        )
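
    # The request-splitting contract (shape inferred from how the results of
    # `_transform_request` are consumed above): it returns two request dicts,
    # each with an "input" list (image items vs. text items), plus the indices
    # the image items occupied in the caller's original `input`, so that
    # `_process_response` can re-merge the results in order.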

    def embedding(
        self,
        model: str,
        input: List,
        timeout: float,
        logging_obj,
        model_response: litellm.EmbeddingResponse,
        optional_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        client=None,
        aembedding=None,
        max_retries: Optional[int] = None,
    ) -> litellm.EmbeddingResponse:
        """
        Separate image inputs from text inputs:
        -> route image inputs to `/images/embeddings`
        -> route text inputs to `/v1/embeddings` (the OpenAI-compatible route)

        then assemble the results in the original input order, and return them.
        """
        if aembedding is True:
            return self.async_embedding(  # type: ignore
                model,
                input,
                timeout,
                logging_obj,
                model_response,
                optional_params,
                api_key,
                api_base,
                client,
            )

        (
            image_embeddings_request,
            v1_embeddings_request,
            image_embeddings_idx,
        ) = AzureAICohereConfig()._transform_request(
            input=input, optional_params=optional_params, model=model
        )

        image_embedding_responses: Optional[List] = None
        text_embedding_responses: Optional[List] = None

        if image_embeddings_request["input"]:
            image_response = self.image_embedding(
                model=model,
                data=image_embeddings_request,
                timeout=timeout,
                logging_obj=logging_obj,
                model_response=model_response,
                optional_params=optional_params,
                api_key=api_key,
                api_base=api_base,
                client=client,
            )

            image_embedding_responses = image_response.data
            if image_embedding_responses is None:
                raise Exception("/images/embeddings route returned None Embeddings.")

        if v1_embeddings_request["input"]:
            response: EmbeddingResponse = super().embedding(  # type: ignore
                model,
                input,
                timeout,
                logging_obj,
                model_response,
                optional_params,
                api_key,
                api_base,
                client=(
                    client
                    if client is not None and isinstance(client, OpenAI)
                    else None
                ),
                aembedding=aembedding,
            )

            text_embedding_responses = response.data
            if text_embedding_responses is None:
                raise Exception("/v1/embeddings route returned None Embeddings.")

        return self._process_response(
            image_embedding_responses=image_embedding_responses,
            text_embedding_responses=text_embedding_responses,
            image_embeddings_idx=image_embeddings_idx,
            model_response=model_response,
            input=input,
        )
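

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the model name below is hypothetical).
# This handler is normally reached indirectly through `litellm.embedding`
# with the `azure_ai/` provider prefix, e.g.:
#
#   import os
#   import litellm
#
#   response = litellm.embedding(
#       model="azure_ai/cohere-embed-v3-english",  # hypothetical deployment
#       input=["hello world"],
#       api_base=os.getenv("AZURE_AI_API_BASE"),
#       api_key=os.getenv("AZURE_AI_API_KEY"),
#   )
#
# Mixed text/image inputs are split by `AzureAICohereConfig._transform_request`
# and recombined in input order by `_process_response` above.
# ---------------------------------------------------------------------------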