fix: make vision and embedding tests pass with openai, anthropic and gemini

NOTE: Anthropic embeddings do not work because LiteLLM does not support them.
Ashwin Bharambe 2025-02-26 10:52:33 -08:00
parent abfc4b3bce
commit 4cf95475e5
4 changed files with 10 additions and 5 deletions
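
Given the note above, embedding tests have to be skipped for Anthropic. A minimal sketch of how such a skip could look in pytest (the `inference_provider` fixture name is hypothetical, not part of this commit):

    import pytest

    PROVIDERS_WITHOUT_EMBEDDINGS = {"anthropic"}

    @pytest.fixture(autouse=True)
    def skip_unsupported_embeddings(request, inference_provider):
        # `inference_provider` is an assumed fixture naming the provider under test.
        if "embedding" in request.node.name and inference_provider in PROVIDERS_WITHOUT_EMBEDDINGS:
            pytest.skip(f"LiteLLM does not support embeddings for {inference_provider}")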

@@ -20,11 +20,11 @@ MODEL_ENTRIES = [ProviderModelEntry(provider_model_id=m) for m in LLM_MODEL_IDS]
     ProviderModelEntry(
         provider_model_id="openai/text-embedding-3-small",
         model_type=ModelType.embedding,
-        metadata={"embedding_dimension": 1536},
+        metadata={"embedding_dimension": 1536, "context_length": 8192},
     ),
     ProviderModelEntry(
         provider_model_id="openai/text-embedding-3-large",
         model_type=ModelType.embedding,
-        metadata={"embedding_dimension": 3072},
+        metadata={"embedding_dimension": 3072, "context_length": 8192},
     ),
 ]
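
The `context_length` metadata added above records each embedding model's maximum input size (8192 tokens for both text-embedding-3 models). A sketch of how a caller might use it; the helper below is illustrative and not part of this commit:

    def check_embedding_input(text: str, metadata: dict, chars_per_token: int = 4) -> None:
        # Rough heuristic: roughly four characters per token for English text.
        approx_tokens = len(text) // chars_per_token
        context_length = metadata.get("context_length")
        if context_length is not None and approx_tokens > context_length:
            raise ValueError(
                f"input is ~{approx_tokens} tokens; the model accepts at most {context_length}"
            )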

@@ -97,7 +97,6 @@ class LiteLLMOpenAIMixin(
         )
         params = await self._get_params(request)
-
         # unfortunately, we need to use synchronous litellm.completion here because litellm
         # caches various httpx.client objects in a non-eventloop aware manner
         response = litellm.completion(**params)
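
The retained comment explains why the call stays synchronous: litellm caches httpx client objects without awareness of the running event loop. If the blocking call ever needs to stop stalling the event loop, one standard workaround (a sketch, not what this commit does) is to push it onto a worker thread:

    import asyncio

    import litellm

    async def completion_in_thread(**params):
        # litellm.completion is synchronous; asyncio.to_thread keeps the event
        # loop responsive while the request runs in a worker thread.
        return await asyncio.to_thread(litellm.completion, **params)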

@@ -523,15 +523,19 @@ async def convert_message_to_openai_dict_new(message: Message | Dict) -> OpenAIC
     ) -> Union[str, Iterable[OpenAIChatCompletionContentPartParam]]:
         # Llama Stack and OpenAI spec match for str and text input
         if isinstance(content, str):
-            return content
+            return OpenAIChatCompletionContentPartTextParam(
+                type="text",
+                text=content,
+            )
         elif isinstance(content, TextContentItem):
             return OpenAIChatCompletionContentPartTextParam(
+                type="text",
                 text=content.text,
             )
         elif isinstance(content, ImageContentItem):
             return OpenAIChatCompletionContentPartImageParam(
-                image_url=OpenAIImageURL(url=await convert_image_content_to_url(content)),
                 type="image_url",
+                image_url=OpenAIImageURL(url=await convert_image_content_to_url(content)),
             )
         elif isinstance(content, List):
             return [await _convert_user_message_content(item) for item in content]
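
The net effect of this hunk: plain-string content is now wrapped in an explicit `type: text` part instead of being returned bare, and every branch sets its `type` field explicitly, so mixed text-plus-image messages (which the vision tests exercise) always consist of typed content parts. Assuming `OpenAIChatCompletionContentPartTextParam` behaves like the OpenAI SDK's TypedDicts (an assumption; the import is not shown here), the conversion now yields:

    part = OpenAIChatCompletionContentPartTextParam(type="text", text="hello")
    assert part == {"type": "text", "text": "hello"}  # a typed part, no longer a bare "hello"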

@@ -119,12 +119,14 @@ models:
   model_type: llm
 - metadata:
     embedding_dimension: 1536
+    context_length: 8192
   model_id: openai/text-embedding-3-small
   provider_id: openai
   provider_model_id: openai/text-embedding-3-small
   model_type: embedding
 - metadata:
     embedding_dimension: 3072
+    context_length: 8192
   model_id: openai/text-embedding-3-large
   provider_id: openai
   provider_model_id: openai/text-embedding-3-large
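
With the run configuration above, both OpenAI embedding models advertise their context length through model metadata. A hypothetical client-side check (the `client.models.retrieve` call and attribute names are assumptions about the client API, not shown in this commit):

    model = client.models.retrieve("openai/text-embedding-3-small")
    assert model.metadata["embedding_dimension"] == 1536
    assert model.metadata["context_length"] == 8192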