Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 19:24:27 +00:00
* VoyageEmbeddingConfig
* fix voyage logic to get params
* add voyage embedding transformation
* add get_provider_embedding_config
* use BaseEmbeddingConfig
* voyage clean up
* use llm http handler for embedding transformations
* test_voyage_ai_embedding_extra_params
* add voyage async
* test_voyage_ai_embedding_extra_params
* add async for llm http handler
* update BaseLLMEmbeddingTest
* test_voyage_ai_embedding_extra_params
* fix linting
* fix get_provider_embedding_config
* fix anthropic text test
* update location of base/chat/transformation
* fix import path
* fix IBMWatsonXAIConfig
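
The commit messages above reference a test_voyage_ai_embedding_extra_params test. A minimal sketch of such a check is shown below, assuming the Voyage AI route honors a passed-in client and forwards provider-specific parameters into the request body; the model string, the input_type parameter, and the way the body is read off the mock are assumptions for illustration, not taken from this file.

import json
from unittest.mock import patch

import litellm
from litellm.llms.custom_httpx.http_handler import HTTPHandler


def test_voyage_ai_embedding_extra_params():
    client = HTTPHandler()

    with patch.object(client, "post") as mock_post:
        try:
            # Assumed model name and Voyage-specific parameter; the mocked post
            # means response parsing may fail, which is ignored here because
            # only the outgoing request is inspected.
            litellm.embedding(
                model="voyage/voyage-3-lite",
                input=["hello world"],
                input_type="document",
                client=client,
            )
        except Exception:
            pass

        mock_post.assert_called_once()
        kwargs = mock_post.call_args.kwargs
        # The handler may pass the body as json= or as a serialized data= payload.
        body = kwargs.get("json") or json.loads(kwargs["data"])
        assert body["input_type"] == "document"
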
71 lines
2.1 KiB
Python
import asyncio
import httpx
import json
import pytest
import sys
from typing import Any, Dict, List
from unittest.mock import MagicMock, Mock, patch
import os

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import litellm
from litellm.exceptions import BadRequestError
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.utils import (
    CustomStreamWrapper,
    get_supported_openai_params,
    get_optional_params,
    get_optional_params_embeddings,
)

# test_example.py
from abc import ABC, abstractmethod


class BaseLLMEmbeddingTest(ABC):
    """
    Abstract base test class that enforces a common test across all test classes.
    """

    @abstractmethod
    def get_base_embedding_call_args(self) -> dict:
        """Must return the base embedding call args"""
        pass

    @abstractmethod
    def get_custom_llm_provider(self) -> litellm.LlmProviders:
        """Must return the custom llm provider"""
        pass

    @pytest.mark.asyncio()
    @pytest.mark.parametrize("sync_mode", [True, False])
    async def test_basic_embedding(self, sync_mode):
        litellm.set_verbose = True
        embedding_call_args = self.get_base_embedding_call_args()
        if sync_mode is True:
            response = litellm.embedding(
                **embedding_call_args,
                input=["hello", "world"],
            )

            print("embedding response: ", response)
        else:
            response = await litellm.aembedding(
                **embedding_call_args,
                input=["hello", "world"],
            )

            print("async embedding response: ", response)

        from openai.types.create_embedding_response import CreateEmbeddingResponse

        CreateEmbeddingResponse.model_validate(response.model_dump())

    def test_embedding_optional_params_max_retries(self):
        embedding_call_args = self.get_base_embedding_call_args()
        optional_params = get_optional_params_embeddings(
            **embedding_call_args, max_retries=20
        )
        assert optional_params["max_retries"] == 20
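
As a usage sketch, a provider-specific test class only needs to fill in the two abstract methods; pytest then collects the inherited test_basic_embedding and test_embedding_optional_params_max_retries tests. The Voyage AI example below is illustrative: the model string and the provider enum member are assumptions, not part of this file.

class TestVoyageAIEmbedding(BaseLLMEmbeddingTest):
    def get_base_embedding_call_args(self) -> dict:
        # Assumed model identifier for the Voyage AI embedding route.
        return {"model": "voyage/voyage-3-lite"}

    def get_custom_llm_provider(self) -> litellm.LlmProviders:
        # Assumes the provider enum exposes a VOYAGE member.
        return litellm.LlmProviders.VOYAGE
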