Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
(fix) unable to pass input_type parameter to Voyage AI embedding model (#7276)
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 46s
* VoyageEmbeddingConfig
* fix voyage logic to get params
* add voyage embedding transformation
* add get_provider_embedding_config
* use BaseEmbeddingConfig
* voyage clean up
* use llm http handler for embedding transformations
* test_voyage_ai_embedding_extra_params
* add voyage async
* test_voyage_ai_embedding_extra_params
* add async for llm http handler
* update BaseLLMEmbeddingTest
* test_voyage_ai_embedding_extra_params
* fix linting
* fix get_provider_embedding_config
* fix anthropic text test
* update location of base/chat/transformation
* fix import path
* fix IBMWatsonXAIConfig
This commit is contained in:
parent c90942e550
commit 7a5dd29fe0
52 changed files with 535 additions and 66 deletions
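For context, a minimal sketch of the call this commit fixes (the model name is illustrative): the Voyage-specific `input_type` parameter now reaches the Voyage AI embeddings endpoint instead of being dropped.

```python
import litellm

# Minimal sketch of the fixed behavior; model name is illustrative.
# `input_type` is a Voyage-specific extra param this PR forwards through.
response = litellm.embedding(
    model="voyage/voyage-3-lite",
    input=["good morning from litellm"],
    input_type="query",
)
print(response.data[0]["embedding"][:5])
```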
@@ -168,7 +168,8 @@ from typing import (
 from openai import OpenAIError as OriginalError
 
-from litellm.llms.base_llm.transformation import BaseConfig
+from litellm.llms.base_llm.chat.transformation import BaseConfig
+from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
 
 from ._logging import verbose_logger
 from .caching.caching import (
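The new import pulls in `BaseEmbeddingConfig`, whose definition is not part of this diff. A hypothetical sketch of its interface, inferred only from the methods this commit calls on `VoyageEmbeddingConfig`:

```python
# Hypothetical sketch of the BaseEmbeddingConfig interface, inferred from
# the calls made in this commit -- not the actual litellm source.
from abc import ABC, abstractmethod
from typing import List


class BaseEmbeddingConfigSketch(ABC):
    @abstractmethod
    def get_supported_openai_params(self, model: str) -> List[str]:
        """OpenAI-compatible params this provider's embedding endpoint accepts."""
        ...

    @abstractmethod
    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        """Translate OpenAI-style params into provider request params."""
        ...
```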
@@ -2377,6 +2378,21 @@ def get_optional_params_embeddings(  # noqa: PLR0915
         )
         final_params = {**optional_params, **kwargs}
         return final_params
+    elif custom_llm_provider == "voyage":
+        supported_params = get_supported_openai_params(
+            model=model,
+            custom_llm_provider="voyage",
+            request_type="embeddings",
+        )
+        _check_valid_arg(supported_params=supported_params)
+        optional_params = litellm.VoyageEmbeddingConfig().map_openai_params(
+            non_default_params=non_default_params,
+            optional_params={},
+            model=model,
+            drop_params=drop_params if drop_params is not None else False,
+        )
+        final_params = {**optional_params, **kwargs}
+        return final_params
     elif custom_llm_provider == "fireworks_ai":
         supported_params = get_supported_openai_params(
             model=model,
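The last two added lines are where the fix lands: the mapped OpenAI-style params are merged with the raw kwargs, so provider-specific extras such as `input_type` survive into the request. A minimal illustration of that merge, with made-up values:

```python
# Illustration of the final merge in the voyage branch (values made up):
# mapped OpenAI-style params plus Voyage-specific passthrough kwargs.
optional_params = {"encoding_format": "float"}
kwargs = {"input_type": "query"}
final_params = {**optional_params, **kwargs}
assert final_params == {"encoding_format": "float", "input_type": "query"}
```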
@@ -6200,6 +6216,15 @@ class ProviderConfigManager:
             return litellm.PetalsConfig()
         return litellm.OpenAIGPTConfig()
 
+    @staticmethod
+    def get_provider_embedding_config(
+        model: str,
+        provider: LlmProviders,
+    ) -> BaseEmbeddingConfig:
+        if litellm.LlmProviders.VOYAGE == provider:
+            return litellm.VoyageEmbeddingConfig()
+        raise ValueError(f"Provider {provider} does not support embedding config")
+
 
 def get_end_user_id_for_cost_tracking(
     litellm_params: dict,
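Usage of the new lookup, as a hedged sketch (the model string is illustrative, and the import path is assumed; the hunk above references the enum as `litellm.LlmProviders`):

```python
import litellm
from litellm.utils import ProviderConfigManager  # import path assumed

# Resolve the embedding config class for a provider. Per the hunk above,
# only Voyage AI is wired up so far; any other provider raises ValueError.
config = ProviderConfigManager.get_provider_embedding_config(
    model="voyage/voyage-3-lite",  # illustrative model name
    provider=litellm.LlmProviders.VOYAGE,
)
print(type(config).__name__)  # -> VoyageEmbeddingConfig
```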