Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
[Feat] Add fireworks AI embedding (#5812)
* add fireworks embedding models
* add fireworks ai
* fireworks ai embeddings support
* is_fireworks_embedding_model
* working fireworks embeddings
* fix health check
* models
* fix embedding get optional params
* fix linting errors
* fix pick_cheapest_chat_model_from_llm_provider
* add fireworks ai litellm provider
* docs fireworks embedding models
* fixes for when azure ad token is passed
This commit is contained in:
parent d349d501c8
commit 1d630b61ad
9 changed files with 181 additions and 61 deletions
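At a glance, the feature is consumed through litellm's standard embedding entrypoint. A minimal usage sketch, assuming an illustrative Fireworks AI model id (this diff does not pin one down) and assuming that model accepts a dimensions argument:

import litellm

# Illustrative model id: treat the string below as an assumption, not
# something this commit specifies.
response = litellm.embedding(
    model="fireworks_ai/nomic-ai/nomic-embed-text-v1.5",
    input=["hello from litellm"],
    dimensions=512,  # assumed to be supported by this particular model
)
print(len(response.data[0]["embedding"]))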
@@ -2610,13 +2610,13 @@ def get_optional_params_embeddings(
                 status_code=500,
                 message="Setting dimensions is not supported for OpenAI `text-embedding-3` and later models. To drop it from the call, set `litellm.drop_params = True`.",
             )
-    if custom_llm_provider == "triton":
+    elif custom_llm_provider == "triton":
         keys = list(non_default_params.keys())
         for k in keys:
             non_default_params.pop(k, None)
         final_params = {**non_default_params, **kwargs}
         return final_params
-    if custom_llm_provider == "databricks":
+    elif custom_llm_provider == "databricks":
         supported_params = get_supported_openai_params(
             model=model or "",
             custom_llm_provider="databricks",
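This hunk and the three that follow are mechanical: every provider branch in get_optional_params_embeddings ends by returning final_params, so rewriting the chain of independent if statements as elif preserves behavior while making the dispatch on custom_llm_provider explicitly mutually exclusive. A minimal sketch of the pattern, with invented provider names:

# Sketch only: "a" and "b" stand in for provider branches that each return.
def dispatch(provider: str, params: dict) -> dict:
    if provider == "a":
        return {}  # with elif below, exactly one branch can match
    elif provider == "b":
        return dict(params)
    return params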
@@ -2628,7 +2628,7 @@ def get_optional_params_embeddings(
         )
         final_params = {**optional_params, **kwargs}
         return final_params
-    if custom_llm_provider == "vertex_ai":
+    elif custom_llm_provider == "vertex_ai":
         supported_params = get_supported_openai_params(
             model=model,
             custom_llm_provider="vertex_ai",
@@ -2643,7 +2643,7 @@ def get_optional_params_embeddings(
         )
         final_params = {**optional_params, **kwargs}
         return final_params
-    if custom_llm_provider == "bedrock":
+    elif custom_llm_provider == "bedrock":
         # if dimensions is in non_default_params -> pass it for model=bedrock/amazon.titan-embed-text-v2
         if "amazon.titan-embed-text-v1" in model:
             object: Any = litellm.AmazonTitanG1Config()
@@ -2666,35 +2666,7 @@ def get_optional_params_embeddings(
         )
         final_params = {**optional_params, **kwargs}
         return final_params
-        # elif model == "amazon.titan-embed-image-v1":
-        #     supported_params = litellm.AmazonTitanG1Config().get_supported_openai_params()
-        #     _check_valid_arg(supported_params=supported_params)
-        #     optional_params = litellm.AmazonTitanG1Config().map_openai_params(
-        #         non_default_params=non_default_params, optional_params={}
-        #     )
-        #     final_params = {**optional_params, **kwargs}
-        #     return final_params
-
-        # if (
-        #     "dimensions" in non_default_params.keys()
-        #     and "amazon.titan-embed-text-v2" in model
-        # ):
-        #     kwargs["dimensions"] = non_default_params["dimensions"]
-        #     non_default_params.pop("dimensions", None)
-
-        # if len(non_default_params.keys()) > 0:
-        #     if litellm.drop_params is True:  # drop the unsupported non-default values
-        #         keys = list(non_default_params.keys())
-        #         for k in keys:
-        #             non_default_params.pop(k, None)
-        #         final_params = {**non_default_params, **kwargs}
-        #         return final_params
-        #     raise UnsupportedParamsError(
-        #         status_code=500,
-        #         message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.",
-        #     )
-        # return {**non_default_params, **kwargs}
-    if custom_llm_provider == "mistral":
+    elif custom_llm_provider == "mistral":
         supported_params = get_supported_openai_params(
             model=model,
             custom_llm_provider="mistral",
@@ -2706,7 +2678,20 @@ def get_optional_params_embeddings(
         )
         final_params = {**optional_params, **kwargs}
         return final_params
-    if (
+    elif custom_llm_provider == "fireworks_ai":
+        supported_params = get_supported_openai_params(
+            model=model,
+            custom_llm_provider="fireworks_ai",
+            request_type="embeddings",
+        )
+        _check_valid_arg(supported_params=supported_params)
+        optional_params = litellm.FireworksAIEmbeddingConfig().map_openai_params(
+            non_default_params=non_default_params, optional_params={}, model=model
+        )
+        final_params = {**optional_params, **kwargs}
+        return final_params
+
+    elif (
         custom_llm_provider != "openai"
         and custom_llm_provider != "azure"
         and custom_llm_provider not in litellm.openai_compatible_providers
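This is the heart of the change: fireworks_ai gets a dedicated branch that asks get_supported_openai_params for the embeddings-specific parameter set, validates the caller's arguments against it, and maps them through the new FireworksAIEmbeddingConfig. The config class itself is added elsewhere in this commit; the sketch below is a hedged approximation of such a mapping class, and the nomic-ai check is an assumption rather than something this hunk shows:

# Hedged sketch in the spirit of litellm's provider *Config classes.
class FireworksAIEmbeddingConfigSketch:
    def get_supported_openai_params(self, model: str) -> list:
        # Assumption: only some Fireworks embedding models take `dimensions`.
        if "nomic-ai" in model:
            return ["dimensions"]
        return []

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict, model: str
    ) -> dict:
        # Copy over only the params the model is known to support.
        supported = self.get_supported_openai_params(model)
        for k, v in non_default_params.items():
            if k in supported:
                optional_params[k] = v
        return optional_params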
@@ -2723,7 +2708,6 @@ def get_optional_params_embeddings(
                 status_code=500,
                 message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.",
             )
-
         final_params = {**non_default_params, **kwargs}
         return final_params
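The catch-all branch for providers outside the OpenAI-compatible set keeps its UnsupportedParamsError; the only change here is a dropped blank line. The escape hatch named in the error message is a real litellm global flag, used like this:

import litellm

# With drop_params set, params a provider cannot honor (e.g. `user` or
# `encoding_format`) are dropped instead of raising UnsupportedParamsError.
litellm.drop_params = True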
@@ -4293,7 +4277,12 @@ def get_supported_openai_params(
     elif custom_llm_provider == "anthropic":
         return litellm.AnthropicConfig().get_supported_openai_params()
     elif custom_llm_provider == "fireworks_ai":
-        return litellm.FireworksAIConfig().get_supported_openai_params()
+        if request_type == "embeddings":
+            return litellm.FireworksAIEmbeddingConfig().get_supported_openai_params(
+                model=model
+            )
+        else:
+            return litellm.FireworksAIConfig().get_supported_openai_params()
     elif custom_llm_provider == "nvidia_nim":
         return litellm.NvidiaNimConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "cerebras":
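With the request_type argument (seen being passed in the fireworks_ai branch above), fireworks_ai now advertises different supported parameters for chat versus embeddings. A usage sketch, assuming the function is importable from the top-level package as elsewhere in litellm, with illustrative model ids:

from litellm import get_supported_openai_params

chat_params = get_supported_openai_params(
    model="accounts/fireworks/models/llama-v3p1-8b-instruct",  # illustrative
    custom_llm_provider="fireworks_ai",
)
embedding_params = get_supported_openai_params(
    model="nomic-ai/nomic-embed-text-v1.5",  # illustrative
    custom_llm_provider="fireworks_ai",
    request_type="embeddings",
)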
@@ -4915,6 +4904,10 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> Mod
                 "litellm_provider"
             ].startswith("vertex_ai"):
                 pass
+            elif custom_llm_provider == "fireworks_ai" and _model_info[
+                "litellm_provider"
+            ].startswith("fireworks_ai"):
+                pass
             else:
                 raise Exception
         elif split_model in litellm.model_cost:
@@ -4929,6 +4922,10 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> Mod
                 "litellm_provider"
             ].startswith("vertex_ai"):
                 pass
+            elif custom_llm_provider == "fireworks_ai" and _model_info[
+                "litellm_provider"
+            ].startswith("fireworks_ai"):
+                pass
             else:
                 raise Exception
         else:
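Both get_model_info hunks add the same guard: when the caller asks for fireworks_ai, a cost-map entry whose litellm_provider merely starts with "fireworks_ai" still counts as a match, mirroring the existing vertex_ai prefix check. A minimal sketch; the "fireworks_ai-embedding-models" value below is an assumed cost-map entry, not something this diff shows:

def provider_matches(custom_llm_provider: str, litellm_provider: str) -> bool:
    # Prefix match so sub-provider entries (assumed example below) resolve.
    if custom_llm_provider == "fireworks_ai":
        return litellm_provider.startswith("fireworks_ai")
    return litellm_provider == custom_llm_provider

assert provider_matches("fireworks_ai", "fireworks_ai-embedding-models")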