[FEAT] Added Snowflake completion provider

Sunny Wan 2025-03-03 01:20:00 -05:00
parent 66f5f70c88
commit c625b3fe0d
8 changed files with 288 additions and 0 deletions
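
For reference, once merged the provider should be callable like any other litellm backend. A hedged usage sketch: the model name is illustrative, and the environment variables mirror the get_secret lookups in the diff below (the JWT is forwarded through api_key).

    import os

    import litellm

    os.environ["SNOWFLAKE_ACCOUNT_ID"] = "my-account"  # placeholder account locator

    response = litellm.completion(
        model="snowflake/mistral-large2",  # illustrative model name
        messages=[{"role": "user", "content": "Say hi"}],
        api_key=os.environ["SNOWFLAKE_JWT"],  # forwarded to the handler as JWT=
    )
    print(response.choices[0].message.content)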


@@ -146,6 +146,7 @@ from .llms.openai_like.embedding.handler import OpenAILikeEmbeddingHandler
 from .llms.petals.completion import handler as petals_handler
 from .llms.predibase.chat.handler import PredibaseChatCompletion
 from .llms.replicate.chat.handler import completion as replicate_chat_completion
+from .llms.snowflake.completion.handler import SnowflakeChatCompletion
 from .llms.sagemaker.chat.handler import SagemakerChatHandler
 from .llms.sagemaker.completion.handler import SagemakerLLM
 from .llms.vertex_ai import vertex_ai_non_gemini
@@ -236,6 +237,7 @@ databricks_embedding = DatabricksEmbeddingHandler()
 base_llm_http_handler = BaseLLMHTTPHandler()
 base_llm_aiohttp_handler = BaseLLMAIOHTTPHandler()
 sagemaker_chat_completion = SagemakerChatHandler()
+snow_flake_chat_completion = SnowflakeChatCompletion()
 ####### COMPLETION ENDPOINTS ################
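
A note on the interface: from the keyword arguments at the call site in the next hunk, the new handler's signature looks roughly like the sketch below. This is inferred from the diff, not the actual contents of llms/snowflake/completion/handler.py.

    # Skeleton inferred from the call site; the real handler lives in
    # llms/snowflake/completion/handler.py and does the HTTP work.
    class SnowflakeChatCompletion:
        def completion(
            self,
            model: str,
            messages: list,
            api_base: str,
            acompletion: bool,
            custom_prompt_dict: dict,
            model_response,
            print_verbose,
            optional_params: dict,
            litellm_params: dict,
            logger_fn,
            encoding,
            JWT: str,
            logging_obj,
            headers: dict,
        ):
            # Expected to POST `messages` to the Cortex endpoint at
            # `api_base`, authenticate with the JWT, and populate
            # `model_response` with the returned completion.
            raise NotImplementedError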
@@ -2974,6 +2976,28 @@ def completion( # type: ignore # noqa: PLR0915
         )
         return response
         response = model_response
+    elif custom_llm_provider == "snowflake" or model in litellm.snowflake_models:
+        api_base = (
+            api_base
+            or get_secret("SNOWFLAKE_API_BASE")
+            or f"""https://{get_secret("SNOWFLAKE_ACCOUNT_ID")}.snowflakecomputing.com/api/v2/cortex/inference:complete"""
+        )
+        response = snow_flake_chat_completion.completion(
+            model=model,
+            messages=messages,
+            api_base=api_base,
+            acompletion=acompletion,
+            custom_prompt_dict=litellm.custom_prompt_dict,
+            model_response=model_response,
+            print_verbose=print_verbose,
+            optional_params=optional_params,
+            litellm_params=litellm_params,
+            logger_fn=logger_fn,
+            encoding=encoding,
+            JWT=api_key,
+            logging_obj=logging,
+            headers=headers,
+        )
     elif custom_llm_provider == "custom":
         url = litellm.api_base or api_base or ""
         if url is None or url == "":
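
For readers unfamiliar with the backend this dispatches to: the handler presumably wraps a POST to the Cortex inference endpoint whose URL is assembled above. A minimal sketch of that raw request, assuming key-pair JWT auth (which is what the JWT=api_key kwarg suggests) and Snowflake's documented KEYPAIR_JWT token-type header; this is illustrative, not the handler's actual code.

    import os

    import requests

    account = os.environ["SNOWFLAKE_ACCOUNT_ID"]
    jwt = os.environ["SNOWFLAKE_JWT"]  # hypothetical env var holding a signed JWT

    resp = requests.post(
        f"https://{account}.snowflakecomputing.com/api/v2/cortex/inference:complete",
        headers={
            "Authorization": f"Bearer {jwt}",
            "X-Snowflake-Authorization-Token-Type": "KEYPAIR_JWT",
            "Content-Type": "application/json",
        },
        json={
            "model": "mistral-large2",  # illustrative Cortex model name
            "messages": [{"role": "user", "content": "Say hi"}],
        },
        timeout=60,
    )
    resp.raise_for_status()
    print(resp.text)  # Cortex typically answers as a server-sent-event stream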
@@ -3032,6 +3056,7 @@ def completion( # type: ignore # noqa: PLR0915
         model_response.created = int(time.time())
         model_response.model = model
         response = model_response
     elif (
         custom_llm_provider in litellm._custom_providers
     ):  # Assume custom LLM provider