litellm-mirror/litellm/constants.py
Krish Dholakia 40a22eb4c6
LiteLLM Common Base LLM Config (pt.4): Move Ollama to Base LLM Config (#7157)
* refactor(ollama/): refactor ollama `/api/generate` to use base llm config

Addresses https://github.com/andrewyng/aisuite/issues/113#issuecomment-2512369132

* test: skip unresponsive test

* test(test_secret_manager.py): mark flaky test

* test: fix google sm test
2024-12-10 21:39:28 -08:00

68 lines
1.3 KiB
Python

# Default cap on fallback attempts per request.
# NOTE(review): presumably consumed by the Router's fallback loop — confirm
# against the router implementation; only the value is visible here.
ROUTER_MAX_FALLBACKS = 5
# Default batch size for buffered/batched operations (likely logging or
# queue flushing — TODO confirm which consumers read this).
DEFAULT_BATCH_SIZE = 512
# Default interval, in seconds, between periodic flushes of buffered items.
DEFAULT_FLUSH_INTERVAL_SECONDS = 5
# Default retry count for outbound API calls (assumed; verify at call sites).
DEFAULT_MAX_RETRIES = 2
# Chat providers recognized by litellm.
#
# NOTE(review): the commented-out names appear to be a deliberate migration
# checklist — per the accompanying commit ("Move Ollama to Base LLM Config",
# PR #7157), entries are uncommented one at a time as each provider is moved
# onto the common base LLM config. Do NOT delete the commented entries; they
# track the remaining backlog. Confirm this intent with the maintainers
# before cleaning up.
LITELLM_CHAT_PROVIDERS = [
# "openai",
# "openai_like",
# "xai",
# "custom_openai",
# "text-completion-openai",
# "cohere",
# "cohere_chat",
# "clarifai",
# "anthropic",
# "anthropic_text",
# "replicate",
# "huggingface",
# "together_ai",
# "openrouter",
# "vertex_ai",
# "vertex_ai_beta",
# "palm",
# "gemini",
# "ai21",
# "baseten",
# "azure",
# "azure_text",
# "azure_ai",
# "sagemaker",
# "sagemaker_chat",
# "bedrock",
# "vllm",
# "nlp_cloud",
# "petals",
# "oobabooga",
"ollama",  # migrated to base LLM config in PR #7157 (see commit header)
# "ollama_chat",
# "deepinfra",
# "perplexity",
# "anyscale",
# "mistral",
# "groq",
# "nvidia_nim",
# "cerebras",
# "ai21_chat",
# "volcengine",
# "codestral",
# "text-completion-codestral",
# "deepseek",
# "sambanova",
# "maritalk",
# "voyage",
# "cloudflare",
"fireworks_ai",
"friendliai",
"watsonx",
"watsonx_text",
"triton",
"predibase",
"databricks",
"empower",
"github",
"custom",
"litellm_proxy",
"hosted_vllm",
"lm_studio",
"galadriel",
]