Mirror of https://github.com/BerriAI/litellm.git, last synced 2025-04-25 02:34:29 +00:00.
* refactor(fireworks_ai/): inherit from openai-like base config — refactors Fireworks AI to use a common config
* test: fix import in test
* refactor(watsonx/): refactor watsonx to use the LLM base config — refactors chat + completion routes onto the base-config path
* fix: fix linting error
* test: fix test
* fix: fix test
68 lines · 1.3 KiB · Python
# Router and logging defaults.
ROUTER_MAX_FALLBACKS = 5  # maximum fallback deployments tried per request
DEFAULT_BATCH_SIZE = 512  # items buffered before a batch flush
DEFAULT_FLUSH_INTERVAL_SECONDS = 5  # seconds between periodic flushes
DEFAULT_MAX_RETRIES = 2  # default retry count for failed calls

# Chat providers already migrated onto the shared base-config path.
# Commented-out entries are providers that have not been migrated yet;
# they are kept here as a checklist and are intentionally inactive.
LITELLM_CHAT_PROVIDERS = [
    # "openai",
    # "openai_like",
    # "xai",
    # "custom_openai",
    # "text-completion-openai",
    # "cohere",
    # "cohere_chat",
    # "clarifai",
    # "anthropic",
    # "anthropic_text",
    # "replicate",
    # "huggingface",
    # "together_ai",
    # "openrouter",
    # "vertex_ai",
    # "vertex_ai_beta",
    # "palm",
    # "gemini",
    # "ai21",
    # "baseten",
    # "azure",
    # "azure_text",
    # "azure_ai",
    # "sagemaker",
    # "sagemaker_chat",
    # "bedrock",
    # "vllm",
    # "nlp_cloud",
    # "petals",
    # "oobabooga",
    # "ollama",
    # "ollama_chat",
    # "deepinfra",
    # "perplexity",
    # "anyscale",
    # "mistral",
    # "groq",
    # "nvidia_nim",
    # "cerebras",
    # "ai21_chat",
    # "volcengine",
    # "codestral",
    # "text-completion-codestral",
    # "deepseek",
    # "sambanova",
    # "maritalk",
    # "voyage",
    # "cloudflare",
    "fireworks_ai",
    "friendliai",
    "watsonx",
    "watsonx_text",
    "triton",
    "predibase",
    "databricks",
    "empower",
    "github",
    "custom",
    "litellm_proxy",
    "hosted_vllm",
    "lm_studio",
    "galadriel",
]