Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 11:14:04 +00:00.
* feat(base_llm): initial commit for common base config class. Addresses code QA critique https://github.com/andrewyng/aisuite/issues/113#issuecomment-2512369132
* feat(base_llm/): add transform request/response abstract methods to base config class
* feat(cohere-+-clarifai): refactor integrations to use common base config class
* fix: fix linting errors
* refactor(anthropic/): move anthropic + vertex anthropic to use base config
* test: fix xai test
* test: fix tests
* fix: fix linting errors
* test: comment out WIP test
* fix(transformation.py): fix "is pdf used" check
* fix: fix linting error
68 lines
1.2 KiB
Python
# Module-level constants for litellm.
#
# NOTE(review): this chunk shows only the definitions, not their usage —
# the per-constant notes below are inferred from the names; confirm against callers.

# Maximum number of fallback attempts the router will make — TODO confirm
# exact semantics (per request vs. per deployment) against router code.
ROUTER_MAX_FALLBACKS = 5

# Presumably the default batching size for buffered work (e.g. log flushing);
# verify against the consumer of this constant.
DEFAULT_BATCH_SIZE = 512

# Default flush interval in seconds (units are in the name).
DEFAULT_FLUSH_INTERVAL_SECONDS = 5

# Default number of retries for failed calls — confirm against client setup.
DEFAULT_MAX_RETRIES = 2

# Chat-capable provider identifiers recognized by litellm.
# Entries are provider routing keys; the exact strings are part of the
# public interface (they appear in model strings like "anthropic/..."),
# so they must not be renamed or reordered casually.
LITELLM_CHAT_PROVIDERS = [
    "openai",
    "openai_like",
    "xai",
    "custom_openai",
    "text-completion-openai",
    "cohere",
    "cohere_chat",
    "clarifai",
    "anthropic",
    "replicate",
    "huggingface",
    "together_ai",
    "openrouter",
    "vertex_ai",
    "vertex_ai_beta",
    "palm",
    "gemini",
    "ai21",
    "baseten",
    "azure",
    "azure_text",
    "azure_ai",
    "sagemaker",
    "sagemaker_chat",
    "bedrock",
    "vllm",
    "nlp_cloud",
    "petals",
    "oobabooga",
    "ollama",
    "ollama_chat",
    "deepinfra",
    "perplexity",
    "anyscale",
    "mistral",
    "groq",
    "nvidia_nim",
    "cerebras",
    "ai21_chat",
    "volcengine",
    "codestral",
    "text-completion-codestral",
    "deepseek",
    "sambanova",
    "maritalk",
    "voyage",
    "cloudflare",
    "xinference",
    "fireworks_ai",
    "friendliai",
    "watsonx",
    "watsonx_text",
    "triton",
    "predibase",
    "databricks",
    "empower",
    "github",
    "custom",
    "litellm_proxy",
    "hosted_vllm",
    "lm_studio",
    "galadriel",
]