forked from phoenix/litellm-mirror
LiteLLM Minor Fixes & Improvements (09/24/2024) (#5880)
* LiteLLM Minor Fixes & Improvements (09/23/2024) (#5842)
* feat(auth_utils.py): enable admin to allow client-side credentials to be passed. Makes it easier for devs to experiment with finetuned fireworks ai models.
* feat(router.py): allow setting configurable_clientside_auth_params for a model. Closes https://github.com/BerriAI/litellm/issues/5843
* build(model_prices_and_context_window.json): fix anthropic claude-3-5-sonnet max output token limit. Fixes https://github.com/BerriAI/litellm/issues/5850
* fix(azure_ai/): support content list for azure ai. Fixes https://github.com/BerriAI/litellm/issues/4237
* fix(litellm_logging.py): always set saved_cache_cost. Set to 0 by default.
* fix(fireworks_ai/cost_calculator.py): add fireworks ai default pricing. Handles calling 405b+ size models.
* fix(slack_alerting.py): fix error alerting for failed spend tracking. Fixes regression with slack alerting error monitoring.
* fix(vertex_and_google_ai_studio_gemini.py): handle gemini "no candidates in streaming chunk" error
* docs(bedrock.md): add llama3-1 models
* test: fix tests
* fix(azure_ai/chat): fix transformation for azure ai calls
* feat(azure_ai/embed): add azure ai embeddings support. Closes https://github.com/BerriAI/litellm/issues/5861
* fix(azure_ai/embed): enable async embedding
* feat(azure_ai/embed): support azure ai multimodal embeddings
* fix(azure_ai/embed): support async multimodal embeddings
* feat(together_ai/embed): support together ai embedding calls
* feat(rerank/main.py): log source documents for rerank endpoints to langfuse. Improves rerank endpoint logging.
* fix(langfuse.py): support logging `/audio/speech` input to langfuse
* test(test_embedding.py): fix test
* test(test_completion_cost.py): fix helper util
parent 5bc5eaff8a
commit 16c0307eab

25 changed files with 1675 additions and 340 deletions
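The embedding support added in this commit goes through litellm's generic embedding entrypoint. Below is a minimal usage sketch; the Azure AI endpoint, API key, and the Together AI model name are placeholders assumed for illustration, not values taken from this commit.

import litellm

# azure_ai embedding route added in this PR; endpoint and key are assumed placeholders.
resp = litellm.embedding(
    model="azure_ai/Cohere-embed-v3-english",
    input=["litellm now routes azure ai embeddings"],
    api_base="https://your-endpoint.inference.ai.azure.com",  # placeholder
    api_key="your-azure-ai-key",                              # placeholder
)
print(resp.usage)

# together_ai embeddings use the same call shape; this model name is an
# illustrative example, not one introduced by this commit.
resp = litellm.embedding(
    model="together_ai/togethercomputer/m2-bert-80M-8k-retrieval",
    input=["same entrypoint for together ai embeddings"],
)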
model_prices_and_context_window.json

@@ -990,6 +990,26 @@
         "mode": "chat",
         "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-405b-instruct-offer?tab=PlansAndPrice"
     },
+    "azure_ai/Cohere-embed-v3-english": {
+        "max_tokens": 512,
+        "max_input_tokens": 512,
+        "output_vector_size": 1024,
+        "input_cost_per_token": 0.0000001,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "azure_ai",
+        "mode": "embedding",
+        "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice"
+    },
+    "azure_ai/Cohere-embed-v3-multilingual": {
+        "max_tokens": 512,
+        "max_input_tokens": 512,
+        "output_vector_size": 1024,
+        "input_cost_per_token": 0.0000001,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "azure_ai",
+        "mode": "embedding",
+        "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice"
+    },
     "babbage-002": {
         "max_tokens": 16384,
         "max_input_tokens": 16384,
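To make the new pricing fields concrete: for an embedding entry, spend is simply input tokens multiplied by input_cost_per_token, since output_cost_per_token is 0.0. A rough illustration follows; this is not litellm's actual cost-calculator code.

# Illustrative only; litellm's real spend tracking lives in its cost calculator.
pricing = {
    "input_cost_per_token": 0.0000001,  # from the azure_ai/Cohere-embed-v3-english entry above
    "output_cost_per_token": 0.0,       # embeddings bill no output tokens
}

def embedding_cost(prompt_tokens: int) -> float:
    return prompt_tokens * pricing["input_cost_per_token"]

print(embedding_cost(1_000_000))  # ~0.10 USD per 1M input tokens at this rate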
@@ -4964,50 +4984,71 @@
     "together-ai-up-to-4b": {
         "input_cost_per_token": 0.0000001,
         "output_cost_per_token": 0.0000001,
-        "litellm_provider": "together_ai"
+        "litellm_provider": "together_ai",
+        "mode": "chat"
     },
     "together-ai-4.1b-8b": {
         "input_cost_per_token": 0.0000002,
         "output_cost_per_token": 0.0000002,
-        "litellm_provider": "together_ai"
+        "litellm_provider": "together_ai",
+        "mode": "chat"
     },
     "together-ai-8.1b-21b": {
         "max_tokens": 1000,
         "input_cost_per_token": 0.0000003,
         "output_cost_per_token": 0.0000003,
-        "litellm_provider": "together_ai"
+        "litellm_provider": "together_ai",
+        "mode": "chat"
     },
     "together-ai-21.1b-41b": {
         "input_cost_per_token": 0.0000008,
         "output_cost_per_token": 0.0000008,
-        "litellm_provider": "together_ai"
+        "litellm_provider": "together_ai",
+        "mode": "chat"
     },
     "together-ai-41.1b-80b": {
         "input_cost_per_token": 0.0000009,
         "output_cost_per_token": 0.0000009,
-        "litellm_provider": "together_ai"
+        "litellm_provider": "together_ai",
+        "mode": "chat"
     },
     "together-ai-81.1b-110b": {
         "input_cost_per_token": 0.0000018,
         "output_cost_per_token": 0.0000018,
-        "litellm_provider": "together_ai"
+        "litellm_provider": "together_ai",
+        "mode": "chat"
     },
+    "together-ai-embedding-up-to-150m": {
+        "input_cost_per_token": 0.000000008,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "together_ai",
+        "mode": "embedding"
+    },
+    "together-ai-embedding-151m-to-350m": {
+        "input_cost_per_token": 0.000000016,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "together_ai",
+        "mode": "embedding"
+    },
     "together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1": {
         "input_cost_per_token": 0.0000006,
         "output_cost_per_token": 0.0000006,
         "litellm_provider": "together_ai",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "mode": "chat"
     },
     "together_ai/mistralai/Mistral-7B-Instruct-v0.1": {
         "litellm_provider": "together_ai",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "mode": "chat"
     },
     "together_ai/togethercomputer/CodeLlama-34b-Instruct": {
         "litellm_provider": "together_ai",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "mode": "chat"
     },
     "ollama/codegemma": {
         "max_tokens": 8192,
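The "together-ai-*b" keys in this hunk act as size-bucket fallback prices for Together AI models that have no dedicated entry; as I understand it, litellm derives the bucket from the parameter count implied by the model name. The sketch below shows the idea only; the regex, helper name, and example model are illustrative assumptions, not litellm's actual lookup code.

import re
from typing import Optional

# Illustrative mapping from a model-name parameter count to the pricing-tier
# keys updated above (upper bound in billions of parameters, tier key).
CHAT_TIERS = [
    (4, "together-ai-up-to-4b"),
    (8, "together-ai-4.1b-8b"),
    (21, "together-ai-8.1b-21b"),
    (41, "together-ai-21.1b-41b"),
    (80, "together-ai-41.1b-80b"),
    (110, "together-ai-81.1b-110b"),
]

def together_pricing_key(model_name: str) -> Optional[str]:
    # Pull the "<N>b" parameter-count token out of the model name, if any.
    match = re.search(r"(\d+(?:\.\d+)?)[bB]", model_name)
    if not match:
        return None
    size_b = float(match.group(1))
    for upper_bound, key in CHAT_TIERS:
        if size_b <= upper_bound:
            return key
    return None

print(together_pricing_key("together_ai/meta-llama/Llama-3-70b-chat-hf"))
# -> "together-ai-41.1b-80b"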