From be265fbb151a711c7ba67e469ae143d4951169dd Mon Sep 17 00:00:00 2001
From: lucca
Date: Thu, 4 Apr 2024 16:58:51 -0300
Subject: [PATCH] initial

---
 cookbook/benchmark/readme.md                         |  1 +
 docs/my-website/docs/completion/prompt_formatting.md |  2 +-
 docs/my-website/docs/providers/cohere.md             |  1 +
 litellm/model_prices_and_context_window_backup.json  | 10 ++++++++++
 model_prices_and_context_window.json                 | 10 ++++++++++
 5 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/cookbook/benchmark/readme.md b/cookbook/benchmark/readme.md
index e573f76c2..a543d9101 100644
--- a/cookbook/benchmark/readme.md
+++ b/cookbook/benchmark/readme.md
@@ -87,6 +87,7 @@
 | command-light | cohere | 0.00003 |
 | command-medium-beta | cohere | 0.00003 |
 | command-xlarge-beta | cohere | 0.00003 |
+| command-r-plus | cohere | 0.000018 |
 | j2-ultra | ai21 | 0.00003 |
 | ai21.j2-ultra-v1 | bedrock | 0.0000376 |
 | gpt-4-1106-preview | openai | 0.00004 |
diff --git a/docs/my-website/docs/completion/prompt_formatting.md b/docs/my-website/docs/completion/prompt_formatting.md
index 0681de739..ac62566b6 100644
--- a/docs/my-website/docs/completion/prompt_formatting.md
+++ b/docs/my-website/docs/completion/prompt_formatting.md
@@ -72,7 +72,7 @@ Here's the code for how we format all providers. Let us know how we can improve
 | Anthropic | `claude-instant-1`, `claude-instant-1.2`, `claude-2` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/llms/anthropic.py#L84)
 | OpenAI Text Completion | `text-davinci-003`, `text-curie-001`, `text-babbage-001`, `text-ada-001`, `babbage-002`, `davinci-002`, | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/main.py#L442)
 | Replicate | all model names starting with `replicate/` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/llms/replicate.py#L180)
-| Cohere | `command-nightly`, `command`, `command-light`, `command-medium-beta`, `command-xlarge-beta` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/llms/cohere.py#L115)
+| Cohere | `command-nightly`, `command`, `command-light`, `command-medium-beta`, `command-xlarge-beta`, `command-r-plus` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/llms/cohere.py#L115)
 | Huggingface | all model names starting with `huggingface/` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/llms/huggingface_restapi.py#L186)
 | OpenRouter | all model names starting with `openrouter/` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/main.py#L611)
 | AI21 | `j2-mid`, `j2-light`, `j2-ultra` | [Code](https://github.com/BerriAI/litellm/blob/721564c63999a43f96ee9167d0530759d51f8d45/litellm/llms/ai21.py#L107)
diff --git a/docs/my-website/docs/providers/cohere.md b/docs/my-website/docs/providers/cohere.md
index 71763e30d..a37cdb8aa 100644
--- a/docs/my-website/docs/providers/cohere.md
+++ b/docs/my-website/docs/providers/cohere.md
@@ -47,6 +47,7 @@ for chunk in response:
 |------------|----------------|
 | command-r | `completion('command-r', messages)` |
 | command-light | `completion('command-light', messages)` |
+| command-r-plus | `completion('command-r-plus', messages)` |
 | command-medium | `completion('command-medium', messages)` |
 | command-medium-beta | `completion('command-medium-beta', messages)` |
 | command-xlarge-nightly | `completion('command-xlarge-nightly', messages)` |
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index d3eb93d72..e531d725a 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -1189,6 +1189,16 @@
         "litellm_provider": "cohere_chat",
         "mode": "chat"
     },
+    "command-r-plus": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "cohere_chat",
+        "mode": "chat",
+        "supports_function_calling": true
+    },
     "command-nightly": {
         "max_tokens": 4096,
         "max_input_tokens": 4096,
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index d3eb93d72..e531d725a 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -1189,6 +1189,16 @@
         "litellm_provider": "cohere_chat",
         "mode": "chat"
     },
+    "command-r-plus": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "cohere_chat",
+        "mode": "chat",
+        "supports_function_calling": true
+    },
     "command-nightly": {
         "max_tokens": 4096,
         "max_input_tokens": 4096,
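
For reviewers, a minimal sketch of exercising the model this patch registers, following the `completion('command-r-plus', messages)` usage documented in the cohere.md table above. The API key value and the prompt are placeholders, not part of the patch.

```python
# Minimal sketch: calling the newly registered command-r-plus model via litellm.
# Assumes litellm is installed and a valid Cohere API key is available.
import os

from litellm import completion

os.environ["COHERE_API_KEY"] = "cohere-api-key"  # placeholder; use a real key

response = completion(
    model="command-r-plus",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
print(response)
```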