(feat) add groq api pricing, models

This commit is contained in:
ishaan-jaff 2024-02-29 16:11:22 -08:00
parent d2115d5a17
commit 4bfefd2b08
3 changed files with 31 additions and 0 deletions

View file

@@ -1,6 +1,8 @@
# Groq
https://groq.com/
**We support ALL Groq models, just set `groq/` as a prefix when sending completion requests**
## API Key
```python
# env variable
@@ -47,3 +49,4 @@ We support ALL Groq models, just set `groq/` as a prefix when sending completion
| Model Name | Function Call |
|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| llama2-70b-4096 | `completion(model="groq/llama2-70b-4096", messages)` |
| mixtral-8x7b-32768 | `completion(model="groq/mixtral-8x7b-32768", messages)` |

View file

@@ -580,6 +580,20 @@
"litellm_provider": "mistral",
"mode": "embedding"
},
"groq/llama2-70b-4096": {
"max_tokens": 4096,
"input_cost_per_token": 0.00000070,
"output_cost_per_token": 0.00000080,
"litellm_provider": "groq",
"mode": "chat"
},
"groq/mixtral-8x7b-32768": {
"max_tokens": 32768,
"input_cost_per_token": 0.00000027,
"output_cost_per_token": 0.00000027,
"litellm_provider": "groq",
"mode": "chat"
},
"claude-instant-1.2": {
"max_tokens": 100000,
"max_output_tokens": 8191,

View file

@@ -580,6 +580,20 @@
"litellm_provider": "mistral",
"mode": "embedding"
},
"groq/llama2-70b-4096": {
"max_tokens": 4096,
"input_cost_per_token": 0.00000070,
"output_cost_per_token": 0.00000080,
"litellm_provider": "groq",
"mode": "chat"
},
"groq/mixtral-8x7b-32768": {
"max_tokens": 32768,
"input_cost_per_token": 0.00000027,
"output_cost_per_token": 0.00000027,
"litellm_provider": "groq",
"mode": "chat"
},
"claude-instant-1.2": {
"max_tokens": 100000,
"max_output_tokens": 8191,