Add groq/gemma-7b-it
parent d580b66a91
commit a9634b717c

2 changed files with 9 additions and 1 deletion
@@ -49,4 +49,5 @@ We support ALL Groq models, just set `groq/` as a prefix when sending completion

| Model Name         | Function Call                                            |
|--------------------|----------------------------------------------------------|
| llama2-70b-4096    | `completion(model="groq/llama2-70b-4096", messages)`     |
| mixtral-8x7b-32768 | `completion(model="groq/mixtral-8x7b-32768", messages)`  |
| gemma-7b-it        | `completion(model="groq/gemma-7b-it", messages)`         |
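For reference, a minimal usage sketch of the newly added model through LiteLLM's `completion` call. The API key handling and the example prompt are assumptions for illustration and are not part of this diff:

```python
# Minimal sketch: calling the newly added Groq Gemma model through LiteLLM.
# Assumes a valid Groq API key; the value below is a placeholder, not from this diff.
import os
from litellm import completion

os.environ["GROQ_API_KEY"] = "your-groq-api-key"  # placeholder

response = completion(
    model="groq/gemma-7b-it",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)
```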
@@ -631,6 +631,13 @@

        "litellm_provider": "groq",
        "mode": "chat"
    },
    "groq/gemma-7b-it": {
        "max_tokens": 8192,
        "input_cost_per_token": 0.00000010,
        "output_cost_per_token": 0.00000010,
        "litellm_provider": "groq",
        "mode": "chat"
    },
    "claude-instant-1.2": {
        "max_tokens": 100000,
        "max_output_tokens": 8191,
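As a rough illustration of how the new per-token prices translate into a request cost (the token counts below are hypothetical, not taken from this diff):

```python
# Back-of-the-envelope cost estimate for groq/gemma-7b-it using the prices
# added in this commit. Token counts are made-up example values.
INPUT_COST_PER_TOKEN = 0.00000010   # "input_cost_per_token"
OUTPUT_COST_PER_TOKEN = 0.00000010  # "output_cost_per_token"

prompt_tokens = 1_200      # hypothetical prompt size
completion_tokens = 300    # hypothetical completion size

cost = (prompt_tokens * INPUT_COST_PER_TOKEN
        + completion_tokens * OUTPUT_COST_PER_TOKEN)
print(f"Estimated cost: ${cost:.6f}")  # -> Estimated cost: $0.000150
```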