From a9634b717c40d9c651242f8df82a40cd630ecd79 Mon Sep 17 00:00:00 2001
From: USAGI <124278082+snekkenull@users.noreply.github.com>
Date: Fri, 15 Mar 2024 11:50:19 +0800
Subject: [PATCH] Add groq/gemma-7b-it

---
 docs/my-website/docs/providers/groq.md | 3 ++-
 model_prices_and_context_window.json   | 7 +++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/docs/my-website/docs/providers/groq.md b/docs/my-website/docs/providers/groq.md
index e09cf9f8a..d8a4fded4 100644
--- a/docs/my-website/docs/providers/groq.md
+++ b/docs/my-website/docs/providers/groq.md
@@ -49,4 +49,5 @@ We support ALL Groq models, just set `groq/` as a prefix when sending completion
 | Model Name               | Function Call                                            |
 |--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | llama2-70b-4096          | `completion(model="groq/llama2-70b-4096", messages)`     |
-| mixtral-8x7b-32768       | `completion(model="groq/mixtral-8x7b-32768", messages)`  |
+| mixtral-8x7b-32768       | `completion(model="groq/mixtral-8x7b-32768", messages)`  |
+| gemma-7b-it              | `completion(model="groq/gemma-7b-it", messages)`         |
\ No newline at end of file
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index ddacbf05c..0a90c91ca 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -631,6 +631,13 @@
         "litellm_provider": "groq",
         "mode": "chat"
     },
+    "groq/gemma-7b-it": {
+        "max_tokens": 8192,
+        "input_cost_per_token": 0.00000010,
+        "output_cost_per_token": 0.00000010,
+        "litellm_provider": "groq",
+        "mode": "chat"
+    },
     "claude-instant-1.2": {
         "max_tokens": 100000,
         "max_output_tokens": 8191,
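
For reference, outside the patch itself: a minimal Python sketch of calling the newly added model through litellm, mirroring the usage column in the docs table above. The API key value and the prompt are illustrative assumptions; only the model name and the `completion(...)` call come from the patch.

    import os
    from litellm import completion

    # Assumes a valid Groq API key is exported; the value below is a placeholder.
    os.environ["GROQ_API_KEY"] = "your-groq-api-key"

    # The "groq/" prefix routes the request to the Groq provider, as described
    # in docs/my-website/docs/providers/groq.md.
    response = completion(
        model="groq/gemma-7b-it",
        messages=[{"role": "user", "content": "Hello, Gemma!"}],
    )

    # litellm returns an OpenAI-style response object.
    print(response.choices[0].message.content)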