From f3144dd9cf790eda0c1ee070c01af087668e589a Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 27 Feb 2024 17:44:35 -0800
Subject: [PATCH 1/2] (docs) vertex ai

---
 docs/my-website/docs/providers/vertex.md      | 19 +++++++++++--------
 ...odel_prices_and_context_window_backup.json |  9 +++++++++
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md
index 70ee9eca9..20225473b 100644
--- a/docs/my-website/docs/providers/vertex.md
+++ b/docs/my-website/docs/providers/vertex.md
@@ -25,14 +25,17 @@ response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "co
 1. Modify the config.yaml
 
 ```yaml
-litellm_settings:
-  vertex_project: "hardy-device-38811" # Your Project ID
-  vertex_location: "us-central1" # proj location
-
-model_list:
-  -model_name: team1-gemini-pro
-   litellm_params:
-     model: gemini-pro
+model_list:
+  - model_name: gemini-vision
+    litellm_params:
+      model: vertex_ai/gemini-1.0-pro-vision-001
+      vertex_project: "project-id"
+      vertex_location: "us-central1"
+  - model_name: gemini-vision
+    litellm_params:
+      model: vertex_ai/gemini-1.0-pro-vision-001
+      vertex_project: "project-id2"
+      vertex_location: "us-east1"
 ```
 
 2. Start the proxy
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 6c663200d..5bd0bcdff 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -687,6 +687,15 @@
         "litellm_provider": "vertex_ai-language-models",
         "mode": "chat"
     },
+    "gemini-1.5-pro-preview-0215": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "vertex_ai-language-models",
+        "mode": "chat"
+    },
     "gemini-pro-vision": {
         "max_tokens": 16384,
         "max_output_tokens": 2048,

From 7485fa797c7d2eee715eebbcc7de2e7cf7a8b265 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 27 Feb 2024 17:55:05 -0800
Subject: [PATCH 2/2] (docs) vertex ai litellm proxy

---
 docs/my-website/docs/providers/vertex.md | 32 ++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md
index 20225473b..d959498ce 100644
--- a/docs/my-website/docs/providers/vertex.md
+++ b/docs/my-website/docs/providers/vertex.md
@@ -1,3 +1,6 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
 # VertexAI - Google [Gemini, Model Garden]
 
 
@@ -22,8 +25,16 @@ response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "co
 
 ## OpenAI Proxy Usage
 
+Here's how to use Vertex AI with the LiteLLM Proxy Server.
+
 1. Modify the config.yaml
 
+<Tabs>
+
+<TabItem value="per-model" label="Different location per model">
+
+Use this when you need to set a different location for each Vertex model.
+
 ```yaml
 model_list:
   - model_name: gemini-vision
     litellm_params:
       model: vertex_ai/gemini-1.0-pro-vision-001
       vertex_project: "project-id"
       vertex_location: "us-central1"
   - model_name: gemini-vision
     litellm_params:
       model: vertex_ai/gemini-1.0-pro-vision-001
       vertex_project: "project-id2"
       vertex_location: "us-east1"
 ```
 
+</TabItem>
+
+<TabItem value="one-location" label="One location for all models">
+
+Use this when you have one Vertex project and location for all models.
+
+```yaml
+litellm_settings:
+  vertex_project: "hardy-device-38811" # Your Project ID
+  vertex_location: "us-central1" # Your project location
+
+model_list:
+  - model_name: team1-gemini-pro
+    litellm_params:
+      model: gemini-pro
+```
+
+</TabItem>
+
+</Tabs>
+
 2. Start the proxy
 
 ```bash
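
Once the proxy from step 2 is running, requests go to its OpenAI-compatible endpoint using a `model_name` from config.yaml. Below is a minimal client sketch, not part of the patch: the address `http://0.0.0.0:4000` and the key `sk-1234` are assumed placeholder defaults, not values the patch defines.

```python
# Minimal sketch: call the LiteLLM proxy's OpenAI-compatible endpoint.
# Assumes the proxy is listening at http://0.0.0.0:4000 and that
# "sk-1234" is a placeholder key accepted by the proxy.
import openai

client = openai.OpenAI(
    api_key="sk-1234",               # placeholder proxy key
    base_url="http://0.0.0.0:4000",  # assumed proxy address
)

response = client.chat.completions.create(
    model="gemini-vision",  # matches a model_name in config.yaml
    messages=[{"role": "user", "content": "Hello from the proxy"}],
)
print(response.choices[0].message.content)
```

Because both config entries in the "different location per model" tab share the `model_name` `gemini-vision`, the proxy treats them as one deployment group and can route a request like this to either region.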
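
The `litellm_params` in config.yaml are the arguments the proxy forwards to `litellm.completion()`, so the same per-model project/location routing can be reproduced directly in the SDK. A sketch, reusing the placeholder project ID and location from the config above:

```python
# Sketch: litellm_params from config.yaml map to completion() kwargs.
# "project-id" and "us-central1" are the placeholder values used above.
import litellm

response = litellm.completion(
    model="vertex_ai/gemini-1.0-pro-vision-001",
    messages=[{"role": "user", "content": "Hi"}],
    vertex_project="project-id",    # placeholder project ID
    vertex_location="us-central1",
)
print(response.choices[0].message.content)
```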
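
The JSON entry added in the first patch feeds LiteLLM's cost tracking. A quick sanity check, assuming `litellm.model_cost` is the in-memory copy of the pricing map (the preview model is priced at 0 for both input and output tokens):

```python
# Sketch: verify the new pricing entry is picked up after the change.
import litellm

entry = litellm.model_cost["gemini-1.5-pro-preview-0215"]
assert entry["input_cost_per_token"] == 0
assert entry["output_cost_per_token"] == 0
print(entry["max_input_tokens"])  # 1000000
```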