Merge pull request #2229 from BerriAI/litellm_vertex_docs
[Docs] Proxy - Pass vertex_params
commit f90533a3b3

2 changed files with 44 additions and 0 deletions
File 1: the VertexAI provider documentation page (file path not shown in this capture).

@@ -1,3 +1,6 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
 # VertexAI - Google [Gemini, Model Garden]
 
 <a target="_blank" href="https://colab.research.google.com/github/BerriAI/litellm/blob/main/cookbook/liteLLM_VertextAI_Example.ipynb">
@@ -22,8 +25,36 @@ response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "co
 ## OpenAI Proxy Usage
 
+Here's how to use Vertex AI with the LiteLLM Proxy Server.
+
 1. Modify the config.yaml
 
+<Tabs>
+
+<TabItem value="completion_param" label="Different location per model">
+
+Use this when you need to set a different location for each vertex model.
+
+```yaml
+model_list:
+- model_name: gemini-vision
+  litellm_params:
+    model: vertex_ai/gemini-1.0-pro-vision-001
+    vertex_project: "project-id"
+    vertex_location: "us-central1"
+- model_name: gemini-vision
+  litellm_params:
+    model: vertex_ai/gemini-1.0-pro-vision-001
+    vertex_project: "project-id2"
+    vertex_location: "us-east"
+```
+
+</TabItem>
+
+<TabItem value="litellm_param" label="One location all vertex models">
+
+Use this when you have one vertex location for all models.
+
 ```yaml
 litellm_settings:
   vertex_project: "hardy-device-38811" # Your Project ID
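For context on what these `litellm_params` amount to: the per-model block is what the proxy forwards to the underlying SDK call, so the "Different location per model" tab above is roughly equivalent to calling `litellm.completion` directly. A minimal sketch, assuming `vertex_project` and `vertex_location` are accepted as keyword arguments (this commit itself only touches the proxy config); the remaining hunk of this file continues below.

```python
import litellm

# Sketch: what the first model entry in the config above amounts to as a direct
# SDK call. Passing vertex_project / vertex_location as keyword arguments is an
# assumption based on the config keys, not something shown in this commit.
response = litellm.completion(
    model="vertex_ai/gemini-1.0-pro-vision-001",
    messages=[{"role": "user", "content": "Hello from Vertex AI"}],
    vertex_project="project-id",    # from the first model entry
    vertex_location="us-central1",  # region configured for that entry
)
print(response)
```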
@@ -35,6 +66,10 @@ model_list:
     model: gemini-pro
 ```
 
+</TabItem>
+
+</Tabs>
+
 2. Start the proxy
 
 ```bash
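The start command itself is cut off by the hunk boundary (typically something like `litellm --config config.yaml`, though the exact invocation isn't shown here). Once the proxy is up, requests go through its OpenAI-compatible API; a minimal sketch in Python, where the proxy address `http://0.0.0.0:4000` and the placeholder API key are assumptions rather than anything this commit specifies:

```python
from openai import OpenAI

# Sketch: call the proxy's OpenAI-compatible endpoint using the model_name
# defined in config.yaml above. The base_url/port and the dummy key are
# assumptions; adjust them to however the proxy was actually started.
client = OpenAI(
    api_key="sk-anything",
    base_url="http://0.0.0.0:4000",
)

response = client.chat.completions.create(
    model="gemini-vision",  # model_name from the config above
    messages=[{"role": "user", "content": "Hello from behind the LiteLLM proxy"}],
)
print(response.choices[0].message.content)
```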
File 2: the model cost map (JSON).

@@ -687,6 +687,15 @@
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat"
   },
+  "gemini-1.5-pro-preview-0215": {
+    "max_tokens": 8192,
+    "max_input_tokens": 1000000,
+    "max_output_tokens": 8192,
+    "input_cost_per_token": 0,
+    "output_cost_per_token": 0,
+    "litellm_provider": "vertex_ai-language-models",
+    "mode": "chat"
+  },
   "gemini-pro-vision": {
     "max_tokens": 16384,
     "max_output_tokens": 2048,
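The new `gemini-1.5-pro-preview-0215` entry registers the model's token limits and per-token prices (both zero for this preview model). Purely as an illustration of how an entry like this can be consumed, and not LiteLLM's actual cost-tracking code, here is a sketch that loads a cost map of this shape and estimates a request's cost; the file name is an assumption:

```python
import json

# Illustration only: read a cost-map entry shaped like the one added above and
# estimate a request's cost. The file name and this helper are assumptions,
# not part of this commit or LiteLLM's internal implementation.
with open("model_prices_and_context_window.json") as f:
    cost_map = json.load(f)

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    entry = cost_map[model]
    return (prompt_tokens * entry["input_cost_per_token"]
            + completion_tokens * entry["output_cost_per_token"])

# Both rates are 0 for the preview entry, so this prints 0.0.
print(estimate_cost("gemini-1.5-pro-preview-0215", 1200, 300))
```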