Update gemini.md

Added an example for Gemini Pro Vision.

This commit is contained in:
parent 453c635d7b
commit c6b9cf55b5

1 changed file with 44 additions and 0 deletions
## Pre-requisites

* `pip install -q google-generativeai`

# Gemini-Pro

## Sample Usage

```python
import litellm
...
response = completion(
    ...
)
```
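The diff cuts off the body of this sample. As a point of reference only, here is a minimal runnable sketch of a Gemini-Pro call, assuming the standard LiteLLM `completion(model, messages)` signature and a Google AI Studio key in `GEMINI_API_KEY`; the lines elided above may differ in the actual file:

```python
import os

from litellm import completion

# Assumption: a Google AI Studio API key is available; the value below is a placeholder.
os.environ["GEMINI_API_KEY"] = "your-api-key"

# LiteLLM routes provider-prefixed model names ("gemini/...") to Google AI Studio
# and accepts OpenAI-style message lists.
response = completion(
    model="gemini/gemini-pro",
    messages=[{"role": "user", "content": "Say hi from LiteLLM."}],
)

print(response.choices[0].message.content)
```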
# Gemini-Pro-Vision

LiteLLM supports the following image types passed in `url`:

- Images with direct links - https://storage.googleapis.com/github-repo/img/gemini/intro/landmark3.jpg
- Images in local storage - ./localimage.jpeg (a local-file variation is sketched after the sample below)
## Sample Usage

```python
import os

import litellm
from dotenv import load_dotenv

# Load the environment variables from the .env file
load_dotenv()
os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")

prompt = "Describe the image in a few sentences."

# Note: you can pass either the URL or a local path of the image here.
image_url = "https://storage.googleapis.com/github-repo/img/gemini/intro/landmark3.jpg"

# Create the messages payload according to the documentation
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": prompt},
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    }
]

# Make the API call to the Gemini model
response = litellm.completion(
    model="gemini/gemini-pro-vision",
    messages=messages,
)

# Extract the response content
content = response.get("choices", [{}])[0].get("message", {}).get("content")

# Print the result
print(content)
```
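Per the image-types list above, a local file path should also be accepted in the `url` field. A minimal sketch of that variation, assuming `GEMINI_API_KEY` is already set as above and that a `./localimage.jpeg` file exists alongside the script (the filename is the list's own example, not a file shipped with LiteLLM):

```python
import litellm

# Same payload shape as the sample above, but image_url points at local storage.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the image in a few sentences."},
            {"type": "image_url", "image_url": {"url": "./localimage.jpeg"}},
        ],
    }
]

response = litellm.completion(
    model="gemini/gemini-pro-vision",
    messages=messages,
)
print(response.choices[0].message.content)
```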
## Chat Models

| Model Name | Function Call | Required OS Variables |
|------------|---------------|-----------------------|