forked from phoenix/litellm-mirror
docs(vertex.md): add vertex ai function calling to docs
This commit is contained in:
parent
7b4fca8fb7
commit
c9bb4b29c7
2 changed files with 49 additions and 12 deletions
|
@ -11,7 +11,7 @@ LiteLLM supports
|
|||
|
||||
:::info
|
||||
|
||||
Anthropic API fails requests when `max_tokens` are not passed. Due to this litellm passes `max_tokens=4096` when no `max_tokens` are passed
|
||||
Anthropic API fails requests when `max_tokens` are not passed. Due to this litellm passes `max_tokens=4096` when no `max_tokens` are passed.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -229,17 +229,6 @@ assert isinstance(
|
|||
|
||||
```
|
||||
|
||||
### Setting `anthropic-beta` Header in Requests
|
||||
|
||||
Pass the `extra_headers` param to litellm. All headers will be forwarded to the Anthropic API.
|
||||
|
||||
```python
|
||||
response = completion(
|
||||
model="anthropic/claude-3-opus-20240229",
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
)
|
||||
```
|
||||
|
||||
### Forcing Anthropic Tool Use
|
||||
|
||||
|
|
|
@ -449,6 +449,54 @@ print(response)
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Usage - Function Calling
|
||||
|
||||
LiteLLM supports Function Calling for Vertex AI Gemini models.
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
# set env
|
||||
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ".."
|
||||
os.environ["VERTEX_AI_PROJECT"] = ".."
|
||||
os.environ["VERTEX_AI_LOCATION"] = ".."
|
||||
|
||||
tools = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "get_current_weather",
|
||||
"description": "Get the current weather in a given location",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g. San Francisco, CA",
|
||||
},
|
||||
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
|
||||
},
|
||||
"required": ["location"],
|
||||
},
|
||||
},
|
||||
}
|
||||
]
|
||||
messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
|
||||
|
||||
response = completion(
|
||||
    model="vertex_ai/gemini-pro",
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
)
|
||||
# Add any assertions here, to check response args
|
||||
print(response)
|
||||
assert isinstance(response.choices[0].message.tool_calls[0].function.name, str)
|
||||
assert isinstance(
|
||||
response.choices[0].message.tool_calls[0].function.arguments, str
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
|
||||
## Chat Models
|
||||
| Model Name | Function Call |
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue