forked from phoenix/litellm-mirror
docs(instructor.md): tutorial on using litellm with instructor
This commit is contained in:
parent
f98aead602
commit
53695943e3
2 changed files with 96 additions and 0 deletions
95
docs/my-website/docs/tutorials/instructor.md
Normal file
95
docs/my-website/docs/tutorials/instructor.md
Normal file
|
@ -0,0 +1,95 @@
|
|||
# Instructor - Function Calling
|
||||
|
||||
Use LiteLLM Router with [jxnl's instructor library](https://github.com/jxnl/instructor) for function calling in prod.
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
import os
import litellm
|
||||
from litellm import Router
|
||||
import instructor
|
||||
from pydantic import BaseModel
|
||||
|
||||
litellm.set_verbose = True # 👈 print DEBUG LOGS
|
||||
|
||||
client = instructor.patch(
|
||||
Router(
|
||||
model_list=[
|
||||
{
|
||||
"model_name": "gpt-3.5-turbo", # openai model name
|
||||
"litellm_params": { # params for litellm completion/embedding call
|
||||
"model": "azure/chatgpt-v-2",
|
||||
"api_key": os.getenv("AZURE_API_KEY"),
|
||||
"api_version": os.getenv("AZURE_API_VERSION"),
|
||||
"api_base": os.getenv("AZURE_API_BASE"),
|
||||
},
|
||||
}
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class UserDetail(BaseModel):
|
||||
name: str
|
||||
age: int
|
||||
|
||||
|
||||
user = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo",
|
||||
response_model=UserDetail,
|
||||
messages=[
|
||||
{"role": "user", "content": "Extract Jason is 25 years old"},
|
||||
],
|
||||
)
|
||||
|
||||
assert isinstance(user, UserDetail)
|
||||
assert user.name == "Jason"
|
||||
assert user.age == 25
|
||||
|
||||
print(f"user: {user}")
|
||||
```
|
||||
|
||||
## Async Calls
|
||||
|
||||
```python
|
||||
import os
import litellm
|
||||
from litellm import Router
|
||||
import instructor, asyncio
|
||||
from pydantic import BaseModel
|
||||
|
||||
aclient = instructor.apatch(
|
||||
Router(
|
||||
model_list=[
|
||||
{
|
||||
"model_name": "gpt-3.5-turbo",
|
||||
"litellm_params": {
|
||||
"model": "azure/chatgpt-v-2",
|
||||
"api_key": os.getenv("AZURE_API_KEY"),
|
||||
"api_version": os.getenv("AZURE_API_VERSION"),
|
||||
"api_base": os.getenv("AZURE_API_BASE"),
|
||||
},
|
||||
}
|
||||
],
|
||||
default_litellm_params={"acompletion": True}, # 👈 IMPORTANT - tells litellm to route to async completion function.
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class UserExtract(BaseModel):
|
||||
name: str
|
||||
age: int
|
||||
|
||||
|
||||
async def main():
|
||||
model = await aclient.chat.completions.create(
|
||||
model="gpt-3.5-turbo",
|
||||
response_model=UserExtract,
|
||||
messages=[
|
||||
{"role": "user", "content": "Extract jason is 25 years old"},
|
||||
],
|
||||
)
|
||||
print(f"model: {model}")
|
||||
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
|
@ -188,6 +188,7 @@ const sidebars = {
|
|||
label: 'Tutorials',
|
||||
items: [
|
||||
'tutorials/azure_openai',
|
||||
'tutorials/instructor',
|
||||
'tutorials/oobabooga',
|
||||
"tutorials/gradio_integration",
|
||||
'tutorials/huggingface_codellama',
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue