From 552dbb8d0473a9fd8655e6c38f313a129598e64c Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 25 Mar 2024 08:35:11 -0700
Subject: [PATCH] docs(instructor.md): tutorial on using litellm with instructor

---
 docs/my-website/docs/tutorials/instructor.md | 95 ++++++++++++++++++++
 docs/my-website/sidebars.js                  |  1 +
 2 files changed, 96 insertions(+)
 create mode 100644 docs/my-website/docs/tutorials/instructor.md

diff --git a/docs/my-website/docs/tutorials/instructor.md b/docs/my-website/docs/tutorials/instructor.md
new file mode 100644
index 0000000000..906fdb4c6f
--- /dev/null
+++ b/docs/my-website/docs/tutorials/instructor.md
@@ -0,0 +1,95 @@
+# Instructor - Function Calling
+
+Use the LiteLLM Router with [jxnl's instructor library](https://github.com/jxnl/instructor) for function calling in production.
+
+## Usage
+
+```python
+import litellm
+from litellm import Router
+import instructor, os
+from pydantic import BaseModel
+
+litellm.set_verbose = True # 👈 print DEBUG LOGS
+
+client = instructor.patch(
+    Router(
+        model_list=[
+            {
+                "model_name": "gpt-3.5-turbo", # openai model name
+                "litellm_params": { # params for litellm completion/embedding call
+                    "model": "azure/chatgpt-v-2",
+                    "api_key": os.getenv("AZURE_API_KEY"),
+                    "api_version": os.getenv("AZURE_API_VERSION"),
+                    "api_base": os.getenv("AZURE_API_BASE"),
+                },
+            }
+        ]
+    )
+)
+
+
+class UserDetail(BaseModel):
+    name: str
+    age: int
+
+
+user = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    response_model=UserDetail,
+    messages=[
+        {"role": "user", "content": "Extract Jason is 25 years old"},
+    ],
+)
+
+assert isinstance(user, UserDetail)
+assert user.name == "Jason"
+assert user.age == 25
+
+print(f"user: {user}")
+```
+
+## Async Calls
+
+```python
+import litellm
+from litellm import Router
+import instructor, asyncio, os
+from pydantic import BaseModel
+
+aclient = instructor.apatch(
+    Router(
+        model_list=[
+            {
+                "model_name": "gpt-3.5-turbo",
+                "litellm_params": {
+                    "model": "azure/chatgpt-v-2",
+                    "api_key": os.getenv("AZURE_API_KEY"),
+                    "api_version": os.getenv("AZURE_API_VERSION"),
+                    "api_base": os.getenv("AZURE_API_BASE"),
+                },
+            }
+        ],
+        default_litellm_params={"acompletion": True}, # 👈 IMPORTANT - tells litellm to route to the async completion function.
+    )
+)
+
+
+class UserExtract(BaseModel):
+    name: str
+    age: int
+
+
+async def main():
+    model = await aclient.chat.completions.create(
+        model="gpt-3.5-turbo",
+        response_model=UserExtract,
+        messages=[
+            {"role": "user", "content": "Extract jason is 25 years old"},
+        ],
+    )
+    print(f"model: {model}")
+
+
+asyncio.run(main())
+```
\ No newline at end of file
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 6d871b4903..b11ed9d294 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -188,6 +188,7 @@ const sidebars = {
       label: 'Tutorials',
       items: [
         'tutorials/azure_openai',
+        'tutorials/instructor',
         'tutorials/oobabooga',
         "tutorials/gradio_integration",
         'tutorials/huggingface_codellama',