From 8440791e047f51cd13f90b46b97e86e3fd1e6267 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Wed, 6 Sep 2023 13:08:28 -0700 Subject: [PATCH] add batch_completions to docs --- docs/my-website/docs/completion/batching.md | 34 +++++++++++++++++++++ docs/my-website/sidebars.js | 9 +++++- 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 docs/my-website/docs/completion/batching.md diff --git a/docs/my-website/docs/completion/batching.md b/docs/my-website/docs/completion/batching.md new file mode 100644 index 000000000..4b58a51b2 --- /dev/null +++ b/docs/my-website/docs/completion/batching.md @@ -0,0 +1,34 @@ +# Batching Completion Calls - batch_completion + +Batch Completion allows you to pass a batch of completion() requests to process multiple `messages` with a single function call. + +## Example Code +```python +import litellm +import os +from litellm import batch_completion + +os.environ['ANTHROPIC_API_KEY'] = "" + + +responses = batch_completion( + model="claude-2", + messages = [ + [ + { + "role": "user", + "content": "good morning? " + } + ], + [ + { + "role": "user", + "content": "what's the time? " + } + ] + ] +) +``` + + +In the batch_completion method, you provide a list of `messages` where each sub-list of messages is passed to `litellm.completion()`, allowing you to process multiple prompts efficiently with a single function call. \ No newline at end of file diff --git a/docs/my-website/docs/completion/batching.md b/docs/my-website/sidebars.js index 61901a009..86f443366 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -22,7 +22,14 @@ const sidebars = { { type: "category", label: "Completion()", - items: ["completion/input", "completion/output", "completion/model_alias", "completion/reliable_completions", "completion/stream", "completion/batching"], + items: [ + "completion/input", + "completion/output", + "completion/model_alias", + "completion/reliable_completions", + "completion/stream", + "completion/batching" + ], }, { type: "category",