diff --git a/docs/my-website/docs/completion/batching.md b/docs/my-website/docs/completion/batching.md new file mode 100644 index 000000000..4b58a51b2 --- /dev/null +++ b/docs/my-website/docs/completion/batching.md @@ -0,0 +1,34 @@ +# Batching Completion Calls - batch_completion + +Batch Completion allows you to pass a batch of completion() requests to process multiple `messages` with a single function call. + +## Example Code +```python +import litellm +import os +from litellm import batch_completion + +os.environ['ANTHROPIC_API_KEY'] = "" + + +responses = batch_completion( + model="claude-2", + messages = [ + [ + { + "role": "user", + "content": "good morning? " + } + ], + [ + { + "role": "user", + "content": "what's the time? " + } + ] + ] +) +``` + + +In the batch_completion method, you provide a list of `messages` where each sub-list of messages is passed to `litellm.completion()`, allowing you to process multiple prompts efficiently with a single function call. \ No newline at end of file diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 61901a009..86f443366 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -22,7 +22,14 @@ const sidebars = { { type: "category", label: "Completion()", - items: ["completion/input", "completion/output", "completion/model_alias", "completion/reliable_completions", "completion/stream"], + items: [ + "completion/input", + "completion/output", + "completion/model_alias", + "completion/reliable_completions", + "completion/stream", + "completion/batching" + ], }, { type: "category",