From b1ed53de72bcd7a757bff395aa72a4178496d09c Mon Sep 17 00:00:00 2001
From: Krish Dholakia
Date: Mon, 11 Dec 2023 08:30:55 -0800
Subject: [PATCH] Update README.md

---
 README.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/README.md b/README.md
index cc5c1a3a54..82cdceed5d 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,22 @@ response = completion(model="command-nightly", messages=messages)
 print(response)
 ```
 
+## Async ([Docs](https://docs.litellm.ai/docs/completion/stream#async-completion))
+
+```python
+from litellm import acompletion
+import asyncio
+
+async def test_get_response():
+    user_message = "Hello, how are you?"
+    messages = [{"content": user_message, "role": "user"}]
+    response = await acompletion(model="gpt-3.5-turbo", messages=messages)
+    return response
+
+response = asyncio.run(test_get_response())
+print(response)
+```
+
 ## Streaming ([Docs](https://docs.litellm.ai/docs/completion/stream))
 liteLLM supports streaming the model response back, pass `stream=True` to get a streaming iterator in response.
 Streaming is supported for all models (Bedrock, Huggingface, TogetherAI, Azure, OpenAI, etc.)
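
For reference, the streaming behavior mentioned in the trailing context of this hunk (`stream=True` returning an iterator) might look like the following minimal sketch. It assumes the documented litellm pattern where streamed chunks expose `choices[0].delta.content`; the model name and prompt here are illustrative, not taken from the patch.

```python
# Hedged sketch of litellm streaming usage, assuming the documented
# stream=True pattern; chunk fields follow the OpenAI-style delta format.
from litellm import completion

messages = [{"content": "Hello, how are you?", "role": "user"}]

# stream=True returns an iterator of incremental chunks rather than one response
response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
for chunk in response:
    # each chunk carries the next piece of generated text in delta.content
    print(chunk.choices[0].delta.content or "", end="")
```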