From 3aebc46ebf64decbd7fd13a2ef26f1e73f29cd73 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 21 Nov 2023 11:20:53 -0800
Subject: [PATCH] (docs) update streaming

---
 docs/my-website/docs/completion/stream.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/my-website/docs/completion/stream.md b/docs/my-website/docs/completion/stream.md
index 413076dc9..aeabfbb10 100644
--- a/docs/my-website/docs/completion/stream.md
+++ b/docs/my-website/docs/completion/stream.md
@@ -10,8 +10,8 @@ LiteLLM supports streaming the model response back by passing `stream=True` as a
 ```python
 from litellm import completion
 response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
-for chunk in response:
-    print(chunk['choices'][0]['delta'])
+for part in response:
+    print(part.choices[0].delta.content or "")
 ```
 
 