From 38ba3b9f0ce33fe546ac82b94834590064175e4d Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Tue, 19 Nov 2024 13:36:14 -0800
Subject: [PATCH] Fix fireworks stream completion

---
 .../providers/remote/inference/fireworks/fireworks.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llama_stack/providers/remote/inference/fireworks/fireworks.py b/llama_stack/providers/remote/inference/fireworks/fireworks.py
index 3ff50d378..02d4b82ef 100644
--- a/llama_stack/providers/remote/inference/fireworks/fireworks.py
+++ b/llama_stack/providers/remote/inference/fireworks/fireworks.py
@@ -214,10 +214,10 @@ class FireworksInferenceAdapter(
 
         async def _to_async_generator():
             if "messages" in params:
-                stream = await self._get_client().chat.completions.acreate(**params)
+                stream = self._get_client().chat.completions.acreate(**params)
             else:
-                stream = self._get_client().completion.create(**params)
-            for chunk in stream:
+                stream = self._get_client().completion.acreate(**params)
+            async for chunk in stream:
                 yield chunk
 
         stream = _to_async_generator()