forked from phoenix-oss/llama-stack-mirror
Fix fireworks stream completion
parent 05d1ead02f
commit 38ba3b9f0c
1 changed file with 3 additions and 3 deletions
@@ -214,10 +214,10 @@ class FireworksInferenceAdapter(
         async def _to_async_generator():
             if "messages" in params:
-                stream = await self._get_client().chat.completions.acreate(**params)
+                stream = self._get_client().chat.completions.acreate(**params)
             else:
-                stream = self._get_client().completion.create(**params)
-            for chunk in stream:
+                stream = self._get_client().completion.acreate(**params)
+            async for chunk in stream:
                 yield chunk

         stream = _to_async_generator()
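For context, here is a minimal, self-contained sketch of the streaming pattern the fix restores. The stub classes and their acreate method are hypothetical stand-ins, not the real fireworks-ai SDK: in this sketch acreate is an async generator function, so calling it returns an async iterator directly. Under that assumption, the pre-fix code fails on both counts: await-ing the result of acreate raises a TypeError, and a plain for loop cannot iterate an async iterator, which is what async for fixes.

import asyncio
from typing import Any, AsyncIterator


class _StubCompletions:
    """Hypothetical stand-in for client.chat.completions in this sketch."""

    async def acreate(self, **params: Any) -> AsyncIterator[str]:
        # An async generator function: calling it returns an async
        # iterator immediately, so the caller must NOT `await` it.
        for token in ("Hello", ",", " ", "stream", "!"):
            await asyncio.sleep(0)  # simulate waiting on the network
            yield token


class _StubClient:
    """Hypothetical stand-in for the object returned by _get_client()."""

    def __init__(self) -> None:
        self.completions = _StubCompletions()


async def _to_async_generator(client: _StubClient, params: dict) -> AsyncIterator[str]:
    # Mirrors the fixed code path from the diff: call acreate without
    # `await`, then consume the stream with `async for`.
    stream = client.completions.acreate(**params)
    async for chunk in stream:
        yield chunk


async def main() -> None:
    client = _StubClient()
    async for chunk in _to_async_generator(client, {"model": "stub"}):
        print(chunk, end="")
    print()


if __name__ == "__main__":
    asyncio.run(main())

Read against the diff under the same assumption, all three changes serve one purpose: dropping the await, switching completion.create to completion.acreate, and replacing for with async for make both branches consume the stream asynchronously instead of blocking the event loop.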