forked from phoenix-oss/llama-stack-mirror
Fix fireworks stream completion
This commit is contained in:
parent
05d1ead02f
commit
38ba3b9f0c
1 changed file with 3 additions and 3 deletions
|
@ -214,10 +214,10 @@ class FireworksInferenceAdapter(
|
||||||
|
|
||||||
async def _to_async_generator():
|
async def _to_async_generator():
|
||||||
if "messages" in params:
|
if "messages" in params:
|
||||||
stream = await self._get_client().chat.completions.acreate(**params)
|
stream = self._get_client().chat.completions.acreate(**params)
|
||||||
else:
|
else:
|
||||||
stream = self._get_client().completion.create(**params)
|
stream = self._get_client().completion.acreate(**params)
|
||||||
for chunk in stream:
|
async for chunk in stream:
|
||||||
yield chunk
|
yield chunk
|
||||||
|
|
||||||
stream = _to_async_generator()
|
stream = _to_async_generator()
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue