forked from phoenix/litellm-mirror
(feat) ollama_chat add async stream
This commit is contained in:
parent
aea7faa2c1
commit
837ce269ae
1 changed file with 1 addition and 1 deletion
|
@ -263,7 +263,7 @@ async def ollama_async_streaming(url, data, model_response, encoding, logging_ob
|
|||
streamwrapper = litellm.CustomStreamWrapper(
|
||||
completion_stream=response.aiter_lines(),
|
||||
model=data["model"],
|
||||
custom_llm_provider="ollama",
|
||||
custom_llm_provider="ollama_chat",
|
||||
logging_obj=logging_obj,
|
||||
)
|
||||
async for transformed_chunk in streamwrapper:
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue