mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 11:43:54 +00:00
Fix bug when iterating over lines in ollama response
`async for line in resp.content.iter_any()` will return incomplete lines when the lines are long, and that results in an exception being thrown by `json.loads()` when it tries to parse the incomplete JSON. The default behavior of the stream reader for aiohttp response objects is to iterate over lines, so simply removing `.iter_any()` fixes the bug.
This commit is contained in:
parent
a419d59542
commit
e214e6ab47
1 changed file with 1 addition and 1 deletion
|
@ -195,7 +195,7 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
|
||||||
raise OllamaError(status_code=resp.status, message=text)
|
raise OllamaError(status_code=resp.status, message=text)
|
||||||
|
|
||||||
completion_string = ""
|
completion_string = ""
|
||||||
async for line in resp.content.iter_any():
|
async for line in resp.content:
|
||||||
if line:
|
if line:
|
||||||
try:
|
try:
|
||||||
json_chunk = line.decode("utf-8")
|
json_chunk = line.decode("utf-8")
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue