forked from phoenix/litellm-mirror
fix(azure.py): fix error handling for openai/azure streaming
parent b6bc75e27a
commit a9ed768991

4 changed files with 32 additions and 8 deletions
@@ -293,8 +293,7 @@ class OpenAIChatCompletion(BaseLLM):
         openai_client = client
         response = openai_client.chat.completions.create(**data)
         streamwrapper = CustomStreamWrapper(completion_stream=response, model=model, custom_llm_provider="openai",logging_obj=logging_obj)
-        for transformed_chunk in streamwrapper:
-            yield transformed_chunk
+        return streamwrapper

     async def async_streaming(self,
                               logging_obj,
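The hunk replaces the for/yield loop with a direct return of the CustomStreamWrapper. The practical difference is exception timing: a function containing yield is a generator, so its body, including the chat.completions.create call, does not run until the first chunk is pulled, and any API error surfaces mid-iteration instead of at call time. Below is a minimal, self-contained sketch of that behavior; fake_create, broken_streaming, and fixed_streaming are hypothetical stand-ins, not litellm code.

# Minimal sketch: why returning the wrapper beats yielding from it.
# fake_create is a hypothetical stand-in for
# openai_client.chat.completions.create; this is not litellm code.

def fake_create(**data):
    raise RuntimeError("simulated OpenAI API error")

def broken_streaming(data):
    # Generator: the body is deferred, so fake_create only runs
    # once the caller starts iterating.
    response = fake_create(**data)
    yield from response

def fixed_streaming(data):
    # Plain function: fake_create runs eagerly, so the error
    # propagates to the caller immediately.
    response = fake_create(**data)
    return iter(response)

try:
    stream = broken_streaming({})   # returns a generator, no error yet
    next(stream)                    # error only surfaces here
except RuntimeError as err:
    print(f"raised during iteration: {err}")

try:
    fixed_streaming({})             # error surfaces at call time
except RuntimeError as err:
    print(f"raised at call time: {err}")

Assuming the commit message's intent, raising at call time lets the caller translate provider errors through its usual error handling instead of having them escape from the consumer's iteration loop.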