Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
fix(utils.py): don't raise an error on an OpenAI content filter during streaming - return the chunk as is
Fixes an issue where we would raise an error, whereas OpenAI returns the chunk with finish_reason set to 'content_filter'.
parent 5bec2bf513
commit a2fd8459fc
2 changed files with 50 additions and 15 deletions
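In practice, the change means a streaming caller sees the filter on the final chunk itself instead of catching an exception mid-stream. A minimal consumer-side sketch assuming the new pass-through behavior; the model name and prompt are illustrative, not from this commit:

```python
import litellm

response = litellm.completion(
    model="azure/my-deployment",  # illustrative deployment name
    messages=[{"role": "user", "content": "tell me a story"}],
    stream=True,
)

for chunk in response:
    if chunk.choices[0].finish_reason == "content_filter":
        # With this fix the filtered chunk is yielded to the caller,
        # matching the raw OpenAI stream, rather than raised as an error.
        print("\nstream stopped by the provider's content filter")
        break
    print(chunk.choices[0].delta.content or "", end="")
```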
litellm/utils.py
@@ -8840,21 +8840,6 @@ class CustomStreamWrapper:
                 if str_line.choices[0].finish_reason:
                     is_finished = True
                     finish_reason = str_line.choices[0].finish_reason
-                    if finish_reason == "content_filter":
-                        if hasattr(str_line.choices[0], "content_filter_result"):
-                            error_message = json.dumps(
-                                str_line.choices[0].content_filter_result
-                            )
-                        else:
-                            error_message = "{} Response={}".format(
-                                self.custom_llm_provider, str(dict(str_line))
-                            )
-
-                        raise litellm.ContentPolicyViolationError(
-                            message=error_message,
-                            llm_provider=self.custom_llm_provider,
-                            model=self.model,
-                        )
 
                 # checking for logprobs
                 if (