forked from phoenix/litellm-mirror
Merge pull request #1693 from BerriAI/litellm_cache_controls_for_keys
Fixes for model cost check and streaming
This commit is contained in:
commit
21450b9a73
1 changed file with 4 additions and 1 deletion
|
@ -7487,7 +7487,10 @@ class CustomStreamWrapper:
|
||||||
logprobs = None
|
logprobs = None
|
||||||
original_chunk = None # this is used for function/tool calling
|
original_chunk = None # this is used for function/tool calling
|
||||||
if len(str_line.choices) > 0:
|
if len(str_line.choices) > 0:
|
||||||
if str_line.choices[0].delta.content is not None:
|
if (
|
||||||
|
str_line.choices[0].delta is not None
|
||||||
|
and str_line.choices[0].delta.content is not None
|
||||||
|
):
|
||||||
text = str_line.choices[0].delta.content
|
text = str_line.choices[0].delta.content
|
||||||
else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai
|
else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai
|
||||||
original_chunk = str_line
|
original_chunk = str_line
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue