Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
fix(utils.py): stream_options fix

parent 344268e053
commit 8dbe0559dd

2 changed files with 6 additions and 6 deletions
@@ -1,7 +1,7 @@
 model_list:
   - model_name: claude-3-5-sonnet
     litellm_params:
-      model: anthropic/claude-3-5-sonnet
+      model: claude-3-haiku-20240307
 # - model_name: gemini-1.5-flash-gemini
 #   litellm_params:
 #     model: vertex_ai_beta/gemini-1.5-flash
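The config hunk above just points the local test alias at a cheaper Anthropic model. The streaming path the commit fixes can be exercised directly through the SDK; a minimal sketch, assuming an ANTHROPIC_API_KEY in the environment (the prompt and the loop are illustrative, not taken from the diff):

```python
import litellm

# Stream a completion and request a trailing usage chunk via the
# OpenAI-style `stream_options` parameter this commit fixes.
response = litellm.completion(
    model="claude-3-haiku-20240307",
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
    stream_options={"include_usage": True},
)

for chunk in response:
    delta = chunk.choices[0].delta if chunk.choices else None
    if delta is not None and delta.content:
        print(delta.content, end="")
    # The final chunk carries the usage block built by CustomStreamWrapper.
    usage = getattr(chunk, "usage", None)
    if usage is not None:
        print("\n", usage)
```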
@@ -8786,11 +8786,11 @@ class CustomStreamWrapper:
             # return this for all models
             completion_obj = {"content": ""}
             if self.custom_llm_provider and self.custom_llm_provider == "anthropic":
-                from litellm.types.llms.bedrock import GenericStreamingChunk
+                from litellm.types.utils import GenericStreamingChunk as GChunk

                 if self.received_finish_reason is not None:
                     raise StopIteration
-                response_obj: GenericStreamingChunk = chunk
+                response_obj: GChunk = chunk
                 completion_obj["content"] = response_obj["text"]
                 if response_obj["is_finished"]:
                     self.received_finish_reason = response_obj["finish_reason"]
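The first utils.py hunk swaps the annotation on anthropic chunks from the Bedrock GenericStreamingChunk to the one in litellm.types.utils. The two types disagree on the usage key names (camelCase vs. OpenAI-style snake_case), which is presumably what broke stream_options; a minimal sketch of the mismatch, with plain dicts standing in for real chunks:

```python
# Bedrock-style usage block (camelCase), the shape the old annotation implied:
bedrock_usage = {"inputTokens": 10, "outputTokens": 5, "totalTokens": 15}

# OpenAI-style usage block (snake_case), the shape the new annotation and the
# reads in the next hunk agree on:
openai_usage = {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}

# Reading the Bedrock keys off an OpenAI-style block fails at runtime,
# something the annotation alone cannot catch once the wrong type is imported:
try:
    openai_usage["inputTokens"]
except KeyError as exc:
    print(f"missing key: {exc}")  # missing key: 'inputTokens'
```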
@@ -8802,9 +8802,9 @@ class CustomStreamWrapper:
                 ):
                     self.sent_stream_usage = True
                     model_response.usage = litellm.Usage(
-                        prompt_tokens=response_obj["usage"]["inputTokens"],
-                        completion_tokens=response_obj["usage"]["outputTokens"],
-                        total_tokens=response_obj["usage"]["totalTokens"],
+                        prompt_tokens=response_obj["usage"]["prompt_tokens"],
+                        completion_tokens=response_obj["usage"]["completion_tokens"],
+                        total_tokens=response_obj["usage"]["total_tokens"],
                     )

             if "tool_use" in response_obj and response_obj["tool_use"] is not None:
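Taken together, the two hunks pin down the chunk shape the wrapper now relies on. The sketch below reconstructs it from only the fields the diff touches (text, is_finished, finish_reason, usage, tool_use); the real GenericStreamingChunk in litellm.types.utils may define more fields:

```python
from typing import Optional, TypedDict


class ChunkUsageSketch(TypedDict):
    # OpenAI-style names, matching the litellm.Usage keyword
    # arguments used in the hunk above.
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class GenericStreamingChunkSketch(TypedDict):
    # Only the fields CustomStreamWrapper reads in this diff.
    text: str
    is_finished: bool
    finish_reason: str
    usage: Optional[ChunkUsageSketch]  # populated on the final chunk
    tool_use: Optional[dict]
```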