Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
(feat) set custom_llm_provider in stream chunk builder
commit 485f469518 (parent 99dcce1e0f)
1 changed file with 5 additions and 1 deletion
@@ -3266,6 +3266,10 @@ def stream_chunk_builder_text_completion(chunks: list, messages: Optional[List]
 
 
 def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
+    model_response = litellm.ModelResponse()
+    # set hidden params from chunk to model_response
+    if model_response is not None and hasattr(model_response, "_hidden_params"):
+        model_response._hidden_params = chunks[0].get("_hidden_params", {})
     id = chunks[0]["id"]
     object = chunks[0]["object"]
     created = chunks[0]["created"]
@@ -3436,5 +3440,5 @@ def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
         response["usage"]["prompt_tokens"] + response["usage"]["completion_tokens"]
     )
     return convert_to_model_response_object(
-        response_object=response, model_response_object=litellm.ModelResponse()
+        response_object=response, model_response_object=model_response
     )
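
In effect, the hidden params captured on the first streamed chunk now survive the rebuild: previously a fresh litellm.ModelResponse() was passed to convert_to_model_response_object, so _hidden_params was discarded. Below is a minimal sketch of how this surfaces to a caller, assuming the provider wrapper attaches _hidden_params to streamed chunks and that the dict carries a custom_llm_provider key (inferred from the commit title; the diff only shows the dict being copied):

    import litellm

    messages = [{"role": "user", "content": "Hello"}]

    # Stream a completion and collect the raw chunks.
    chunks = []
    for chunk in litellm.completion(
        model="gpt-3.5-turbo", messages=messages, stream=True
    ):
        chunks.append(chunk)

    # Reassemble the chunks into a single ModelResponse. After this commit,
    # _hidden_params is copied from chunks[0] onto the rebuilt response;
    # before it, the hidden params were lost in the rebuild.
    rebuilt = litellm.stream_chunk_builder(chunks, messages=messages)

    # "custom_llm_provider" as a key of _hidden_params is an assumption
    # based on the commit title, not something shown in the diff.
    print(rebuilt._hidden_params.get("custom_llm_provider"))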