fix: support streaming custom cost completion tracking

Krrish Dholakia 2024-01-22 13:41:22 -08:00
parent 82bbf336d5
commit 074ea17325
4 changed files with 58 additions and 11 deletions

@@ -3334,7 +3334,9 @@ def stream_chunk_builder_text_completion(chunks: list, messages: Optional[List]
     return response
 
 
-def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
+def stream_chunk_builder(
+    chunks: list, messages: Optional[list] = None, start_time=None, end_time=None
+):
     model_response = litellm.ModelResponse()
     # set hidden params from chunk to model_response
     if model_response is not None and hasattr(model_response, "_hidden_params"):
@@ -3509,5 +3511,8 @@ def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
         response["usage"]["prompt_tokens"] + response["usage"]["completion_tokens"]
     )
     return convert_to_model_response_object(
-        response_object=response, model_response_object=model_response
+        response_object=response,
+        model_response_object=model_response,
+        start_time=start_time,
+        end_time=end_time,
     )
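
For context, a minimal usage sketch of the new parameters. The model name, prompt, and the choice of datetime timestamps are illustrative assumptions; only stream_chunk_builder's start_time/end_time parameters come from this commit.

# Minimal sketch, not taken from the commit: the model, prompt, and use of
# datetime timestamps are assumptions; the start_time/end_time parameters of
# stream_chunk_builder are what this commit adds.
import datetime
import litellm

messages = [{"role": "user", "content": "Hello"}]

start_time = datetime.datetime.now()
chunks = []
for chunk in litellm.completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    chunks.append(chunk)
end_time = datetime.datetime.now()

# Rebuild a single ModelResponse from the streamed chunks. Forwarding
# start_time/end_time lets downstream cost logic see the call duration,
# e.g. for custom pricing based on seconds of runtime.
full_response = litellm.stream_chunk_builder(
    chunks, messages=messages, start_time=start_time, end_time=end_time
)
print(full_response.usage)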