mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00

commit fd6ccfca7d (parent ff12e023ae)
fix(test_text_completion.py): fix test

3 changed files with 14 additions and 8 deletions
@@ -3164,8 +3164,7 @@ def stream_chunk_builder_text_completion(chunks: list, messages: Optional[List]=
     else:
         completion_output = ""
     # # Update usage information if needed
-    print(f"INSIDE TEXT COMPLETION STREAM CHUNK BUILDER")
-    _usage = litellm.Usage
+    _usage = litellm.Usage()
     print(f"messages: {messages}")
     _usage.prompt_tokens = token_counter(
         model=model, messages=messages, count_response_tokens=True
@@ -3180,6 +3179,7 @@ def stream_chunk_builder_text_completion(chunks: list, messages: Optional[List]=
         _usage.prompt_tokens + _usage.completion_tokens
     )
     response["usage"] = _usage
     print(f"final usage: {_usage}")
     return litellm.TextCompletionResponse(**response)

 def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
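The functional change in the first hunk is `_usage = litellm.Usage` becoming `_usage = litellm.Usage()`: the old line bound the class object itself, so the token counts assigned afterwards would have landed on the class rather than on a per-response instance. A minimal, stand-alone sketch of the corrected pattern, using a stand-in Usage dataclass rather than litellm's own class:

# Stand-in sketch only; litellm's real Usage class and token counting differ.
from dataclasses import dataclass

@dataclass
class Usage:
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0

def build_usage(prompt_token_count: int, completion_token_count: int) -> Usage:
    usage = Usage()  # instantiate: assigning onto the bare class would leak shared state
    usage.prompt_tokens = prompt_token_count
    usage.completion_tokens = completion_token_count
    usage.total_tokens = usage.prompt_tokens + usage.completion_tokens
    return usage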
@@ -2930,9 +2930,10 @@ test_async_text_completion_stream()
 @pytest.mark.asyncio
 async def test_async_text_completion_chat_model_stream():
     try:
+        prompt = "good morning"
         response = await litellm.atext_completion(
             model="gpt-3.5-turbo",
-            prompt="good morning",
+            prompt=prompt,
             stream=True,
             max_tokens=10,
         )
@@ -2948,7 +2949,7 @@ async def test_async_text_completion_chat_model_stream():
         assert (
             num_finish_reason == 1
         ), f"expected only one finish reason. Got {num_finish_reason}"
-        response_obj = litellm.stream_chunk_builder(chunks=chunks)
+        response_obj = litellm.stream_chunk_builder(chunks=chunks, messages=[{"role": "user", "content": prompt}])
         cost = litellm.completion_cost(completion_response=response_obj)
         assert cost > 0
     except Exception as e:
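The test fix keeps the prompt in a variable and hands it back to `stream_chunk_builder` as a single user message, so the rebuilt response carries prompt tokens and `completion_cost` has something to price. A rough usage sketch mirroring the test, assuming an OpenAI key is configured and the litellm signatures shown in this diff:

import asyncio
import litellm

async def main():
    prompt = "good morning"
    response = await litellm.atext_completion(
        model="gpt-3.5-turbo",
        prompt=prompt,
        stream=True,
        max_tokens=10,
    )
    chunks = [chunk async for chunk in response]  # drain the stream
    # Rebuild the full response; without `messages` the prompt tokens
    # (and therefore the cost) cannot be counted.
    response_obj = litellm.stream_chunk_builder(
        chunks=chunks, messages=[{"role": "user", "content": prompt}]
    )
    print(litellm.completion_cost(completion_response=response_obj))

asyncio.run(main())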
@@ -2267,11 +2267,16 @@ def client(original_function):
                 and kwargs["complete_response"] == True
             ):
                 chunks = []
-                for idx, chunk in enumerate(result):
+                async for chunk in result:
                     chunks.append(chunk)
-                return litellm.stream_chunk_builder(
-                    chunks, messages=kwargs.get("messages", None)
-                )
+                if call_type == CallTypes.acompletion.value:
+                    return litellm.stream_chunk_builder(
+                        chunks, messages=kwargs.get("messages")
+                    )
+                elif call_type == CallTypes.atext_completion.value:
+                    return litellm.stream_chunk_builder(
+                        chunks, messages=[{"role": "user", "content": kwargs.get("prompt")}]
+                    )
             else:
                 return result
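In the wrapper, the stream is now drained with `async for` and the rebuild step branches on the call type: chat completions reuse the caller's `messages`, while text completions wrap the raw `prompt` in a single user message. A self-contained sketch of that dispatch with stand-in names (the real code lives inside litellm's `client` decorator and uses its `CallTypes` enum):

from typing import Any, AsyncIterator, Optional

def stream_chunk_builder(chunks: list, messages: Optional[list] = None) -> dict:
    # stand-in for litellm.stream_chunk_builder
    return {"chunks": chunks, "messages": messages}

async def rebuild_complete_response(result: AsyncIterator[Any], call_type: str, kwargs: dict):
    chunks = [chunk async for chunk in result]  # drain the async stream
    if call_type == "acompletion":
        # chat call: the caller already supplied chat messages
        return stream_chunk_builder(chunks, messages=kwargs.get("messages"))
    if call_type == "atext_completion":
        # text call: wrap the raw prompt as one user message so prompt tokens
        # can be counted when usage is rebuilt
        return stream_chunk_builder(
            chunks, messages=[{"role": "user", "content": kwargs.get("prompt")}]
        )
    return chunks  # unknown call type: hand back the raw chunks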