def test_text_completion_with_echo(stream): (#6401)

Ishaan Jaff 2024-10-23 23:27:19 +05:30 committed by GitHub
parent d063086bbf
commit 182adec7d0
3 changed files with 87 additions and 2 deletions


@@ -970,9 +970,9 @@ class EmbeddingResponse(OpenAIObject):
 
 class Logprobs(OpenAIObject):
     text_offset: List[int]
-    token_logprobs: List[float]
+    token_logprobs: List[Union[float, None]]
     tokens: List[str]
-    top_logprobs: List[Dict[str, float]]
+    top_logprobs: List[Union[Dict[str, float], None]]
 
 
 class TextChoices(OpenAIObject):
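Why the widening matters: when a completion is requested with echo=True plus logprobs, OpenAI-style APIs return null for the first echoed prompt token (there is no preceding context to score), so Logprobs must tolerate None entries. A minimal sketch of the new behavior, assuming Logprobs lives in litellm.types.utils alongside TextCompletionResponse and is constructible by keyword like litellm's other pydantic models:

# Sketch only: null logprob entries for the echoed first token now validate.
from litellm.types.utils import Logprobs

lp = Logprobs(
    text_offset=[0, 5],
    token_logprobs=[None, -12.203847],  # None for the echoed prompt token
    tokens=["hello", " crisp"],
    top_logprobs=[None, {",": -2.1568563}],
)
print(lp.token_logprobs)  # [None, -12.203847]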


@@ -0,0 +1,64 @@
+import json
+import os
+import sys
+from datetime import datetime
+from unittest.mock import AsyncMock
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+
+from litellm.types.utils import TextCompletionResponse
+
+
+def test_convert_dict_to_text_completion_response():
+    input_dict = {
+        "id": "cmpl-ALVLPJgRkqpTomotoOMi3j0cAaL4L",
+        "choices": [
+            {
+                "finish_reason": "length",
+                "index": 0,
+                "logprobs": {
+                    "text_offset": [0, 5],
+                    "token_logprobs": [None, -12.203847],
+                    "tokens": ["hello", " crisp"],
+                    "top_logprobs": [None, {",": -2.1568563}],
+                },
+                "text": "hello crisp",
+            }
+        ],
+        "created": 1729688739,
+        "model": "davinci-002",
+        "object": "text_completion",
+        "system_fingerprint": None,
+        "usage": {
+            "completion_tokens": 1,
+            "prompt_tokens": 1,
+            "total_tokens": 2,
+            "completion_tokens_details": None,
+            "prompt_tokens_details": None,
+        },
+    }
+
+    response = TextCompletionResponse(**input_dict)
+
+    assert response.id == "cmpl-ALVLPJgRkqpTomotoOMi3j0cAaL4L"
+    assert len(response.choices) == 1
+    assert response.choices[0].finish_reason == "length"
+    assert response.choices[0].index == 0
+    assert response.choices[0].text == "hello crisp"
+    assert response.created == 1729688739
+    assert response.model == "davinci-002"
+    assert response.object == "text_completion"
+    assert response.system_fingerprint is None
+    assert response.usage.completion_tokens == 1
+    assert response.usage.prompt_tokens == 1
+    assert response.usage.total_tokens == 2
+    assert response.usage.completion_tokens_details is None
+    assert response.usage.prompt_tokens_details is None
+
+    # Test logprobs
+    assert response.choices[0].logprobs.text_offset == [0, 5]
+    assert response.choices[0].logprobs.token_logprobs == [None, -12.203847]
+    assert response.choices[0].logprobs.tokens == ["hello", " crisp"]
+    assert response.choices[0].logprobs.top_logprobs == [None, {",": -2.1568563}]
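The None entries in token_logprobs and top_logprobs are exactly what the stricter pre-change annotations would have rejected. A hypothetical stand-alone reproduction with plain pydantic — OldLogprobs is an illustrative stand-in, not a litellm class:

from typing import Dict, List

from pydantic import BaseModel, ValidationError

class OldLogprobs(BaseModel):
    token_logprobs: List[float]  # pre-change: no None allowed
    top_logprobs: List[Dict[str, float]]

try:
    OldLogprobs(
        token_logprobs=[None, -12.203847],
        top_logprobs=[None, {",": -2.1568563}],
    )
except ValidationError as exc:
    # The null first entries are rejected, as they would have been
    # before this commit widened the annotations.
    print(f"{len(exc.errors())} validation errors")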


@@ -4259,3 +4259,24 @@ def test_completion_fireworks_ai_multiple_choices():
     print(response.choices)
     assert len(response.choices) == 4
+
+
+@pytest.mark.parametrize("stream", [True, False])
+def test_text_completion_with_echo(stream):
+    litellm.set_verbose = True
+    response = litellm.text_completion(
+        model="davinci-002",
+        prompt="hello",
+        max_tokens=1,  # only see the first token
+        stop="\n",  # stop at the first newline
+        logprobs=1,  # return log prob
+        echo=True,  # if True, return the prompt as well
+        stream=stream,
+    )
+    print(response)
+
+    if stream:
+        for chunk in response:
+            print(chunk)
+    else:
+        assert isinstance(response, TextCompletionResponse)
 
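For context, echo=True asks the provider to prepend the prompt tokens to the completion, which is how the leading null logprob reaches the client end to end. A hedged sketch of inspecting the non-streaming result, not part of the commit; it assumes an OPENAI_API_KEY and access to the davinci-002 completions model:

import litellm

response = litellm.text_completion(
    model="davinci-002",
    prompt="hello",
    max_tokens=1,
    logprobs=1,
    echo=True,
)
logprobs = response.choices[0].logprobs
print(logprobs.tokens)          # e.g. ["hello", " crisp"]
print(logprobs.token_logprobs)  # first entry is None for the echoed token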