fix testing

Krrish Dholakia 2023-09-27 21:31:00 -07:00
parent 94eba84cf0
commit 3658bdb61b
3 changed files with 4 additions and 9 deletions


@@ -1432,7 +1432,7 @@ def text_completion(*args, **kwargs):
kwargs["messages"] = messages
kwargs.pop("prompt")
response = completion(*args, **kwargs) # assume the response is the openai response object
-response_2 = {
+formatted_response_obj = {
"id": response["id"],
"object": "text_completion",
"created": response["created"],
@@ -1447,7 +1447,7 @@ def text_completion(*args, **kwargs):
],
"usage": response["usage"]
}
-return response_2
+return formatted_response_obj
else:
raise ValueError("please pass prompt into the `text_completion` endpoint - `text_completion(model, prompt='hello world')`")
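
For reference, a minimal sketch of the reshaping this function performs (only the rename of response_2 to formatted_response_obj is visible in the hunks above): a prompt call is turned into a chat completion and the result is repackaged in OpenAI text_completion form. The "model" and inner "choices" fields below are assumptions, since that part of the function falls outside the visible hunks.

def format_as_text_completion(response: dict) -> dict:
    # Sketch only: the visible hunks show "id", "object", "created", and
    # "usage"; "model" and the inner "choices" mapping are assumed here.
    return {
        "id": response["id"],
        "object": "text_completion",
        "created": response["created"],
        "model": response.get("model"),
        "choices": [
            {
                "text": response["choices"][0]["message"]["content"],
                "index": 0,
                "logprobs": None,
                "finish_reason": response["choices"][0].get("finish_reason"),
            }
        ],
        "usage": response["usage"],
    }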


@@ -346,12 +346,7 @@ def test_completion_openai_prompt():
response = text_completion(
model="gpt-3.5-turbo", prompt="What's the weather in SF?"
)
-response_str = response["choices"][0]["message"]["content"]
-response_str_2 = response.choices[0].message.content
-print(response)
-assert response_str == response_str_2
-assert type(response_str) == str
-assert len(response_str) > 1
+response_str = response["choices"][0]["text"]
except Exception as e:
pytest.fail(f"Error occurred: {e}")
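
The test now reads the completion from response["choices"][0]["text"], i.e. the text_completion shape rather than the chat-message shape. A self-contained version of the updated test, assuming the try: opener just above the visible hunk and that text_completion is importable from the package top level:

import pytest
from litellm import text_completion

def test_completion_openai_prompt():
    try:
        response = text_completion(
            model="gpt-3.5-turbo", prompt="What's the weather in SF?"
        )
        # The commit only reads the text field; the type check below is
        # added for illustration and is not part of the commit.
        response_str = response["choices"][0]["text"]
        assert isinstance(response_str, str)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")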


@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.791"
version = "0.1.792"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"