Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 19:24:27 +00:00)
Add test_engines_model_chat_completions
parent 2084cfd959
commit 4ce4927c0c
1 changed file with 34 additions and 1 deletion
@@ -160,7 +160,40 @@ def test_chat_completion(mock_acompletion, client_no_auth):
        pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")


# Run the test
@mock_patch_acompletion()
def test_engines_model_chat_completions(mock_acompletion, client_no_auth):
    global headers
    try:
        # Your test data
        test_data = {
            "model": "gpt-3.5-turbo",
            "messages": [
                {"role": "user", "content": "hi"},
            ],
            "max_tokens": 10,
        }

        print("testing proxy server with chat completions")
        response = client_no_auth.post("/engines/gpt-3.5-turbo/chat/completions", json=test_data)
        mock_acompletion.assert_called_once_with(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": "hi"},
            ],
            max_tokens=10,
            litellm_call_id=mock.ANY,
            litellm_logging_obj=mock.ANY,
            request_timeout=mock.ANY,
            specific_deployment=True,
            metadata=mock.ANY,
            proxy_server_request=mock.ANY,
        )
        print(f"response - {response.text}")
        assert response.status_code == 200
        result = response.json()
        print(f"Received response: {result}")
    except Exception as e:
        pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")


@mock_patch_acompletion()
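The added test relies on a mock_patch_acompletion() helper and a client_no_auth fixture defined elsewhere in the LiteLLM test suite. Below is a minimal, non-authoritative sketch of how such a helper could be built, assuming it simply wraps unittest.mock.patch around the proxy's async completion entry point; the patch target string and canned return value are illustrative assumptions, not the actual LiteLLM implementation.

# Hypothetical sketch only -- the patch target and return value are assumptions,
# not LiteLLM's real test helper.
from unittest import mock
from unittest.mock import AsyncMock


def mock_patch_acompletion():
    # Patch the proxy's async completion call so the test never hits a real LLM.
    return mock.patch(
        "litellm.proxy.proxy_server.llm_router.acompletion",  # assumed target path
        new_callable=AsyncMock,
        return_value={
            "id": "chatcmpl-mock",
            "object": "chat.completion",
            "choices": [
                {"index": 0, "message": {"role": "assistant", "content": "hi there"}}
            ],
        },
    )

Because mock.patch is applied as a decorator, it injects the created mock as the test's first argument (mock_acompletion), which is what lets the test assert on the exact keyword arguments the proxy forwarded to acompletion. Running pytest with -k test_engines_model_chat_completions against the test file would then exercise only this test.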