Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
(test) fix

Commit 13731f4bd8 (parent: 1ca50c5fa2)
1 changed file with 2 additions and 2 deletions
@@ -79,7 +79,7 @@ def test_chat_completion(client):
     proxy_server_request_object = litellm_params.get("proxy_server_request")

     assert config_model_info == {'mode': 'chat', 'input_cost_per_token': 0.0002}
-    assert proxy_server_request_object == {'url': 'http://testserver/chat/completions', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer None', 'content-length': '105', 'content-type': 'application/json'}, 'body': {'model': 'Azure OpenAI GPT-4 Canada', 'messages': [{'role': 'user', 'content': 'hi'}], 'max_tokens': 10}}
+    assert proxy_server_request_object == {'url': 'http://testserver/chat/completions', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '105', 'content-type': 'application/json'}, 'body': {'model': 'Azure OpenAI GPT-4 Canada', 'messages': [{'role': 'user', 'content': 'hi'}], 'max_tokens': 10}}
     result = response.json()
     print(f"Received response: {result}")
     print("\nPassed /chat/completions with Custom Logger!")
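The only change in this hunk is the expected authorization header: 'Bearer None' becomes 'Bearer sk-1234', i.e. the assertion now matches a request sent with an actual test key. As a rough sketch (not code from this repo), the request that produces the logged proxy_server_request_object would look something like the following, assuming litellm's FastAPI proxy app and Starlette's TestClient; the import path and client setup are assumptions, since the test file may use its own fixture:

# Minimal sketch, assuming the proxy app is importable as below.
from fastapi.testclient import TestClient
from litellm.proxy.proxy_server import app  # assumed import path

client = TestClient(app)  # requests show host 'testserver' and user-agent 'testclient'
response = client.post(
    "/chat/completions",
    json={
        "model": "Azure OpenAI GPT-4 Canada",
        "messages": [{"role": "user", "content": "hi"}],
        "max_tokens": 10,
    },
    # This header is what the updated assertion expects: 'Bearer sk-1234'.
    # A request sent with a None key interpolated into the header yields
    # 'Bearer None', which is what the old assertion matched.
    headers={"Authorization": "Bearer sk-1234"},
)
result = response.json()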
|
@@ -174,7 +174,7 @@ def test_embedding(client):
     litellm_params = kwargs.get("litellm_params")
     proxy_server_request = litellm_params.get("proxy_server_request")
     model_info = litellm_params.get("model_info")
-    assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer None', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}}
+    assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}}
     assert model_info == {'input_cost_per_token': 0.002, 'mode': 'embedding'}
     result = response.json()
     print(f"Received response: {result}")
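The embedding hunk is the same one-line fix: the expected header value changes from 'Bearer None' to 'Bearer sk-1234'. A corresponding sketch of the request, under the same assumptions as above (hypothetical setup, not repo code):

# Same assumed app import and client construction as the earlier sketch.
from fastapi.testclient import TestClient
from litellm.proxy.proxy_server import app  # assumed import path

client = TestClient(app)
response = client.post(
    "/embeddings",
    json={"model": "azure-embedding-model", "input": ["hello"]},
    headers={"Authorization": "Bearer sk-1234"},  # matches the updated assertion
)
result = response.json()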