mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
(test) deepinfra with openai v1.0.0
This commit is contained in:
parent
27cbd7d895
commit
a21ff38694
1 changed file with 8 additions and 7 deletions
|
@ -941,7 +941,7 @@ def test_completion_bedrock_claude():
|
||||||
pass
|
pass
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
pytest.fail(f"Error occurred: {e}")
|
pytest.fail(f"Error occurred: {e}")
|
||||||
test_completion_bedrock_claude()
|
# test_completion_bedrock_claude()
|
||||||
|
|
||||||
def test_completion_bedrock_cohere():
|
def test_completion_bedrock_cohere():
|
||||||
print("calling bedrock cohere")
|
print("calling bedrock cohere")
|
||||||
|
@ -1340,7 +1340,7 @@ def test_completion_ai21():
|
||||||
# test_completion_ai21()
|
# test_completion_ai21()
|
||||||
## test deep infra
|
## test deep infra
|
||||||
def test_completion_deep_infra():
|
def test_completion_deep_infra():
|
||||||
# litellm.set_verbose = True
|
litellm.set_verbose = False
|
||||||
model_name = "deepinfra/meta-llama/Llama-2-70b-chat-hf"
|
model_name = "deepinfra/meta-llama/Llama-2-70b-chat-hf"
|
||||||
try:
|
try:
|
||||||
response = completion(
|
response = completion(
|
||||||
|
@ -1351,10 +1351,11 @@ def test_completion_deep_infra():
|
||||||
)
|
)
|
||||||
# Add any assertions here to check the response
|
# Add any assertions here to check the response
|
||||||
print(response)
|
print(response)
|
||||||
print(response.response_ms)
|
print(response._response_ms)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
pytest.fail(f"Error occurred: {e}")
|
pytest.fail(f"Error occurred: {e}")
|
||||||
# test_completion_deep_infra()
|
test_completion_deep_infra()
|
||||||
|
|
||||||
def test_completion_deep_infra_mistral():
|
def test_completion_deep_infra_mistral():
|
||||||
print("deep infra test with temp=0")
|
print("deep infra test with temp=0")
|
||||||
model_name = "deepinfra/mistralai/Mistral-7B-Instruct-v0.1"
|
model_name = "deepinfra/mistralai/Mistral-7B-Instruct-v0.1"
|
||||||
|
@ -1362,15 +1363,15 @@ def test_completion_deep_infra_mistral():
|
||||||
response = completion(
|
response = completion(
|
||||||
model=model_name,
|
model=model_name,
|
||||||
messages=messages,
|
messages=messages,
|
||||||
temperature=0, # mistrail fails with temperature 0.001
|
temperature=0.01, # mistrail fails with temperature=0
|
||||||
max_tokens=10
|
max_tokens=10
|
||||||
)
|
)
|
||||||
# Add any assertions here to check the response
|
# Add any assertions here to check the response
|
||||||
print(response)
|
print(response)
|
||||||
print(response.response_ms)
|
print(response._response_ms)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
pytest.fail(f"Error occurred: {e}")
|
pytest.fail(f"Error occurred: {e}")
|
||||||
# test_completion_deep_infra_mistral()
|
test_completion_deep_infra_mistral()
|
||||||
|
|
||||||
# Palm tests
|
# Palm tests
|
||||||
def test_completion_palm():
|
def test_completion_palm():
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue