mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
bump pyproject version

commit b45bb78bee (parent 6430bdb636)

5 changed files with 86 additions and 81 deletions
@@ -214,6 +214,31 @@ def test_completion_cohere_stream():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
+def test_completion_claude_stream():
+    try:
+        messages = [
+            {"role": "system", "content": "You are a helpful assistant."},
+            {
+                "role": "user",
+                "content": "how does a court case get to the Supreme Court?",
+            },
+        ]
+        response = completion(
+            model="claude-instant-1", messages=messages, stream=True, max_tokens=50
+        )
+        complete_response = ""
+        # Add any assertions here to check the response
+        for idx, chunk in enumerate(response):
+            chunk, finished = streaming_format_tests(idx, chunk)
+            if finished:
+                break
+            complete_response += chunk
+        if complete_response.strip() == "":
+            raise Exception("Empty response received")
+        print(f"completion_response: {complete_response}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+# test_completion_claude_stream()
 
 def test_completion_bedrock_ai21_stream():
     try:
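The added test_completion_claude_stream() follows the same pattern as the file's other streaming tests: iterate over the response chunks, validate each one with the streaming_format_tests helper (defined earlier in this test file, outside this diff), accumulate the text, and fail on an empty result. As a minimal sketch of that consumption pattern without the helper, assuming litellm's OpenAI-style chunk shape (which this diff does not show), the stream can be read directly:

from litellm import completion

def collect_stream(model: str, prompt: str) -> str:
    # Request a streamed completion; chunks arrive incrementally.
    response = completion(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        stream=True,
        max_tokens=50,
    )
    pieces = []
    for chunk in response:
        # Each chunk carries a delta; content may be absent or None
        # on the final chunk (shape assumed, not shown in this diff).
        text = chunk["choices"][0]["delta"].get("content")
        if text:
            pieces.append(text)
    return "".join(pieces)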
@@ -327,28 +352,6 @@ def test_together_ai_completion_call_starcoder():
         print(f"error occurred: {traceback.format_exc()}")
         pass
 
-def test_completion_nlp_cloud_streaming():
-    try:
-        messages = [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {
-                "role": "user",
-                "content": "how does a court case get to the Supreme Court?",
-            },
-        ]
-        response = completion(model="dolphin", messages=messages, stream=True, logger_fn=logger_fn)
-        complete_response = ""
-        # Add any assertions here to check the response
-        for idx, chunk in enumerate(response):
-            chunk, finished = streaming_format_tests(idx, chunk)
-            if finished:
-                break
-            complete_response += chunk
-        if complete_response == "":
-            raise Exception("Empty response received")
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
-
 #### Test Function calling + streaming ####
 
 def test_completion_openai_with_functions():
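Both the added Claude test and the removed NLP Cloud test lean on streaming_format_tests, which is defined elsewhere in this test file and is not part of this diff. A hypothetical validator in that spirit, whose field checks are assumptions rather than the actual helper, might look like:

def check_chunk_format(idx, chunk):
    # Hypothetical stand-in for streaming_format_tests; the real helper
    # lives elsewhere in this file and may check different fields.
    assert "choices" in chunk and len(chunk["choices"]) > 0
    choice = chunk["choices"][0]
    # A non-null finish_reason signals the final chunk of the stream.
    finished = choice.get("finish_reason") is not None
    text = choice.get("delta", {}).get("content") or ""
    return text, finished

Note that the added test also tightens the empty-response check from complete_response == "" (as in the removed test) to complete_response.strip() == "", which additionally catches whitespace-only streams.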