This commit is contained in:
Krrish Dholakia 2023-08-30 09:55:45 -07:00
parent b90e5ed6db
commit f3d8445ff1
4 changed files with 38 additions and 33 deletions

View file

@ -72,17 +72,16 @@ def test_completion_claude_stream():
pytest.fail(f"Error occurred: {e}")
def test_completion_hf_api():
try:
user_message = "write some code to find the sum of two numbers"
messages = [{ "content": user_message,"role": "user"}]
response = completion(model="stabilityai/stablecode-completion-alpha-3b-4k", messages=messages, custom_llm_provider="huggingface", logger_fn=logger_fn)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# def test_completion_hf_api():
# try:
# user_message = "write some code to find the sum of two numbers"
# messages = [{ "content": user_message,"role": "user"}]
# response = completion(model="stabilityai/stablecode-completion-alpha-3b-4k", messages=messages, custom_llm_provider="huggingface", logger_fn=logger_fn)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
test_completion_hf_api()
# def test_completion_hf_deployed_api():
# try:
# user_message = "There's a llama in my garden 😱 What should I do?"
@ -94,25 +93,25 @@ test_completion_hf_api()
# pytest.fail(f"Error occurred: {e}")
def test_completion_cohere():
try:
response = completion(
model="command-nightly",
messages=messages,
max_tokens=100,
logit_bias={40: 10},
)
# Add any assertions here to check the response
print(response)
response_str = response["choices"][0]["message"]["content"]
print(f"str response{response_str}")
response_str_2 = response.choices[0].message.content
if type(response_str) != str:
pytest.fail(f"Error occurred: {e}")
if type(response_str_2) != str:
pytest.fail(f"Error occurred: {e}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# def test_completion_cohere(): # commenting for now as the cohere endpoint is being flaky
# try:
# response = completion(
# model="command-nightly",
# messages=messages,
# max_tokens=100,
# logit_bias={40: 10},
# )
# # Add any assertions here to check the response
# print(response)
# response_str = response["choices"][0]["message"]["content"]
# print(f"str response{response_str}")
# response_str_2 = response.choices[0].message.content
# if type(response_str) != str:
# pytest.fail(f"Error occurred: {e}")
# if type(response_str_2) != str:
# pytest.fail(f"Error occurred: {e}")
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
def test_completion_cohere_stream():

View file

@ -11,20 +11,26 @@ from litellm import embedding, completion
litellm.set_verbose = True
# Test 1: On completion call - without setting client to true -> ensure no logs are created
response = completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
# print(f"response: {response}")
litellm.use_client = True
user_message = "Hello, how are you?"
messages = [{ "content": user_message,"role": "user"}]
# Test 1: On completion call
# Test 2: On completion call
response = completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
# print(f"response: {response}")
# # Test 2: On embedding call
# Test 3: On embedding call
response = embedding(model="text-embedding-ada-002", input=["sample text"])
# print(f"response: {response}")
# # Test 3: On streaming completion call
# Test 4: On streaming completion call
response = completion(model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}], stream=True)
print(f"response: {response}")

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.506"
version = "0.1.507"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"