mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00

test: cleanup testing

This commit is contained in:
parent a48445c11c
commit a3d280baa3

3 changed files with 79 additions and 83 deletions
@@ -265,17 +265,15 @@ class OpenAIChatCompletion(BaseLLM):
                           data: dict, headers: dict,
                           model_response: ModelResponse):
         kwargs = locals()
-        if self._aclient_session is None:
-            self._aclient_session = self.create_aclient_session()
-        client = self._aclient_session
         try:
-            response = await client.post(api_base, json=data, headers=headers, timeout=litellm.request_timeout)
-            response_json = response.json()
-            if response.status_code != 200:
-                raise OpenAIError(status_code=response.status_code, message=response.text, request=response.request, response=response)
-
-            ## RESPONSE OBJECT
-            return convert_to_model_response_object(response_object=response_json, model_response_object=model_response)
+            async with httpx.AsyncClient() as client:
+                response = await client.post(api_base, json=data, headers=headers, timeout=litellm.request_timeout)
+                response_json = response.json()
+                if response.status_code != 200:
+                    raise OpenAIError(status_code=response.status_code, message=response.text, request=response.request, response=response)
+
+                ## RESPONSE OBJECT
+                return convert_to_model_response_object(response_object=response_json, model_response_object=model_response)
         except Exception as e:
             if isinstance(e, httpx.TimeoutException):
                 raise OpenAIError(status_code=500, message="Request Timeout Error")
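The hunk above drops the cached _aclient_session and instead opens a fresh httpx.AsyncClient per call, so the connection pool is closed whether the request succeeds or raises. A minimal sketch of that pattern; the URL, payload, and 600-second timeout are placeholders for illustration, not values from this diff:

import httpx

async def post_completion(api_base: str, data: dict, headers: dict) -> dict:
    # Fresh client per request; the async context manager closes the
    # connection pool even if the call below raises.
    async with httpx.AsyncClient() as client:
        response = await client.post(api_base, json=data, headers=headers, timeout=600)
        if response.status_code != 200:
            # The diff wraps this case in OpenAIError; a plain status check is shown here.
            response.raise_for_status()
        return response.json()

# e.g. asyncio.run(post_completion("https://example.com/v1/chat/completions", {...}, {...}))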
@@ -292,9 +290,7 @@ class OpenAIChatCompletion(BaseLLM):
                    model_response: ModelResponse,
                    model: str
     ):
-        if self._client_session is None:
-            self._client_session = self.create_client_session()
-        with self._client_session.stream(
+        with httpx.stream(
                     url=f"{api_base}", # type: ignore
                     json=data,
                     headers=headers,
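Here the per-instance _client_session is replaced by the module-level httpx.stream(...) helper. A hedged sketch of how such a streaming call is typically consumed; the HTTP method, timeout, and generator wrapper are assumptions for illustration rather than lines from the hunk:

import httpx

def stream_completion(api_base: str, data: dict, headers: dict):
    # httpx.stream() returns a context manager; the body is read
    # incrementally instead of being buffered in memory.
    with httpx.stream("POST", api_base, json=data, headers=headers, timeout=600) as response:
        for line in response.iter_lines():
            if line:  # skip keep-alive blank lines
                yield line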
@@ -316,9 +312,8 @@ class OpenAIChatCompletion(BaseLLM):
                          headers: dict,
                          model_response: ModelResponse,
                          model: str):
-        if self._aclient_session is None:
-            self._aclient_session = self.create_aclient_session()
-        async with self._aclient_session.stream(
+        client = httpx.AsyncClient()
+        async with client.stream(
                     url=f"{api_base}",
                     json=data,
                     headers=headers,
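The async streaming path gets the same treatment: a client is created inline and client.stream(...) is used as an async context manager. A small sketch of that flow; explicitly closing the client afterwards is an assumption added here, not something shown in this hunk:

import httpx

async def astream_completion(api_base: str, data: dict, headers: dict):
    client = httpx.AsyncClient()
    try:
        # stream() yields the response before the body has been read,
        # so chunks can be forwarded to the caller as they arrive.
        async with client.stream("POST", api_base, json=data, headers=headers, timeout=600) as response:
            async for line in response.aiter_lines():
                if line:
                    yield line
    finally:
        await client.aclose()  # not in the diff; releases the connection pool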
@@ -361,7 +356,7 @@ class OpenAIChatCompletion(BaseLLM):
                 additional_args={"complete_input_dict": data},
             )
             ## COMPLETION CALL
-            response = self._client_session.post(
+            response = httpx.post(
                 api_base, headers=headers, json=data, timeout=litellm.request_timeout
             )
             ## LOGGING
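The synchronous completion call likewise moves from self._client_session.post(...) to the one-shot httpx.post(...). A brief sketch of that call together with the timeout handling that the class maps to its own error type elsewhere in this diff; the URL, timeout, and the RuntimeError stand-in are placeholders:

import httpx

def post_completion_sync(api_base: str, data: dict, headers: dict) -> dict:
    try:
        # One-shot request; httpx opens and closes the connection internally.
        response = httpx.post(api_base, headers=headers, json=data, timeout=600)
    except httpx.TimeoutException:
        # The diff converts this into OpenAIError(status_code=500, ...).
        raise RuntimeError("Request Timeout Error")
    return response.json()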
@@ -9,9 +9,8 @@ sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
 import pytest
-from openai import Timeout
 import litellm
-from litellm import embedding, completion, completion_cost
+from litellm import embedding, completion, completion_cost, Timeout
 from litellm import RateLimitError
 litellm.num_retries = 3
 litellm.cache = None
@@ -419,7 +418,7 @@ def test_completion_openai():
         pass
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_openai()
+# test_completion_openai()
 
 def test_completion_text_openai():
     try:
@@ -442,11 +441,13 @@ def test_completion_openai_with_optional_params():
         )
         # Add any assertions here to check the response
         print(response)
-    except Timeout as e:
+    except litellm.Timeout as e:
         pass
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
+test_completion_openai_with_optional_params()
+
 def test_completion_openai_litellm_key():
     try:
         litellm.api_key = os.environ['OPENAI_API_KEY']
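These test hunks switch from "from openai import Timeout" to catching litellm.Timeout, so the tests no longer depend on the openai package's exception layout. A sketch of that pattern in a pytest-style test; the test name, model, and prompt are illustrative, and it assumes OPENAI_API_KEY is set in the environment:

import os
import pytest
import litellm
from litellm import completion, Timeout  # Timeout is re-exported by litellm

def test_completion_handles_timeout():
    litellm.api_key = os.environ["OPENAI_API_KEY"]  # assumes the key is configured
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "hello"}],
        )
        print(response)
    except Timeout:
        pass  # a timed-out request is acceptable for this test
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")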
@@ -606,7 +607,7 @@ def test_completion_openai_azure_with_functions():
         print(response)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_openai_azure_with_functions()
+# test_completion_openai_azure_with_functions()
 
 
 def test_completion_azure():
@@ -1,76 +1,76 @@
-#### What this tests ####
-# This tests if logging to the llmonitor integration actually works
-# Adds the parent directory to the system path
-import sys
-import os
+# #### What this tests ####
+# # This tests if logging to the llmonitor integration actually works
+# # Adds the parent directory to the system path
+# import sys
+# import os
 
-sys.path.insert(0, os.path.abspath("../.."))
+# sys.path.insert(0, os.path.abspath("../.."))
 
-from litellm import completion, embedding
-import litellm
+# from litellm import completion, embedding
+# import litellm
 
-litellm.success_callback = ["llmonitor"]
-litellm.failure_callback = ["llmonitor"]
+# litellm.success_callback = ["llmonitor"]
+# litellm.failure_callback = ["llmonitor"]
 
-litellm.set_verbose = True
+# litellm.set_verbose = True
 
 
-def test_chat_openai():
-    try:
-        response = completion(
-            model="gpt-3.5-turbo",
-            messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
-            user="ishaan_from_litellm"
-        )
+# def test_chat_openai():
+#     try:
+#         response = completion(
+#             model="gpt-3.5-turbo",
+#             messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
+#             user="ishaan_from_litellm"
+#         )
 
-        print(response)
+#         print(response)
 
-    except Exception as e:
-        print(e)
+#     except Exception as e:
+#         print(e)
 
 
-def test_embedding_openai():
-    try:
-        response = embedding(model="text-embedding-ada-002", input=["test"])
-        # Add any assertions here to check the response
-        print(f"response: {str(response)[:50]}")
-    except Exception as e:
-        print(e)
+# def test_embedding_openai():
+#     try:
+#         response = embedding(model="text-embedding-ada-002", input=["test"])
+#         # Add any assertions here to check the response
+#         print(f"response: {str(response)[:50]}")
+#     except Exception as e:
+#         print(e)
 
 
-test_chat_openai()
-# test_embedding_openai()
+# test_chat_openai()
+# # test_embedding_openai()
 
 
-def test_llmonitor_logging_function_calling():
-    function1 = [
-        {
-            "name": "get_current_weather",
-            "description": "Get the current weather in a given location",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "location": {
-                        "type": "string",
-                        "description": "The city and state, e.g. San Francisco, CA",
-                    },
-                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
-                },
-                "required": ["location"],
-            },
-        }
-    ]
-    try:
-        response = completion(model="gpt-3.5-turbo",
-                              messages=[{
-                                  "role": "user",
-                                  "content": "what's the weather in boston"
-                              }],
-                              temperature=0.1,
-                              functions=function1,
-                              )
-        print(response)
-    except Exception as e:
-        print(e)
+# def test_llmonitor_logging_function_calling():
+#     function1 = [
+#         {
+#             "name": "get_current_weather",
+#             "description": "Get the current weather in a given location",
+#             "parameters": {
+#                 "type": "object",
+#                 "properties": {
+#                     "location": {
+#                         "type": "string",
+#                         "description": "The city and state, e.g. San Francisco, CA",
+#                     },
+#                     "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+#                 },
+#                 "required": ["location"],
+#             },
+#         }
+#     ]
+#     try:
+#         response = completion(model="gpt-3.5-turbo",
+#                               messages=[{
+#                                   "role": "user",
+#                                   "content": "what's the weather in boston"
+#                               }],
+#                               temperature=0.1,
+#                               functions=function1,
+#                               )
+#         print(response)
+#     except Exception as e:
+#         print(e)
 
-# test_llmonitor_logging_function_calling()
+# # test_llmonitor_logging_function_calling()