fix linting errors

This commit is contained in:
ishaan-jaff 2023-08-22 16:06:41 -07:00
parent 91901aa986
commit 6451183272
5 changed files with 23 additions and 10 deletions

View file

@@ -6,7 +6,7 @@ input_callback: List[str] = []
 success_callback: List[str] = []
 failure_callback: List[str] = []
 set_verbose = False
-email = None  # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
+email: Optional[str] = None  # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
 telemetry = True
 max_tokens = 256  # OpenAI Defaults
 retry = True

View file

@@ -1,2 +1,3 @@
 import importlib_metadata
-version = importlib_metadata.version('litellm')
+version = importlib_metadata.version("litellm")

View file

@@ -5,7 +5,9 @@ import sys, os
 import traceback
 import pytest
-sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
@@ -16,7 +18,12 @@ messages = [{ "content": user_message,"role": "user"}]
 # openai call
-response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
+response = completion(
+    model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}]
+)
 # bad request call
-response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
+response = completion(
+    model="chatgpt-test",
+    messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}],
+)

View file

@@ -1022,6 +1022,7 @@ def handle_success(args, kwargs, result, start_time, end_time):
         )
         pass
+
 def get_model_list():
     global last_fetched_at
     # if user is using hosted product -> get their updated model list - refresh every 5 minutes
@@ -1035,7 +1036,11 @@ def get_model_list():
         # make the api call
         last_fetched_at = time.time()
         print(f"last_fetched_at: {last_fetched_at}")
-        response = requests.post(url="http://api.litellm.ai/get_model_list", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
+        response = requests.post(
+            url="http://api.litellm.ai/get_model_list",
+            headers={"content-type": "application/json"},
+            data=json.dumps({"user_email": user_email}),
+        )
         print_verbose(f"get_model_list response: {response.text}")
         data = response.json()
         # update model list