Litellm ruff linting enforcement (#5992)

* ci(config.yml): add a 'check_code_quality' step

Addresses https://github.com/BerriAI/litellm/issues/5991
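
For reference, a minimal sketch of what such a CircleCI job could look like; the job name comes from this PR, while the executor image and exact steps are assumptions:

    version: 2.1
    jobs:
      check_code_quality:
        docker:
          - image: cimg/python:3.11  # hypothetical executor image
        steps:
          - checkout
          - run: pip install ruff
          - run: ruff check ./litellm  # target directory assumed, see the "cd into correct dir" fix below
    workflows:
      quality:
        jobs:
          - check_code_quality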

* ci(config.yml): check why CircleCI doesn't pick up this test

* ci(config.yml): fix to run 'check_code_quality' tests

* fix(__init__.py): fix unprotected import
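
An "unprotected import" is typically a module-level import of an optional dependency with no guard; a minimal sketch of the usual fix (the module name is hypothetical):

    try:
        import prisma  # optional dependency, hypothetical example
    except ImportError:
        prisma = None  # callers check for None before using it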

* fix(__init__.py): don't remove unused imports

* build(ruff.toml): update ruff.toml to ignore unused imports
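
Ruff reports unused imports as F401, so the ruff.toml change presumably adds that code to the ignore list; a rough sketch (the other entries are assumptions):

    # ruff.toml
    ignore = [
        "F401",  # unused imports, kept for re-exports in __init__.py
        "E501",  # line length, assumed to be ignored as well
    ]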

* fix: ruff + pyright - fix linting + type-checking errors
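
A large share of the changes below are ruff's E712 rule (equality comparison against True/False); a small before/after sketch using the flag names from the diff:

    run_gunicorn = False
    run_hypercorn = False

    if run_gunicorn == False and run_hypercorn == False:  # flagged by ruff E712
        ...
    if run_gunicorn is False and run_hypercorn is False:  # what ruff accepts
        ...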

* fix: fix linting errors

* fix(lago.py): fix module init error

* fix: fix linting errors

* ci(config.yml): cd into correct dir for checks

* fix(proxy_server.py): fix linting error

* fix(utils.py): fix bare except

bare excepts cause ruff linting errors
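
Ruff flags a bare except as E722; the fix is to name the exception class, as in this pattern from the diff below:

    try:
        import yaml  # type: ignore
    except Exception:  # was a bare `except:`, which ruff rejects as E722
        raise ImportError(
            "yaml needs to be imported. Run - `pip install 'litellm[proxy]'`"
        )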

* fix: ruff - fix remaining linting errors

* fix(clickhouse.py): use standard logging object
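
"Standard logging object" most likely means routing messages through a logging.Logger (litellm also ships its own verbose_logger wrapper) instead of bare print calls; a minimal sketch, with the function and message assumed:

    import logging

    logger = logging.getLogger(__name__)

    def log_insert(row_count: int) -> None:
        # previously a plain print(...); now goes through the logging machinery
        logger.debug("inserted %s rows into ClickHouse", row_count)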

* fix(__init__.py): fix unprotected import

* fix: ruff - fix linting errors

* fix: fix linting errors

* ci(config.yml): cleanup code qa step (formatting handled in local_testing)

* fix(_health_endpoints.py): fix ruff linting errors

* ci(config.yml): just use ruff in check_code_quality pipeline for now

* build(custom_guardrail.py): include missing file

* style(embedding_handler.py): fix ruff check

Krish Dholakia 2024-10-01 16:44:20 -07:00, committed by GitHub
parent 4fa8991a90
commit 94a05ca5d0
263 changed files with 1687 additions and 3320 deletions


@@ -48,7 +48,7 @@ def run_ollama_serve():
command = ["ollama", "serve"]
with open(os.devnull, "w") as devnull:
-process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
+subprocess.Popen(command, stdout=devnull, stderr=devnull)
except Exception as e:
print( # noqa
f"""
@@ -293,18 +293,18 @@ def run_server(
load_google_kms,
save_worker_config,
)
-if version == True:
+if version is True:
pkg_version = importlib.metadata.version("litellm") # type: ignore
click.echo(f"\nLiteLLM: Current Version = {pkg_version}\n")
return
if model and "ollama" in model and api_base is None:
run_ollama_serve()
-import requests
if test_async is True:
import concurrent
import time
+import requests # type: ignore
api_base = f"http://{host}:{port}"
def _make_openai_completion():
@@ -328,7 +328,7 @@ def run_server(
print("\n RESPONSE FROM POLLING JOB", polling_response) # noqa
status = polling_response["status"]
if status == "finished":
-llm_response = polling_response["result"]
+polling_response["result"]
break
print( # noqa
f"POLLING JOB{polling_url}\nSTATUS: {status}, \n Response {polling_response}" # noqa
@@ -371,21 +371,20 @@ def run_server(
print(f"Successful Calls: {successful_calls}") # noqa
print(f"Failed Calls: {failed_calls}") # noqa
return
-if health != False:
-import requests
+if health is not False:
print("\nLiteLLM: Health Testing models in config") # noqa
response = requests.get(url=f"http://{host}:{port}/health")
print(json.dumps(response.json(), indent=4)) # noqa
return
-if test != False:
+if test is not False:
request_model = model or "gpt-3.5-turbo"
click.echo(
f"\nLiteLLM: Making a test ChatCompletions request to your proxy. Model={request_model}"
)
import openai
-if test == True: # flag value set
+if test is True: # flag value set
api_base = f"http://{host}:{port}"
else:
api_base = test
@@ -455,7 +454,7 @@ def run_server(
pass
else:
import gunicorn.app.base
-except:
+except Exception:
raise ImportError(
"uvicorn, gunicorn needs to be imported. Run - `pip install 'litellm[proxy]'`"
)
@@ -509,7 +508,7 @@ def run_server(
import asyncio
import yaml # type: ignore
-except:
+except Exception:
raise ImportError(
"yaml needs to be imported. Run - `pip install 'litellm[proxy]'`"
)
@@ -522,7 +521,7 @@ def run_server(
if (
litellm_settings is not None
and "json_logs" in litellm_settings
-and litellm_settings["json_logs"] == True
+and litellm_settings["json_logs"] is True
):
import litellm
@@ -673,9 +672,8 @@ def run_server(
port = random.randint(1024, 49152)
import litellm
from litellm.proxy.proxy_server import app
-if run_gunicorn == False and run_hypercorn == False:
+if run_gunicorn is False and run_hypercorn is False:
if ssl_certfile_path is not None and ssl_keyfile_path is not None:
print( # noqa
f"\033[1;32mLiteLLM Proxy: Using SSL with certfile: {ssl_certfile_path} and keyfile: {ssl_keyfile_path}\033[0m\n" # noqa
@@ -694,9 +692,7 @@ def run_server(
) # run uvicorn w/ json
else:
uvicorn.run(app, host=host, port=port) # run uvicorn
-elif run_gunicorn == True:
-import gunicorn.app.base
+elif run_gunicorn is True:
# Gunicorn Application Class
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
@@ -725,7 +721,7 @@ def run_server(
)
print() # noqa
print( # noqa
-f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
+'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
)
print( # noqa
f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
@@ -778,7 +774,7 @@ def run_server(
StandaloneApplication(
app=app, options=gunicorn_options
).run() # Run gunicorn
-elif run_hypercorn == True:
+elif run_hypercorn is True:
import asyncio
from hypercorn.asyncio import serve