Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
Litellm ruff linting enforcement (#5992)
* ci(config.yml): add a 'check_code_quality' step. Addresses https://github.com/BerriAI/litellm/issues/5991
* ci(config.yml): check why circle ci doesn't pick up this test
* ci(config.yml): fix to run 'check_code_quality' tests
* fix(__init__.py): fix unprotected import
* fix(__init__.py): don't remove unused imports
* build(ruff.toml): update ruff.toml to ignore unused imports
* fix: ruff + pyright - fix linting + type-checking errors
* fix: fix linting errors
* fix(lago.py): fix module init error
* fix: fix linting errors
* ci(config.yml): cd into correct dir for checks
* fix(proxy_server.py): fix linting error
* fix(utils.py): fix bare except that causes ruff linting errors
* fix: ruff - fix remaining linting errors
* fix(clickhouse.py): use standard logging object
* fix(__init__.py): fix unprotected import
* fix: ruff - fix linting errors
* fix: fix linting errors
* ci(config.yml): cleanup code qa step (formatting handled in local_testing)
* fix(_health_endpoints.py): fix ruff linting errors
* ci(config.yml): just use ruff in check_code_quality pipeline for now
* build(custom_guardrail.py): include missing file
* style(embedding_handler.py): fix ruff check
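The bare-except cleanup mentioned above is what the first two hunks below do: the exception was bound to a name that was never read, which ruff flags under its unused-variable rule (F841), so the "as e" binding is dropped. A minimal illustrative sketch of the pattern (not code from this PR):

    # Sketch of the fix pattern only, not code from this PR.
    response_data = {"output": ["Hello", ", ", "world"]}

    try:
        output_string = "".join(response_data["output"])
    except Exception:  # was "except Exception as e:"; e was never used (ruff F841)
        raise ValueError("Unable to parse response. Got={}".format(response_data))

    print(output_string)  # Hello, world

Dropping the binding changes nothing at runtime; the same error is still raised on parse failure.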
parent 3fc4ae0d65
commit d57be47b0f
263 changed files with 1687 additions and 3320 deletions
@@ -297,7 +297,7 @@ def handle_prediction_response_streaming(prediction_url, api_token, print_verbose
         if "output" in response_data:
             try:
                 output_string = "".join(response_data["output"])
-            except Exception as e:
+            except Exception:
                 raise ReplicateError(
                     status_code=422,
                     message="Unable to parse response. Got={}".format(
@@ -344,7 +344,7 @@ async def async_handle_prediction_response_streaming(
         if "output" in response_data:
             try:
                 output_string = "".join(response_data["output"])
-            except Exception as e:
+            except Exception:
                 raise ReplicateError(
                     status_code=422,
                     message="Unable to parse response. Got={}".format(
@@ -479,7 +479,7 @@ def completion(
     else:
         input_data = {"prompt": prompt, **optional_params}
 
-    if acompletion is not None and acompletion == True:
+    if acompletion is not None and acompletion is True:
         return async_completion(
             model_response=model_response,
             model=model,
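The `== True` → `is True` rewrite in this hunk (and the two that follow) is ruff's comparison rule E712. The identity form is slightly stricter, which is worth noting because it is visible to callers. A small sketch of the difference, not PR code:

    # E712 in a nutshell: "is True" matches only the bool singleton.
    acompletion = 1  # truthy int a caller might pass instead of a bool

    print(acompletion == True)  # True  (bool is a subclass of int, so 1 == True)
    print(acompletion is True)  # False (only the actual True object matches)

After this change, only a real acompletion=True (or stream=True, below) takes the async/streaming path.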
@@ -513,7 +513,7 @@ def completion(
     print_verbose(prediction_url)
 
     # Handle the prediction response (streaming or non-streaming)
-    if "stream" in optional_params and optional_params["stream"] == True:
+    if "stream" in optional_params and optional_params["stream"] is True:
         print_verbose("streaming request")
         _response = handle_prediction_response_streaming(
             prediction_url, api_key, print_verbose
@@ -571,7 +571,7 @@ async def async_completion(
         http_handler=http_handler,
     )
 
-    if "stream" in optional_params and optional_params["stream"] == True:
+    if "stream" in optional_params and optional_params["stream"] is True:
         _response = async_handle_prediction_response_streaming(
             prediction_url, api_key, print_verbose
         )