From c1987d6143f22574ce83ee134ec282fcb9589715 Mon Sep 17 00:00:00 2001
From: Yuan Tang
Date: Thu, 2 Jan 2025 11:04:07 -0600
Subject: [PATCH] Fix failing flake8 E226 check (#701)

This fixes the pre-commit check when running locally (not sure why this
was not caught on CI check):

```
> pre-commit run --show-diff-on-failure --color=always --all-files
trim trailing whitespace.................................................Passed
check python ast.........................................................Passed
check for merge conflicts................................................Passed
check for added large files..............................................Passed
fix end of files.........................................................Passed
Insert license in comments...............................................Passed
flake8...................................................................Failed
- hook id: flake8
- exit code: 1

llama_stack/distribution/ui/page/evaluations/app_eval.py:132:65: E226 missing whitespace around arithmetic operator
llama_stack/distribution/ui/page/evaluations/native_eval.py:235:61: E226 missing whitespace around arithmetic operator
llama_stack/providers/utils/telemetry/trace_protocol.py:56:78: E226 missing whitespace around arithmetic operator
```

Signed-off-by: Yuan Tang
---
 llama_stack/distribution/ui/page/evaluations/app_eval.py    | 2 +-
 llama_stack/distribution/ui/page/evaluations/native_eval.py | 2 +-
 llama_stack/providers/utils/telemetry/trace_protocol.py     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/llama_stack/distribution/ui/page/evaluations/app_eval.py b/llama_stack/distribution/ui/page/evaluations/app_eval.py
index 5ec47ed45..a9dd50a04 100644
--- a/llama_stack/distribution/ui/page/evaluations/app_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/app_eval.py
@@ -129,7 +129,7 @@ def application_evaluation_page():
 
                 # Display current row results using separate containers
                 progress_text_container.write(
-                    f"Expand to see current processed result ({i+1}/{len(rows)})"
+                    f"Expand to see current processed result ({i + 1} / {len(rows)})"
                 )
                 results_container.json(
                     score_res.to_json(),
diff --git a/llama_stack/distribution/ui/page/evaluations/native_eval.py b/llama_stack/distribution/ui/page/evaluations/native_eval.py
index b8cc8bfa6..2cbc8d63e 100644
--- a/llama_stack/distribution/ui/page/evaluations/native_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/native_eval.py
@@ -232,7 +232,7 @@ def run_evaluation_3():
                 output_res[scoring_fn].append(eval_res.scores[scoring_fn].score_rows[0])
 
             progress_text_container.write(
-                f"Expand to see current processed result ({i+1}/{len(rows)})"
+                f"Expand to see current processed result ({i + 1} / {len(rows)})"
             )
             results_container.json(eval_res, expanded=2)
 
diff --git a/llama_stack/providers/utils/telemetry/trace_protocol.py b/llama_stack/providers/utils/telemetry/trace_protocol.py
index 31897c0ae..38a56fdac 100644
--- a/llama_stack/providers/utils/telemetry/trace_protocol.py
+++ b/llama_stack/providers/utils/telemetry/trace_protocol.py
@@ -53,7 +53,7 @@ def trace_protocol(cls: Type[T]) -> Type[T]:
             combined_args = {}
             for i, arg in enumerate(args):
                 param_name = (
-                    param_names[i] if i < len(param_names) else f"position_{i+1}"
+                    param_names[i] if i < len(param_names) else f"position_{i + 1}"
                 )
                 combined_args[param_name] = serialize_value(arg)
             for k, v in kwargs.items():
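
For context, here is a minimal sketch of the pattern the hook is complaining about; the loop and
variable names are placeholders rather than code taken from the repository. flake8's E226 check
flags arithmetic operators written without surrounding whitespace, such as `i+1`, and the reported
columns show it is looking inside f-string replacement fields here. A plausible (unconfirmed)
reason the warning appeared locally but not in CI is a version difference: pycodestyle only sees
the tokens inside f-strings on Python 3.12+, so an older interpreter would not report the `+` at all.

```python
# Minimal illustration of E226 (placeholder names, not repository code).
rows = ["a", "b", "c"]

for i, _row in enumerate(rows):
    # Flagged form: no whitespace around "+" inside the f-string expression.
    # The "# noqa: E226" marker keeps this illustrative line from failing the hook.
    flagged = f"Expand to see current processed result ({i+1}/{len(rows)})"  # noqa: E226

    # Compliant form, matching the spacing applied by this patch.
    fixed = f"Expand to see current processed result ({i + 1} / {len(rows)})"

    print(flagged, fixed, sep="\n")
```

The failing hook can also be run on its own with `pre-commit run flake8 --all-files` to confirm
the fix before pushing.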