Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-07 02:58:21 +00:00
remove unnecessary print

parent cd9ef3f478
commit a1dbb844fa

1 changed file with 0 additions and 2 deletions
@@ -232,8 +232,6 @@ class DocVQAScoringFn(RegisteredBaseScoringFn):
         scoring_fn_identifier: Optional[str] = "docvqa",
         scoring_params: Optional[ScoringFnParams] = None,
     ) -> ScoringResultRow:
-        print("SCORE_ROW ======")
-        print(input_row)
         expected_answers = json.loads(input_row["expected_answer"])
         generated_answer = input_row["generated_answer"]
         score = 1.0 if normalize_answer(generated_answer) in [normalize_answer(s) for s in expected_answers] else 0.0
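
For context, the following is a minimal, self-contained sketch of the exact-match scoring logic this hunk leaves in place once the debug prints are removed. The standalone score_row wrapper and the normalize_answer implementation shown here are assumptions for illustration (SQuAD-style lowercasing, punctuation and article stripping); the actual DocVQAScoringFn.score_row and its normalize_answer helper in llama-stack may differ in detail.

import json
import re
import string


def normalize_answer(s: str) -> str:
    # Assumed normalization: lowercase, drop punctuation, drop articles,
    # collapse whitespace. The real helper may apply different rules.
    s = s.lower()
    s = "".join(ch for ch in s if ch not in string.punctuation)
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())


def score_row(input_row: dict) -> dict:
    # expected_answer is stored as a JSON-encoded list of acceptable strings.
    expected_answers = json.loads(input_row["expected_answer"])
    generated_answer = input_row["generated_answer"]
    # Score 1.0 on an exact match (after normalization) against any accepted answer.
    score = 1.0 if normalize_answer(generated_answer) in [normalize_answer(s) for s in expected_answers] else 0.0
    return {"score": score}


# Usage example with a hypothetical row:
row = {
    "expected_answer": json.dumps(["The Eiffel Tower", "eiffel tower"]),
    "generated_answer": "Eiffel Tower",
}
print(score_row(row))  # {'score': 1.0}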