chore: use openai_chat_completion for llm as a judge scoring (#3635)

# What does this PR do?

Update the LLM-as-a-judge scoring function to use `openai_chat_completion`
instead of the deprecated `chat_completion` API.


## Test Plan

CI
This commit is contained in:
Matthew Farrellee 2025-10-01 09:44:31 -04:00 committed by GitHub
parent ca47d90926
commit ea15f2a270
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -6,7 +6,7 @@
 import re
 from typing import Any

-from llama_stack.apis.inference import Inference, UserMessage
+from llama_stack.apis.inference import Inference
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
@@ -55,15 +55,16 @@ class LlmAsJudgeScoringFn(RegisteredBaseScoringFn):
             generated_answer=generated_answer,
         )
-        judge_response = await self.inference_api.chat_completion(
-            model_id=fn_def.params.judge_model,
+        judge_response = await self.inference_api.openai_chat_completion(
+            model=fn_def.params.judge_model,
             messages=[
-                UserMessage(
-                    content=judge_input_msg,
-                ),
+                {
+                    "role": "user",
+                    "content": judge_input_msg,
+                }
             ],
         )
-        content = judge_response.completion_message.content
+        content = judge_response.choices[0].message.content
         rating_regexes = fn_def.params.judge_score_regexes
         judge_rating = None