fix evals and scoring

This commit is contained in:
Dinesh Yeduguru 2024-11-12 18:14:58 -08:00
parent 55d66ca918
commit 606df220f5
3 changed files with 2 additions and 3 deletions

View file

@@ -150,7 +150,7 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
             messages.append(candidate.system_message)
         messages += input_messages
         response = await self.inference_api.chat_completion(
-            model=candidate.model,
+            model_id=candidate.model,
             messages=messages,
             sampling_params=candidate.sampling_params,
         )

View file

@@ -62,7 +62,7 @@ class LlmAsJudgeScoringFn(BaseScoringFn):
         )
         judge_response = await self.inference_api.chat_completion(
-            model=fn_def.params.judge_model,
+            model_id=fn_def.params.judge_model,
             messages=[
                 {
                     "role": "user",

View file

@@ -85,7 +85,6 @@ class VLLMInferenceAdapter(Inference, ModelRegistryHelper, ModelsProtocolPrivate):
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
         model = await self.model_store.get_model(model_id)
-        print(f"model={model}")
         request = ChatCompletionRequest(
             model=model.provider_resource_id,
             messages=messages,