test full generation + eval

This commit is contained in:
Xi Yan 2024-10-25 16:52:59 -07:00
parent 16620a8185
commit 247a53d393
5 changed files with 15 additions and 3 deletions

View file

@@ -97,7 +97,6 @@ class InferenceRouter(Inference):
logprobs=logprobs,
)
provider = self.routing_table.get_provider_impl(model)
if stream:
return (chunk async for chunk in await provider.chat_completion(**params))
else:

View file

@@ -18,6 +18,7 @@ from .config import MetaReferenceEvalConfig
class ColumnName(Enum):
input_query = "input_query"
expected_answer = "expected_answer"
chat_completion_input = "chat_completion_input"
completion_input = "completion_input"
@@ -53,10 +54,12 @@ class MetaReferenceEvalImpl(Eval):
expected_schemas = [
{
ColumnName.input_query.value: StringType(),
ColumnName.expected_answer.value: StringType(),
ColumnName.chat_completion_input.value: ChatCompletionInputType(),
},
{
ColumnName.input_query.value: StringType(),
ColumnName.expected_answer.value: StringType(),
ColumnName.completion_input.value: CompletionInputType(),
},

View file

@@ -70,6 +70,7 @@ async def register_dataset(
if for_generation:
dataset_schema = {
"expected_answer": StringType(),
"input_query": StringType(),
"chat_completion_input": ChatCompletionInputType(),
}
else:

View file

@@ -16,3 +16,7 @@ providers:
provider_type: remote::tgi
config:
url: http://127.0.0.1:5009
- provider_id: test-tgi-2
provider_type: remote::tgi
config:
url: http://127.0.0.1:5010

View file

@@ -65,7 +65,10 @@ async def test_eval(eval_settings):
model="Llama3.2-1B-Instruct",
sampling_params=SamplingParams(),
),
scoring_functions=["subset_of"],
scoring_functions=[
"meta-reference::subset_of",
"meta-reference::llm_as_judge_8b_correctness",
],
)
assert response.job_id == "0"
job_status = await eval_impl.job_status(response.job_id)
@@ -74,6 +77,8 @@ async def test_eval(eval_settings):
eval_response = await eval_impl.job_result(response.job_id)
print(eval_response)
assert eval_response is not None
assert len(eval_response.generations) == 5
assert "subset_of" in eval_response.scores
assert "meta-reference::subset_of" in eval_response.scores
assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores