Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-01 16:24:44 +00:00)
test full generation + eval
Commit 247a53d393 (parent 16620a8185)
5 changed files with 15 additions and 3 deletions
@@ -97,7 +97,6 @@ class InferenceRouter(Inference):
             logprobs=logprobs,
         )
         provider = self.routing_table.get_provider_impl(model)
-
         if stream:
             return (chunk async for chunk in await provider.chat_completion(**params))
         else:
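The streaming branch above returns an async generator expression wrapped around the awaited provider stream. Below is a minimal, self-contained sketch of the same pattern with stand-in names (these are not llama-stack APIs):

import asyncio
from typing import AsyncIterator


async def _chunks() -> AsyncIterator[str]:
    # Stand-in for the provider's streaming response.
    for piece in ("Hel", "lo", "!"):
        await asyncio.sleep(0)
        yield piece


async def provider_chat_completion(**params) -> AsyncIterator[str]:
    # Coroutine that returns an async iterator, so callers must await it first,
    # mirroring `await provider.chat_completion(**params)` in the router code.
    return _chunks()


async def chat_completion(stream: bool = True, **params):
    if stream:
        # Same shape as the router branch: wrap the awaited stream in an
        # async generator expression and hand it straight back to the caller.
        return (chunk async for chunk in await provider_chat_completion(**params))
    raise NotImplementedError("non-streaming path omitted in this sketch")


async def main() -> None:
    async for chunk in await chat_completion(stream=True):
        print(chunk, end="")
    print()


asyncio.run(main())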
@@ -18,6 +18,7 @@ from .config import MetaReferenceEvalConfig


 class ColumnName(Enum):
+    input_query = "input_query"
     expected_answer = "expected_answer"
     chat_completion_input = "chat_completion_input"
     completion_input = "completion_input"
@@ -53,10 +54,12 @@ class MetaReferenceEvalImpl(Eval):

         expected_schemas = [
             {
+                ColumnName.input_query.value: StringType(),
                 ColumnName.expected_answer.value: StringType(),
                 ColumnName.chat_completion_input.value: ChatCompletionInputType(),
             },
             {
+                ColumnName.input_query.value: StringType(),
                 ColumnName.expected_answer.value: StringType(),
                 ColumnName.completion_input.value: CompletionInputType(),
             },
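The two dicts in expected_schemas read as the column layouts the eval implementation will accept. A minimal sketch of how such a list could be used to validate a candidate dataset schema; the helper name below is hypothetical and the actual validation code is not part of this hunk:

# Hypothetical helper, not from this commit: checks a dataset's column->type
# mapping against the accepted layouts. Assumes the column type objects
# (StringType, ChatCompletionInputType, ...) compare by value.
def validate_eval_dataset_schema(dataset_schema: dict, expected_schemas: list) -> None:
    if dataset_schema not in expected_schemas:
        raise ValueError(
            f"Dataset schema {dataset_schema} does not match any of the "
            f"expected schemas: {expected_schemas}"
        )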
@@ -70,6 +70,7 @@ async def register_dataset(
     if for_generation:
         dataset_schema = {
             "expected_answer": StringType(),
+            "input_query": StringType(),
             "chat_completion_input": ChatCompletionInputType(),
         }
     else:
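Concretely, a dataset registered with for_generation=True now has to carry an input_query column alongside expected_answer and chat_completion_input. A hypothetical row is sketched below; the exact serialization of a chat_completion_input value is not shown in this diff, so the message payload is only indicative:

# Hypothetical example row for a generation-time eval dataset.
example_row = {
    "input_query": "What is the capital of France?",
    "expected_answer": "Paris",
    "chat_completion_input": '[{"role": "user", "content": "What is the capital of France?"}]',
}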
@@ -16,3 +16,7 @@ providers:
     provider_type: remote::tgi
     config:
       url: http://127.0.0.1:5009
+  - provider_id: test-tgi-2
+    provider_type: remote::tgi
+    config:
+      url: http://127.0.0.1:5010
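The test run config now lists two local TGI servers (ports 5009 and 5010). A throwaway sketch for confirming both endpoints are reachable before running the test, assuming text-generation-inference's /health route:

# Quick reachability check for the two TGI endpoints in the test config.
# Assumes TGI's /health endpoint; adjust if your deployment differs.
import requests

for url in ("http://127.0.0.1:5009", "http://127.0.0.1:5010"):
    resp = requests.get(f"{url}/health", timeout=5)
    print(url, "->", resp.status_code)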
@@ -65,7 +65,10 @@ async def test_eval(eval_settings):
             model="Llama3.2-1B-Instruct",
             sampling_params=SamplingParams(),
         ),
-        scoring_functions=["subset_of"],
+        scoring_functions=[
+            "meta-reference::subset_of",
+            "meta-reference::llm_as_judge_8b_correctness",
+        ],
     )
     assert response.job_id == "0"
     job_status = await eval_impl.job_status(response.job_id)
@@ -74,6 +77,8 @@ async def test_eval(eval_settings):

     eval_response = await eval_impl.job_result(response.job_id)

+    print(eval_response)
     assert eval_response is not None
     assert len(eval_response.generations) == 5
-    assert "subset_of" in eval_response.scores
+    assert "meta-reference::subset_of" in eval_response.scores
+    assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores
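Since eval_response.scores is keyed by the namespaced scoring-function identifiers, the result for each requested function can be pulled out directly. A small follow-up sketch; only generations and scores are attributes confirmed by this diff:

# Sketch: confirm every requested scoring function produced a result and print it.
requested = [
    "meta-reference::subset_of",
    "meta-reference::llm_as_judge_8b_correctness",
]
for fn_id in requested:
    assert fn_id in eval_response.scores
    print(fn_id, "->", eval_response.scores[fn_id])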