mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-18 03:07:16 +00:00
reduce the accuracy requirements to pass the chat completion structured output test
parent 34be07e0df
commit 107cd20e2b
1 changed file with 9 additions and 1 deletion
@@ -211,7 +211,15 @@ class TestInference:
         response = await inference_impl.chat_completion(
             model_id=inference_model,
             messages=[
-                SystemMessage(content="You are a helpful assistant."),
+                # we include context about Michael Jordan in the prompt so that the test is
+                # focused on the functionality of the model and not on the information embedded
+                # in the model. Llama 3.2 3B Instruct tends to think MJ played for 14 seasons.
+                SystemMessage(
+                    content=(
+                        "You are a helpful assistant.\n\n"
+                        "Michael Jordan was born in 1963. He played basketball for the Chicago Bulls for 15 seasons."
+                    )
+                ),
                 UserMessage(content="Please give me information about Michael Jordan."),
             ],
             stream=False,
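For context, the assertion side of a structured-output test like this one might look like the sketch below. The AnswerFormat schema, its field names, and the exact assertions are illustrative assumptions rather than a verbatim copy of the test file; the point is that an exact-match check on a field such as num_seasons_in_nba fails if the model answers 14 from its own weights instead of 15 from the prompt.

# A minimal sketch of a structured-output assertion, assuming a pydantic
# schema that the chat completion is asked to fill in. Names and fields
# here are hypothetical, chosen to match the prompt in the diff above.
from pydantic import BaseModel


class AnswerFormat(BaseModel):
    first_name: str
    last_name: str
    year_of_birth: int
    num_seasons_in_nba: int


def check_structured_answer(raw_completion: str) -> None:
    # Parse the model's JSON output against the schema; pydantic raises
    # on missing or mistyped fields.
    answer = AnswerFormat.model_validate_json(raw_completion)

    # Without the extra facts in the system prompt, a small model may
    # answer from memory (e.g. 14 seasons) and fail this exact match.
    assert answer.first_name == "Michael"
    assert answer.last_name == "Jordan"
    assert answer.year_of_birth == 1963
    assert answer.num_seasons_in_nba == 15


# Example: a completion that satisfies both the schema and the assertions.
check_structured_answer(
    '{"first_name": "Michael", "last_name": "Jordan", '
    '"year_of_birth": 1963, "num_seasons_in_nba": 15}'
)

Seeding the facts in the system prompt this way makes the test exercise the provider's structured-output decoding rather than the model's recall, which is what the commit message means by reducing the accuracy requirements.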