Merge branch 'main' into feat/litellm_sambanova_usage

Repository: https://github.com/meta-llama/llama-stack.git
Commit: 5bd1bd30e2
76 changed files with 3534 additions and 2843 deletions

@@ -271,7 +271,7 @@ def test_custom_tool(llama_stack_client_with_mocked_inference, agent_config):
     client_tool = get_boiling_point
     agent_config = {
         **agent_config,
-        "tools": ["builtin::websearch", client_tool],
+        "tools": [client_tool],
     }

     agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)

@@ -320,42 +320,55 @@ def test_custom_tool_infinite_loop(llama_stack_client_with_mocked_inference, age
     assert num_tool_calls <= 5


-def test_tool_choice(llama_stack_client_with_mocked_inference, agent_config):
-    def run_agent(tool_choice):
-        client_tool = get_boiling_point
-
-        test_agent_config = {
-            **agent_config,
-            "tool_config": {"tool_choice": tool_choice},
-            "tools": [client_tool],
-        }
-
-        agent = Agent(llama_stack_client_with_mocked_inference, **test_agent_config)
-        session_id = agent.create_session(f"test-session-{uuid4()}")
-
-        response = agent.create_turn(
-            messages=[
-                {
-                    "role": "user",
-                    "content": "What is the boiling point of polyjuice?",
-                },
-            ],
-            session_id=session_id,
-            stream=False,
-        )
-
-        return [step for step in response.steps if step.step_type == "tool_execution"]
-
-    tool_execution_steps = run_agent("required")
+def test_tool_choice_required(llama_stack_client_with_mocked_inference, agent_config):
+    tool_execution_steps = run_agent_with_tool_choice(
+        llama_stack_client_with_mocked_inference, agent_config, "required"
+    )
     assert len(tool_execution_steps) > 0

-    tool_execution_steps = run_agent("none")
+
+def test_tool_choice_none(llama_stack_client_with_mocked_inference, agent_config):
+    tool_execution_steps = run_agent_with_tool_choice(llama_stack_client_with_mocked_inference, agent_config, "none")
     assert len(tool_execution_steps) == 0

-    tool_execution_steps = run_agent("get_boiling_point")
+
+def test_tool_choice_get_boiling_point(llama_stack_client_with_mocked_inference, agent_config):
+    if "llama" not in agent_config["model"].lower():
+        pytest.xfail("NotImplemented for non-llama models")
+
+    tool_execution_steps = run_agent_with_tool_choice(
+        llama_stack_client_with_mocked_inference, agent_config, "get_boiling_point"
+    )
     assert len(tool_execution_steps) >= 1 and tool_execution_steps[0].tool_calls[0].tool_name == "get_boiling_point"


+def run_agent_with_tool_choice(client, agent_config, tool_choice):
+    client_tool = get_boiling_point
+
+    test_agent_config = {
+        **agent_config,
+        "tool_config": {"tool_choice": tool_choice},
+        "tools": [client_tool],
+        "max_infer_iters": 2,
+    }
+
+    agent = Agent(client, **test_agent_config)
+    session_id = agent.create_session(f"test-session-{uuid4()}")
+
+    response = agent.create_turn(
+        messages=[
+            {
+                "role": "user",
+                "content": "What is the boiling point of polyjuice?",
+            },
+        ],
+        session_id=session_id,
+        stream=False,
+    )
+
+    return [step for step in response.steps if step.step_type == "tool_execution"]
+
+
 @pytest.mark.parametrize("rag_tool_name", ["builtin::rag/knowledge_search", "builtin::rag"])
 def test_rag_agent(llama_stack_client_with_mocked_inference, agent_config, rag_tool_name):
     urls = ["chat.rst", "llama3.rst", "memory_optimizations.rst", "lora_finetune.rst"]
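
The hunk above splits the old monolithic test_tool_choice into three focused tests plus a shared module-level helper. A minimal sketch of how a further case could reuse that helper; the "auto" value and the test name are illustrative and not part of this diff:

def test_tool_choice_auto(llama_stack_client_with_mocked_inference, agent_config):
    # Hypothetical follow-up test reusing run_agent_with_tool_choice from the hunk above.
    # With "auto" the model decides whether to call the tool, so only the shape is checked.
    tool_execution_steps = run_agent_with_tool_choice(
        llama_stack_client_with_mocked_inference, agent_config, "auto"
    )
    assert isinstance(tool_execution_steps, list)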

@@ -571,7 +584,7 @@ def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_conf
     [(get_boiling_point, False), (get_boiling_point_with_metadata, True)],
 )
 def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_config, client_tools):
-    client_tool, expectes_metadata = client_tools
+    client_tool, expects_metadata = client_tools
     agent_config = {
         **agent_config,
         "input_shields": [],

@@ -597,7 +610,7 @@ def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_co
     assert steps[0].step_type == "inference"
     assert steps[1].step_type == "tool_execution"
     assert steps[1].tool_calls[0].tool_name.startswith("get_boiling_point")
-    if expectes_metadata:
+    if expects_metadata:
         assert steps[1].tool_responses[0].metadata["source"] == "https://www.google.com"
     assert steps[2].step_type == "inference"


@@ -609,3 +622,44 @@ def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_co
         assert last_step_completed_at < step.started_at
         assert step.started_at < step.completed_at
         last_step_completed_at = step.completed_at
+
+
+def test_multi_tool_calls(llama_stack_client_with_mocked_inference, agent_config):
+    if "gpt" not in agent_config["model"]:
+        pytest.xfail("Only tested on GPT models")
+
+    agent_config = {
+        **agent_config,
+        "tools": [get_boiling_point],
+    }
+
+    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    session_id = agent.create_session(f"test-session-{uuid4()}")
+
+    response = agent.create_turn(
+        messages=[
+            {
+                "role": "user",
+                "content": "Call get_boiling_point twice to answer: What is the boiling point of polyjuice in both celsius and fahrenheit?",
+            },
+        ],
+        session_id=session_id,
+        stream=False,
+    )
+    steps = response.steps
+    assert len(steps) == 7
+    assert steps[0].step_type == "shield_call"
+    assert steps[1].step_type == "inference"
+    assert steps[2].step_type == "shield_call"
+    assert steps[3].step_type == "tool_execution"
+    assert steps[4].step_type == "shield_call"
+    assert steps[5].step_type == "inference"
+    assert steps[6].step_type == "shield_call"
+
+    tool_execution_step = steps[3]
+    assert len(tool_execution_step.tool_calls) == 2
+    assert tool_execution_step.tool_calls[0].tool_name.startswith("get_boiling_point")
+    assert tool_execution_step.tool_calls[1].tool_name.startswith("get_boiling_point")
+
+    output = response.output_message.content.lower()
+    assert "-100" in output and "-212" in output
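
The new test_multi_tool_calls pins an exact seven-step shield/inference/tool_execution sequence. A small helper like the sketch below, which is not part of the diff, would let the same expectation be written as a single list comparison:

def step_types(response) -> list[str]:
    # Collapse a turn's steps to their step_type sequence for one-line assertions.
    return [step.step_type for step in response.steps]


def assert_multi_tool_call_shape(response):
    # Equivalent to the seven individual step_type asserts in the test above.
    assert step_types(response) == [
        "shield_call",
        "inference",
        "shield_call",
        "tool_execution",
        "shield_call",
        "inference",
        "shield_call",
    ]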

@@ -1,114 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import base64
-import mimetypes
-import os
-from pathlib import Path
-
-import pytest
-
-# How to run this test:
-#
-# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasetio
-
-
-@pytest.fixture
-def dataset_for_test(llama_stack_client):
-    dataset_id = "test_dataset"
-    register_dataset(llama_stack_client, dataset_id=dataset_id)
-    yield
-    # Teardown - this always runs, even if the test fails
-    try:
-        llama_stack_client.datasets.unregister(dataset_id)
-    except Exception as e:
-        print(f"Warning: Failed to unregister test_dataset: {e}")
-
-
-def data_url_from_file(file_path: str) -> str:
-    if not os.path.exists(file_path):
-        raise FileNotFoundError(f"File not found: {file_path}")
-
-    with open(file_path, "rb") as file:
-        file_content = file.read()
-
-    base64_content = base64.b64encode(file_content).decode("utf-8")
-    mime_type, _ = mimetypes.guess_type(file_path)
-
-    data_url = f"data:{mime_type};base64,{base64_content}"
-
-    return data_url
-
-
-def register_dataset(llama_stack_client, for_generation=False, for_rag=False, dataset_id="test_dataset"):
-    if for_rag:
-        test_file = Path(os.path.abspath(__file__)).parent / "test_rag_dataset.csv"
-    else:
-        test_file = Path(os.path.abspath(__file__)).parent / "test_dataset.csv"
-    test_url = data_url_from_file(str(test_file))
-
-    if for_generation:
-        dataset_schema = {
-            "expected_answer": {"type": "string"},
-            "input_query": {"type": "string"},
-            "chat_completion_input": {"type": "chat_completion_input"},
-        }
-    elif for_rag:
-        dataset_schema = {
-            "expected_answer": {"type": "string"},
-            "input_query": {"type": "string"},
-            "generated_answer": {"type": "string"},
-            "context": {"type": "string"},
-        }
-    else:
-        dataset_schema = {
-            "expected_answer": {"type": "string"},
-            "input_query": {"type": "string"},
-            "generated_answer": {"type": "string"},
-        }
-
-    dataset_providers = [x for x in llama_stack_client.providers.list() if x.api == "datasetio"]
-    dataset_provider_id = dataset_providers[0].provider_id
-
-    llama_stack_client.datasets.register(
-        dataset_id=dataset_id,
-        dataset_schema=dataset_schema,
-        url=dict(uri=test_url),
-        provider_id=dataset_provider_id,
-    )
-
-
-def test_register_unregister_dataset(llama_stack_client):
-    register_dataset(llama_stack_client)
-    response = llama_stack_client.datasets.list()
-    assert isinstance(response, list)
-    assert len(response) == 1
-    assert response[0].identifier == "test_dataset"
-
-    llama_stack_client.datasets.unregister("test_dataset")
-    response = llama_stack_client.datasets.list()
-    assert isinstance(response, list)
-    assert len(response) == 0
-
-
-def test_get_rows_paginated(llama_stack_client, dataset_for_test):
-    response = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert isinstance(response.rows, list)
-    assert len(response.rows) == 3
-    assert response.next_page_token == "3"
-
-    # iterate over all rows
-    response = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=2,
-        page_token=response.next_page_token,
-    )
-    assert isinstance(response.rows, list)
-    assert len(response.rows) == 2
-    assert response.next_page_token == "5"
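
The deleted module above drove everything through register_dataset and datasetio.get_rows_paginated; the new file that follows moves to datasets.register plus iterrows. A rough sketch of the new call shape, where the helper name is ours and the keyword arguments are taken from the diff:

def fetch_first_rows(client, uri: str, limit: int = 3):
    # purpose/source replace the old dataset_schema/url/provider_id registration style.
    dataset = client.datasets.register(
        purpose="eval/messages-answer",
        source={"type": "uri", "uri": uri},
    )
    # iterrows(limit=...) replaces get_rows_paginated(rows_in_page=...);
    # rows are returned in `.data` rather than `.rows`.
    page = client.datasets.iterrows(dataset.identifier, limit=limit)
    return page.data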

tests/integration/datasets/test_datasets.py (new file, 95 lines)

@@ -0,0 +1,95 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+import base64
+import mimetypes
+import os
+
+import pytest
+
+# How to run this test:
+#
+# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasets
+
+
+def data_url_from_file(file_path: str) -> str:
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File not found: {file_path}")
+
+    with open(file_path, "rb") as file:
+        file_content = file.read()
+
+    base64_content = base64.b64encode(file_content).decode("utf-8")
+    mime_type, _ = mimetypes.guess_type(file_path)
+
+    data_url = f"data:{mime_type};base64,{base64_content}"
+
+    return data_url
+
+
+@pytest.mark.parametrize(
+    "purpose, source, provider_id, limit",
+    [
+        (
+            "eval/messages-answer",
+            {
+                "type": "uri",
+                "uri": "huggingface://datasets/llamastack/simpleqa?split=train",
+            },
+            "huggingface",
+            10,
+        ),
+        (
+            "eval/messages-answer",
+            {
+                "type": "rows",
+                "rows": [
+                    {
+                        "messages": [{"role": "user", "content": "Hello, world!"}],
+                        "answer": "Hello, world!",
+                    },
+                    {
+                        "messages": [
+                            {
+                                "role": "user",
+                                "content": "What is the capital of France?",
+                            }
+                        ],
+                        "answer": "Paris",
+                    },
+                ],
+            },
+            "localfs",
+            2,
+        ),
+        (
+            "eval/messages-answer",
+            {
+                "type": "uri",
+                "uri": data_url_from_file(os.path.join(os.path.dirname(__file__), "test_dataset.csv")),
+            },
+            "localfs",
+            5,
+        ),
+    ],
+)
+def test_register_and_iterrows(llama_stack_client, purpose, source, provider_id, limit):
+    dataset = llama_stack_client.datasets.register(
+        purpose=purpose,
+        source=source,
+    )
+    assert dataset.identifier is not None
+    assert dataset.provider_id == provider_id
+    iterrow_response = llama_stack_client.datasets.iterrows(dataset.identifier, limit=limit)
+    assert len(iterrow_response.data) == limit
+
+    dataset_list = llama_stack_client.datasets.list()
+    assert dataset.identifier in [d.identifier for d in dataset_list]
+
+    llama_stack_client.datasets.unregister(dataset.identifier)
+    dataset_list = llama_stack_client.datasets.list()
+    assert dataset.identifier not in [d.identifier for d in dataset_list]
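
For reference, data_url_from_file just base64-encodes the file behind a data: URI with a guessed MIME type. A quick self-contained check of that behaviour, assuming a .csv suffix maps to text/csv on the host; the helper name here is illustrative:

import base64
import tempfile


def check_data_url_roundtrip():
    # Write a tiny CSV, build a data URL with the helper above, and decode it back.
    with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as f:
        f.write(b"input_query,expected_answer\nhi,hello\n")
        path = f.name

    url = data_url_from_file(path)
    assert url.startswith("data:text/csv;base64,")
    payload = url.split(",", 1)[1]
    assert base64.b64decode(payload).startswith(b"input_query")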

@@ -4,10 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import uuid
+from pathlib import Path

 import pytest

-from ..datasetio.test_datasetio import register_dataset
+from ..datasets.test_datasets import data_url_from_file

 # How to run this test:
 #

@@ -16,15 +17,21 @@ from ..datasetio.test_datasetio import register_dataset

 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
-    response = llama_stack_client.datasets.list()
-    assert any(x.identifier == "test_dataset_for_eval" for x in response)
-
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset_for_eval",
-        rows_in_page=3,
+    dataset = llama_stack_client.datasets.register(
+        purpose="eval/messages-answer",
+        source={
+            "type": "uri",
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
+        },
     )
-    assert len(rows.rows) == 3
+    response = llama_stack_client.datasets.list()
+    assert any(x.identifier == dataset.identifier for x in response)
+
+    rows = llama_stack_client.datasets.iterrows(
+        dataset_id=dataset.identifier,
+        limit=3,
+    )
+    assert len(rows.data) == 3

     scoring_functions = [
         scoring_fn_id,

@@ -32,7 +39,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
-        dataset_id="test_dataset_for_eval",
+        dataset_id=dataset.identifier,
         scoring_functions=scoring_functions,
     )
     list_benchmarks = llama_stack_client.benchmarks.list()

@@ -40,7 +47,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):

     response = llama_stack_client.eval.evaluate_rows(
         benchmark_id=benchmark_id,
-        input_rows=rows.rows,
+        input_rows=rows.data,
         scoring_functions=scoring_functions,
         benchmark_config={
             "eval_candidate": {

@@ -59,11 +66,17 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):

 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
+    dataset = llama_stack_client.datasets.register(
+        purpose="eval/messages-answer",
+        source={
+            "type": "uri",
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
+        },
+    )
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
-        dataset_id="test_dataset_for_eval_2",
+        dataset_id=dataset.identifier,
         scoring_functions=[scoring_fn_id],
     )

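
Taken together, the eval hunks follow one flow: register a dataset from a data URL, bind a benchmark to it, then evaluate rows pulled with iterrows. Condensed into one hedged sketch; the function name and parameters are ours, and benchmark_config is passed through untouched because the diff only shows its "eval_candidate" key:

import uuid


def run_benchmark_eval(client, csv_path, benchmark_config, scoring_fn_id="basic::equality"):
    # Register the test CSV as an eval dataset via a data: URL.
    dataset = client.datasets.register(
        purpose="eval/messages-answer",
        source={"type": "uri", "uri": data_url_from_file(str(csv_path))},
    )

    # Bind a benchmark to the dataset and the chosen scoring function.
    benchmark_id = str(uuid.uuid4())
    client.benchmarks.register(
        benchmark_id=benchmark_id,
        dataset_id=dataset.identifier,
        scoring_functions=[scoring_fn_id],
    )

    # Pull a few rows and score them; evaluate_rows now takes rows.data.
    rows = client.datasets.iterrows(dataset_id=dataset.identifier, limit=3)
    return client.eval.evaluate_rows(
        benchmark_id=benchmark_id,
        input_rows=rows.data,
        scoring_functions=[scoring_fn_id],
        benchmark_config=benchmark_config,
    )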

@@ -5,23 +5,11 @@
 # the root directory of this source tree.


+from pathlib import Path
+
+import pandas as pd
 import pytest

-from ..datasetio.test_datasetio import register_dataset
-
-
-@pytest.fixture
-def rag_dataset_for_test(llama_stack_client):
-    dataset_id = "test_dataset"
-    register_dataset(llama_stack_client, for_rag=True, dataset_id=dataset_id)
-    yield  # This is where the test function will run
-
-    # Teardown - this always runs, even if the test fails
-    try:
-        llama_stack_client.datasets.unregister(dataset_id)
-    except Exception as e:
-        print(f"Warning: Failed to unregister test_dataset: {e}")
-

 @pytest.fixture
 def sample_judge_prompt_template():

@@ -92,49 +80,34 @@ def test_scoring_functions_register(
     # TODO: add unregister api for scoring functions


-def test_scoring_score(llama_stack_client, rag_dataset_for_test):
+@pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
+def test_scoring_score(llama_stack_client, scoring_fn_id):
     # scoring individual rows
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert len(rows.rows) == 3
+    df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
+    rows = df.to_dict(orient="records")

-    scoring_fns_list = llama_stack_client.scoring_functions.list()
     scoring_functions = {
-        scoring_fns_list[0].identifier: None,
+        scoring_fn_id: None,
     }

     response = llama_stack_client.scoring.score(
-        input_rows=rows.rows,
+        input_rows=rows,
         scoring_functions=scoring_functions,
     )
-    assert len(response.results) == len(scoring_functions)
-    for x in scoring_functions:
-        assert x in response.results
-        assert len(response.results[x].score_rows) == len(rows.rows)
-
-    # score batch
-    response = llama_stack_client.scoring.score_batch(
-        dataset_id="test_dataset",
-        scoring_functions=scoring_functions,
-        save_results_dataset=False,
-    )
     assert len(response.results) == len(scoring_functions)
     for x in scoring_functions:
         assert x in response.results
-        assert len(response.results[x].score_rows) == 5
+        assert len(response.results[x].score_rows) == len(rows)


 def test_scoring_score_with_params_llm_as_judge(
-    llama_stack_client, sample_judge_prompt_template, judge_model_id, rag_dataset_for_test
+    llama_stack_client,
+    sample_judge_prompt_template,
+    judge_model_id,
 ):
     # scoring individual rows
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert len(rows.rows) == 3
+    df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
+    rows = df.to_dict(orient="records")

     scoring_functions = {
         "llm-as-judge::base": dict(
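
With the rag_dataset_for_test fixture gone, the scoring tests now build their input rows straight from the CSV via pandas. The pattern in isolation; the helper names are ours, while the scoring call mirrors the diff:

from pathlib import Path

import pandas as pd


def rows_from_csv(csv_path: Path) -> list[dict]:
    # One {column: value} dict per CSV row, the shape scoring.score expects for input_rows.
    return pd.read_csv(csv_path).to_dict(orient="records")


def score_with_equality(client, csv_path: Path):
    rows = rows_from_csv(csv_path)
    response = client.scoring.score(
        input_rows=rows,
        scoring_functions={"basic::equality": None},
    )
    # One result set per scoring function, one score row per input row.
    assert len(response.results["basic::equality"].score_rows) == len(rows)
    return response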

@@ -149,24 +122,13 @@ def test_scoring_score_with_params_llm_as_judge(
     }

     response = llama_stack_client.scoring.score(
-        input_rows=rows.rows,
+        input_rows=rows,
         scoring_functions=scoring_functions,
     )
-    assert len(response.results) == len(scoring_functions)
-    for x in scoring_functions:
-        assert x in response.results
-        assert len(response.results[x].score_rows) == len(rows.rows)
-
-    # score batch
-    response = llama_stack_client.scoring.score_batch(
-        dataset_id="test_dataset",
-        scoring_functions=scoring_functions,
-        save_results_dataset=False,
-    )
     assert len(response.results) == len(scoring_functions)
     for x in scoring_functions:
         assert x in response.results
-        assert len(response.results[x].score_rows) == 5
+        assert len(response.results[x].score_rows) == len(rows)


 @pytest.mark.parametrize(

@@ -178,13 +140,14 @@ def test_scoring_score_with_params_llm_as_judge(
     ],
 )
 def test_scoring_score_with_aggregation_functions(
-    llama_stack_client, sample_judge_prompt_template, judge_model_id, provider_id, rag_dataset_for_test
+    llama_stack_client,
+    sample_judge_prompt_template,
+    judge_model_id,
+    provider_id,
+    rag_dataset_for_test,
 ):
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert len(rows.rows) == 3
+    df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
+    rows = df.to_dict(orient="records")

     scoring_fns_list = [x for x in llama_stack_client.scoring_functions.list() if x.provider_id == provider_id]
     if len(scoring_fns_list) == 0:

@@ -224,12 +187,12 @@ def test_scoring_score_with_aggregation_functions(
     scoring_functions[scoring_fn.identifier] = None

     response = llama_stack_client.scoring.score(
-        input_rows=rows.rows,
+        input_rows=rows,
         scoring_functions=scoring_functions,
     )

     assert len(response.results) == len(scoring_functions)
     for x in scoring_functions:
         assert x in response.results
-        assert len(response.results[x].score_rows) == len(rows.rows)
+        assert len(response.results[x].score_rows) == len(rows)
         assert len(response.results[x].aggregated_results) == len(aggr_fns)

@@ -187,8 +187,8 @@ def test_chat_completion_doesnt_block_event_loop(caplog):
     loop.set_debug(True)
     caplog.set_level(logging.WARNING)

-    # Log when event loop is blocked for more than 100ms
-    loop.slow_callback_duration = 0.1
+    # Log when event loop is blocked for more than 200ms
+    loop.slow_callback_duration = 0.2
     # Sleep for 500ms in our delayed http response
     sleep_time = 0.5

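
The last hunk only relaxes the event-loop watchdog from 100ms to 200ms. slow_callback_duration is a standard asyncio debug-mode setting; a standalone illustration of what the test relies on, independent of the test file:

import asyncio
import logging
import time

logging.basicConfig(level=logging.WARNING)


async def blocking_step():
    # A synchronous sleep blocks the loop; in debug mode asyncio logs any
    # callback slower than loop.slow_callback_duration on the "asyncio" logger.
    time.sleep(0.5)


def main():
    loop = asyncio.new_event_loop()
    loop.set_debug(True)
    loop.slow_callback_duration = 0.2  # warn when the loop is blocked for more than 200ms
    try:
        loop.run_until_complete(blocking_step())
    finally:
        loop.close()


if __name__ == "__main__":
    main()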