fix eval test w/ tools

This commit is contained in:
Xi Yan 2025-01-15 11:45:59 -08:00
parent d0a25dd453
commit b7e59ba002
2 changed files with 15 additions and 1 deletions

View file

@@ -15,6 +15,7 @@ from ..inference.fixtures import INFERENCE_FIXTURES
from ..memory.fixtures import MEMORY_FIXTURES
from ..safety.fixtures import SAFETY_FIXTURES
from ..scoring.fixtures import SCORING_FIXTURES
from ..tools.fixtures import TOOL_RUNTIME_FIXTURES
from .fixtures import EVAL_FIXTURES
DEFAULT_PROVIDER_COMBINATIONS = [
@@ -27,6 +28,7 @@ DEFAULT_PROVIDER_COMBINATIONS = [
"agents": "meta_reference",
"safety": "llama_guard",
"memory": "faiss",
"tool_runtime": "memory_and_search",
},
id="meta_reference_eval_fireworks_inference",
marks=pytest.mark.meta_reference_eval_fireworks_inference,
@@ -40,6 +42,7 @@ DEFAULT_PROVIDER_COMBINATIONS = [
"agents": "meta_reference",
"safety": "llama_guard",
"memory": "faiss",
"tool_runtime": "memory_and_search",
},
id="meta_reference_eval_together_inference",
marks=pytest.mark.meta_reference_eval_together_inference,
@@ -53,6 +56,7 @@ DEFAULT_PROVIDER_COMBINATIONS = [
"agents": "meta_reference",
"safety": "llama_guard",
"memory": "faiss",
"tool_runtime": "memory_and_search",
},
id="meta_reference_eval_together_inference_huggingface_datasetio",
marks=pytest.mark.meta_reference_eval_together_inference_huggingface_datasetio,
@@ -98,6 +102,7 @@ def pytest_generate_tests(metafunc):
"agents": AGENTS_FIXTURES,
"safety": SAFETY_FIXTURES,
"memory": MEMORY_FIXTURES,
"tool_runtime": TOOL_RUNTIME_FIXTURES,
}
combinations = (
get_provider_fixture_overrides(metafunc.config, available_fixtures)

View file

@@ -35,7 +35,13 @@ EVAL_FIXTURES = ["meta_reference", "remote"]
@pytest_asyncio.fixture(scope="session")
async def eval_stack(request, inference_model, judge_model):
async def eval_stack(
request,
inference_model,
judge_model,
tool_group_input_memory,
tool_group_input_tavily_search,
):
fixture_dict = request.param
providers = {}
@@ -48,6 +54,7 @@ async def eval_stack(request, inference_model, judge_model):
"agents",
"safety",
"memory",
"tool_runtime",
]:
fixture = request.getfixturevalue(f"{key}_{fixture_dict[key]}")
providers[key] = fixture.providers
@@ -63,6 +70,7 @@ async def eval_stack(request, inference_model, judge_model):
Api.agents,
Api.safety,
Api.memory,
Api.tool_runtime,
],
providers,
provider_data,
@@ -73,6 +81,7 @@ async def eval_stack(request, inference_model, judge_model):
judge_model,
]
],
tool_groups=[tool_group_input_memory, tool_group_input_tavily_search],
)
return test_stack.impls