# What does this PR do?

- Configured the ruff linter to automatically fix import-sorting issues (a sketch of the relevant configuration follows below).
- Set `--exit-non-zero-on-fix` so the linter exits with a non-zero code whenever fixes are applied.
- Enabled the `I` rule selection to focus on import-related linting rules.
- Ran the linter and reformatted all imports across the codebase accordingly.
- Removed the `black` dependency from the "dev" group, since we use ruff.

## Test Plan

[Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.*]

Signed-off-by: Sébastien Han <seb@redhat.com>
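For reference, a minimal sketch of what the settings described above might look like in `pyproject.toml` (assuming ruff's `[tool.ruff.lint]` layout; the exact configuration is in the diff):

```toml
[tool.ruff.lint]
# "I" enables the isort-style import-sorting rules.
select = ["I"]
```

The corresponding check would then run along the lines of `ruff check --fix --exit-non-zero-on-fix`, where `--exit-non-zero-on-fix` makes CI fail whenever fixes had to be applied.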
75 lines
2.3 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import pytest

from ..conftest import get_provider_fixture_overrides
from ..datasetio.fixtures import DATASETIO_FIXTURES
from ..inference.fixtures import INFERENCE_FIXTURES
from .fixtures import SCORING_FIXTURES

DEFAULT_PROVIDER_COMBINATIONS = [
    pytest.param(
        {
            "scoring": "basic",
            "datasetio": "localfs",
            "inference": "together",
        },
        id="basic_scoring_together_inference",
        marks=pytest.mark.basic_scoring_together_inference,
    ),
    pytest.param(
        {
            "scoring": "braintrust",
            "datasetio": "localfs",
            "inference": "together",
        },
        id="braintrust_scoring_together_inference",
        marks=pytest.mark.braintrust_scoring_together_inference,
    ),
    pytest.param(
        {
            "scoring": "llm_as_judge",
            "datasetio": "localfs",
            "inference": "together",
        },
        id="llm_as_judge_scoring_together_inference",
        marks=pytest.mark.llm_as_judge_scoring_together_inference,
    ),
]
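# Each combination above carries both a pytest id and a same-named marker, so a
# single stack can be selected with pytest's standard marker filter, e.g.
# `pytest -m basic_scoring_together_inference`.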


def pytest_configure(config):
    # Register every combination marker so pytest does not emit
    # PytestUnknownMarkWarning (or reject the marks under --strict-markers).
    for fixture_name in [
        "basic_scoring_together_inference",
        "braintrust_scoring_together_inference",
        "llm_as_judge_scoring_together_inference",
    ]:
        config.addinivalue_line(
            "markers",
            f"{fixture_name}: marks tests as {fixture_name} specific",
        )


def pytest_generate_tests(metafunc):
    judge_model = metafunc.config.getoption("--judge-model")
    if "judge_model" in metafunc.fixturenames:
        metafunc.parametrize(
            "judge_model",
            [pytest.param(judge_model, id="")],
            indirect=True,
        )

    if "scoring_stack" in metafunc.fixturenames:
        available_fixtures = {
            "scoring": SCORING_FIXTURES,
            "datasetio": DATASETIO_FIXTURES,
            "inference": INFERENCE_FIXTURES,
        }
        # Command-line fixture overrides take precedence; otherwise fall back
        # to the default provider combinations defined above.
        combinations = (
            get_provider_fixture_overrides(metafunc.config, available_fixtures) or DEFAULT_PROVIDER_COMBINATIONS
        )
        metafunc.parametrize("scoring_stack", combinations, indirect=True)
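For context, a minimal sketch of how a test might consume this parametrization (the class, test name, and body are hypothetical, not from the source; the real tests live alongside this conftest):

```python
# Hypothetical test module: any test that declares `scoring_stack` as a
# parameter is collected once per provider combination.
class TestScoringStack:
    def test_runs_once_per_combination(self, scoring_stack):
        # `scoring_stack` is resolved indirectly by the fixture of the same
        # name in .fixtures; pytest instantiates one variant per combination
        # (or per override handled by get_provider_fixture_overrides).
        assert scoring_stack is not None
```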