Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-16 23:03:49 +00:00
yet another refactor to make this more general

now it also accepts --inference-model and --safety-model options
parent 2ed0267fbb
commit 60800bc09b
13 changed files with 127 additions and 61 deletions
@@ -64,16 +64,8 @@ def memory_weaviate() -> ProviderFixture:
 
 MEMORY_FIXTURES = ["meta_reference", "pgvector", "weaviate"]
 
 
-PROVIDER_PARAMS = [
-    pytest.param(fixture_name, marks=getattr(pytest.mark, fixture_name))
-    for fixture_name in MEMORY_FIXTURES
-]
-
-@pytest_asyncio.fixture(
-    scope="session",
-    params=PROVIDER_PARAMS,
-)
+@pytest_asyncio.fixture(scope="session")
 async def memory_stack(request):
     fixture_name = request.param
     fixture = request.getfixturevalue(f"memory_{fixture_name}")
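The hunk drops the inline PROVIDER_PARAMS parametrization from the memory fixture. One common way such parametrization gets centralized, together with the --inference-model and --safety-model options named in the commit message, is a shared conftest.py using pytest_addoption and pytest_generate_tests. The sketch below is an assumption for illustration only, not the contents of this commit; apart from the option names and MEMORY_FIXTURES, everything in it is hypothetical.

# conftest.py: a minimal sketch, assuming the removed PROVIDER_PARAMS list is
# replaced by a pytest_generate_tests hook; option names come from the commit
# message, all other names here are illustrative rather than taken from the PR.
import pytest

MEMORY_FIXTURES = ["meta_reference", "pgvector", "weaviate"]


def pytest_addoption(parser):
    # Hypothetical CLI options matching the flags named in the commit message.
    parser.addoption("--inference-model", action="store", default=None)
    parser.addoption("--safety-model", action="store", default=None)


def pytest_configure(config):
    # Register one marker per provider so "-m pgvector"-style selection keeps working.
    for name in MEMORY_FIXTURES:
        config.addinivalue_line("markers", f"{name}: tests using the {name} memory provider")


def pytest_generate_tests(metafunc):
    # Parametrize anything that requests `memory_stack`, replacing the old
    # params=PROVIDER_PARAMS argument on the fixture decorator.
    if "memory_stack" in metafunc.fixturenames:
        metafunc.parametrize(
            "memory_stack",
            [pytest.param(name, marks=getattr(pytest.mark, name)) for name in MEMORY_FIXTURES],
            indirect=True,
        )

With an arrangement like this, the simplified @pytest_asyncio.fixture(scope="session") fixture above still receives the provider name through request.param, a run can be narrowed to one provider with a marker such as -m pgvector, and the model options can be read back inside tests or fixtures via request.config.getoption("--inference-model").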