llama-stack-mirror/llama_stack/providers/tests/vector_io/conftest.py
Bill Murdock 32d1e50a6f
test: Add qdrant to provider tests (#1039)
# What does this PR do?

This is a follow-on to #1022. It includes the changes I needed to test
the Qdrant support, as requested by @terrytangyuan.

I uncovered some larger, more systemic issues with the vector DB
testing and will open a new issue for those. For now, I am just
delivering the work I have already done on that.

## Test Plan

As discussed on #1022, start a local Qdrant instance:

```
podman pull qdrant/qdrant
mkdir qdrant-data
podman run -p 6333:6333 -v $(pwd)/qdrant-data:/qdrant/storage qdrant/qdrant
```


```
ollama pull all-minilm:l6-v2
curl http://localhost:11434/api/embeddings -d '{"model": "all-minilm", "prompt": "Hello world"}'
```

```
EMBEDDING_DIMENSION=384 QDRANT_URL=http://localhost pytest llama_stack/providers/tests/vector_io/test_vector_io.py -m "qdrant" -v -s --tb=short --embedding-model all-minilm:latest --disable-warnings
```

This shows 3 tests passing and 15 deselected, which is presumably working
as intended.

---------

Signed-off-by: Bill Murdock <bmurdock@redhat.com>
2025-02-13 15:44:55 -08:00


# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import pytest

from ..conftest import (
    get_provider_fixture_overrides,
    get_provider_fixture_overrides_from_test_config,
    get_test_config_for_api,
)
from ..inference.fixtures import INFERENCE_FIXTURES
from .fixtures import VECTOR_IO_FIXTURES
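
# Each entry pairs an inference provider with a vector_io provider; the pytest
# marker attached to each param controls which combinations `-m` selects.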
DEFAULT_PROVIDER_COMBINATIONS = [
    pytest.param(
        {
            "inference": "sentence_transformers",
            "vector_io": "faiss",
        },
        id="sentence_transformers",
        marks=pytest.mark.sentence_transformers,
    ),
    pytest.param(
        {
            "inference": "ollama",
            "vector_io": "pgvector",
        },
        id="pgvector",
        marks=pytest.mark.pgvector,
    ),
    pytest.param(
        {
            "inference": "ollama",
            "vector_io": "faiss",
        },
        id="ollama",
        marks=pytest.mark.ollama,
    ),
    pytest.param(
        {
            "inference": "ollama",
            "vector_io": "sqlite_vec",
        },
        id="sqlite_vec",
        marks=pytest.mark.ollama,
    ),
    pytest.param(
        {
            "inference": "sentence_transformers",
            "vector_io": "chroma",
        },
        id="chroma",
        marks=pytest.mark.chroma,
    ),
    pytest.param(
        {
            "inference": "ollama",
            "vector_io": "qdrant",
        },
        id="qdrant",
        marks=pytest.mark.qdrant,
    ),
    pytest.param(
        {
            "inference": "fireworks",
            "vector_io": "weaviate",
        },
        id="weaviate",
        marks=pytest.mark.weaviate,
    ),
]
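# Note: the sqlite_vec combination reuses pytest.mark.ollama, so
# `-m "sqlite_vec"` alone selects nothing; use `-m "ollama"` to include it.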


def pytest_configure(config):
    for fixture_name in VECTOR_IO_FIXTURES:
        config.addinivalue_line(
            "markers",
            f"{fixture_name}: marks tests as {fixture_name} specific",
        )
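

# Two parametrization steps: choose the embedding model (test config, then the
# --embedding-model CLI option, then the all-minilm:l6-v2 default) and build
# the (inference, vector_io) provider combinations, where test-config or CLI
# overrides take precedence over DEFAULT_PROVIDER_COMBINATIONS.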
def pytest_generate_tests(metafunc):
    test_config = get_test_config_for_api(metafunc.config, "vector_io")
    if "embedding_model" in metafunc.fixturenames:
        model = getattr(test_config, "embedding_model", None)
        # Fall back to the default if not specified by the config file
        model = model or metafunc.config.getoption("--embedding-model")
        if model:
            params = [pytest.param(model, id="")]
        else:
            params = [pytest.param("all-minilm:l6-v2", id="")]
        metafunc.parametrize("embedding_model", params, indirect=True)

    if "vector_io_stack" in metafunc.fixturenames:
        available_fixtures = {
            "inference": INFERENCE_FIXTURES,
            "vector_io": VECTOR_IO_FIXTURES,
        }
        combinations = (
            get_provider_fixture_overrides_from_test_config(metafunc.config, "vector_io", DEFAULT_PROVIDER_COMBINATIONS)
            or get_provider_fixture_overrides(metafunc.config, available_fixtures)
            or DEFAULT_PROVIDER_COMBINATIONS
        )
        metafunc.parametrize("vector_io_stack", combinations, indirect=True)