mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-12 12:06:04 +00:00
chore: Updating Vector IO integration tests to use llama stack build
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
This commit is contained in:
parent
a701f68bd7
commit
da7b39a3e3
13 changed files with 298 additions and 19 deletions
|
|
@ -153,6 +153,29 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
|
|||
"text_model": "groq/llama-3.3-70b-versatile",
|
||||
},
|
||||
),
|
||||
"milvus": Setup(
|
||||
name="milvus",
|
||||
description="Milvus vector database provider for vector_io tests",
|
||||
env={
|
||||
"MILVUS_URL": "dummy",
|
||||
},
|
||||
),
|
||||
"chromadb": Setup(
|
||||
name="chromadb",
|
||||
description="ChromaDB vector database provider for vector_io tests",
|
||||
env={
|
||||
"CHROMADB_URL": "http://localhost:8000",
|
||||
},
|
||||
),
|
||||
"pgvector": Setup(
|
||||
name="pgvector",
|
||||
description="PGVector database provider for vector_io tests",
|
||||
env={
|
||||
"PGVECTOR_DB": "llama_stack_test",
|
||||
"PGVECTOR_USER": "postgres",
|
||||
"PGVECTOR_PASSWORD": "password",
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -179,4 +202,9 @@ SUITE_DEFINITIONS: dict[str, Suite] = {
|
|||
roots=["tests/integration/inference/test_vision_inference.py"],
|
||||
default_setup="ollama-vision",
|
||||
),
|
||||
"vector_io": Suite(
|
||||
name="vector_io",
|
||||
roots=["tests/integration/vector_io"],
|
||||
default_setup="milvus",
|
||||
),
|
||||
}
|
||||
|
|
|
|||
88
tests/unit/distribution/test_single_provider_filter.py
Normal file
88
tests/unit/distribution/test_single_provider_filter.py
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
import pytest
|
||||
|
||||
from llama_stack.cli.stack._build import _apply_single_provider_filter
|
||||
from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
|
||||
from llama_stack.core.utils.image_types import LlamaStackImageType
|
||||
|
||||
|
||||
def test_filters_single_api():
    """A filter naming one API keeps only that provider there and leaves other APIs alone."""
    providers = {
        "vector_io": [
            BuildProvider(provider_type="inline::faiss"),
            BuildProvider(provider_type="inline::sqlite-vec"),
        ],
        "inference": [
            BuildProvider(provider_type="remote::openai"),
        ],
    }
    config = BuildConfig(
        image_type=LlamaStackImageType.VENV.value,
        distribution_spec=DistributionSpec(providers=providers, description="Test"),
    )

    result = _apply_single_provider_filter(config, "vector_io=inline::sqlite-vec")

    # Only the requested vector_io provider survives the filter.
    vector_io = result.distribution_spec.providers["vector_io"]
    assert [p.provider_type for p in vector_io] == ["inline::sqlite-vec"]
    # inference was not named in the filter string, so it is untouched.
    assert len(result.distribution_spec.providers["inference"]) == 1
|
||||
|
||||
|
||||
def test_filters_multiple_apis():
    """A comma-separated filter narrows every named API to its single provider."""
    providers = {
        "vector_io": [
            BuildProvider(provider_type="inline::faiss"),
            BuildProvider(provider_type="inline::sqlite-vec"),
        ],
        "inference": [
            BuildProvider(provider_type="remote::openai"),
            BuildProvider(provider_type="remote::anthropic"),
        ],
    }
    config = BuildConfig(
        image_type=LlamaStackImageType.VENV.value,
        distribution_spec=DistributionSpec(providers=providers, description="Test"),
    )

    result = _apply_single_provider_filter(config, "vector_io=inline::faiss,inference=remote::openai")

    # Each filtered API is reduced to exactly the provider named for it.
    filtered_providers = result.distribution_spec.providers
    assert [p.provider_type for p in filtered_providers["vector_io"]] == ["inline::faiss"]
    assert [p.provider_type for p in filtered_providers["inference"]] == ["remote::openai"]
|
||||
|
||||
|
||||
def test_provider_not_found_exits():
    """Naming a provider that the distribution does not contain raises SystemExit."""
    config = BuildConfig(
        image_type=LlamaStackImageType.VENV.value,
        distribution_spec=DistributionSpec(
            providers={"vector_io": [BuildProvider(provider_type="inline::faiss")]},
            description="Test",
        ),
    )

    # "inline::nonexistent" is not among the configured vector_io providers,
    # so the filter is expected to abort the build.
    with pytest.raises(SystemExit):
        _apply_single_provider_filter(config, "vector_io=inline::nonexistent")
|
||||
|
||||
|
||||
def test_invalid_format_exits():
    """A filter string without the required 'api=provider' shape raises SystemExit."""
    config = BuildConfig(
        image_type=LlamaStackImageType.VENV.value,
        distribution_spec=DistributionSpec(providers={}, description="Test"),
    )

    # Missing the '=' separator entirely — the parser should reject it.
    with pytest.raises(SystemExit):
        _apply_single_provider_filter(config, "invalid_format")
|
||||
Loading…
Add table
Add a link
Reference in a new issue