diff --git a/docs/source/distributions/remote_hosted_distro/nvidia.md b/docs/source/distributions/remote_hosted_distro/nvidia.md
index 4028ed384..e4c3a155f 100644
--- a/docs/source/distributions/remote_hosted_distro/nvidia.md
+++ b/docs/source/distributions/remote_hosted_distro/nvidia.md
@@ -8,11 +8,11 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::nvidia` |
-| memory | `inline::faiss` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss` |

 ### Environment Variables
diff --git a/docs/source/distributions/self_hosted_distro/bedrock.md b/docs/source/distributions/self_hosted_distro/bedrock.md
index dd4e51264..a66325560 100644
--- a/docs/source/distributions/self_hosted_distro/bedrock.md
+++ b/docs/source/distributions/self_hosted_distro/bedrock.md
@@ -15,11 +15,11 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::bedrock` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `remote::bedrock` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
diff --git a/docs/source/distributions/self_hosted_distro/cerebras.md b/docs/source/distributions/self_hosted_distro/cerebras.md
index 22e4125bd..211082b7a 100644
--- a/docs/source/distributions/self_hosted_distro/cerebras.md
+++ b/docs/source/distributions/self_hosted_distro/cerebras.md
@@ -8,11 +8,11 @@ The `llamastack/distribution-cerebras` distribution consists of the following pr
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::cerebras` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 ### Environment Variables
diff --git a/docs/source/distributions/self_hosted_distro/fireworks.md b/docs/source/distributions/self_hosted_distro/fireworks.md
index 7ed174984..39043b1c1 100644
--- a/docs/source/distributions/self_hosted_distro/fireworks.md
+++ b/docs/source/distributions/self_hosted_distro/fireworks.md
@@ -18,11 +18,11 @@ The `llamastack/distribution-fireworks` distribution consists of the following p
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::fireworks` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 ### Environment Variables
diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
index 269354e98..8475aab3a 100644
--- a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
+++ b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
@@ -18,11 +18,11 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `inline::meta-reference` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 Note that you need access to nvidia GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs.
diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md
index 937dbbdbd..6f1adb5a9 100644
--- a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md
+++ b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md
@@ -18,11 +18,11 @@ The `llamastack/distribution-meta-reference-quantized-gpu` distribution consists
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `inline::meta-reference-quantized` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 The only difference vs. the `meta-reference-gpu` distribution is that it has support for more efficient inference -- with fp8, int4 quantization, etc.
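The doc tables above all make the same substitution: the `memory` API row becomes `vector_io`. On the client side this corresponds to the `memory_banks` surface becoming `vector_dbs`, as the later hunks in this patch show. A minimal client-side sketch of the renamed surface, assuming a llama-stack-client build that includes this change and a distribution already running locally (the base_url is a placeholder, not part of this patch):

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")  # placeholder URL

# Before this patch: client.memory_banks.register(..., params={...}).
# After it, a vector DB is registered with a flat embedding spec.
client.vector_dbs.register(
    vector_db_id="my_docs",
    embedding_model="all-MiniLM-L6-v2",
    embedding_dimension=384,  # dimension of all-MiniLM-L6-v2 embeddings
    provider_id="faiss",
)

for vector_db in client.vector_dbs.list():
    print(vector_db.identifier)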
diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md
index e8e5dd397..f5ba31feb 100644
--- a/docs/source/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/distributions/self_hosted_distro/ollama.md
@@ -18,11 +18,11 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::ollama` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 You should use this distribution if you have a regular desktop machine without very powerful GPUs. Of course, if you have powerful GPUs, you can still continue using this distribution since Ollama supports GPU acceleration.

 ### Environment Variables
diff --git a/docs/source/distributions/self_hosted_distro/remote-vllm.md b/docs/source/distributions/self_hosted_distro/remote-vllm.md
index 2bb5329b9..c2b3544d3 100644
--- a/docs/source/distributions/self_hosted_distro/remote-vllm.md
+++ b/docs/source/distributions/self_hosted_distro/remote-vllm.md
@@ -17,11 +17,11 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::vllm` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference.
diff --git a/docs/source/distributions/self_hosted_distro/tgi.md b/docs/source/distributions/self_hosted_distro/tgi.md
index 0fd6a693c..c21a6a586 100644
--- a/docs/source/distributions/self_hosted_distro/tgi.md
+++ b/docs/source/distributions/self_hosted_distro/tgi.md
@@ -19,11 +19,11 @@ The `llamastack/distribution-tgi` distribution consists of the following provide
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::tgi` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 You can use this distribution if you have GPUs and want to run an independent TGI server container for running inference.
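These table renames track a second API change visible in the rag.py hunks later in this patch: document ingestion no longer goes through a `memory` API but through the RAG tool runtime. A sketch of the post-rename ingestion path, assuming the same `client` as above; the `Document` import path and document contents are placeholders based on the rag.py usage, not confirmed by this patch:

from llama_stack_client.types import Document  # assumed import path

documents = [
    Document(document_id="readme", content="...", mime_type="text/plain", metadata={}),
]

# Old path: client.memory.insert(bank_id=..., documents=...)
# New path: ingestion targets a registered vector DB via the RAG tool runtime.
client.tool_runtime.rag_tool.insert(
    vector_db_id="my_docs",
    documents=documents,
)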
diff --git a/docs/source/distributions/self_hosted_distro/together.md b/docs/source/distributions/self_hosted_distro/together.md
index e990e273f..65a711522 100644
--- a/docs/source/distributions/self_hosted_distro/together.md
+++ b/docs/source/distributions/self_hosted_distro/together.md
@@ -18,11 +18,11 @@ The `llamastack/distribution-together` distribution consists of the following pr
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::together` |
-| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime`, `remote::model-context-protocol` |
+| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

 ### Environment Variables
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py
index 20cb8f828..c19f28054 100644
--- a/llama_stack/apis/agents/agents.py
+++ b/llama_stack/apis/agents/agents.py
@@ -88,7 +88,7 @@ class MemoryRetrievalStep(StepCommon):
     step_type: Literal[StepType.memory_retrieval.value] = (
         StepType.memory_retrieval.value
     )
-    memory_bank_ids: List[str]
+    vector_db_ids: str
     inserted_context: InterleavedContent
diff --git a/llama_stack/apis/agents/event_logger.py b/llama_stack/apis/agents/event_logger.py
index 9e2f14805..ddb2a7cf4 100644
--- a/llama_stack/apis/agents/event_logger.py
+++ b/llama_stack/apis/agents/event_logger.py
@@ -208,7 +208,7 @@ class EventLogger:
         ):
             details = event.payload.step_details
             inserted_context = interleaved_content_as_str(details.inserted_context)
-            content = f"fetched {len(inserted_context)} bytes from {details.memory_bank_ids}"
+            content = f"fetched {len(inserted_context)} bytes from {details.vector_db_ids}"

             yield (
                 event,
diff --git a/llama_stack/apis/resource.py b/llama_stack/apis/resource.py
index dfe3ddb24..d0ce72644 100644
--- a/llama_stack/apis/resource.py
+++ b/llama_stack/apis/resource.py
@@ -37,5 +37,5 @@ class Resource(BaseModel):
     provider_id: str = Field(description="ID of the provider that owns this resource")

     type: ResourceType = Field(
-        description="Type of resource (e.g. 'model', 'shield', 'memory_bank', etc.)"
+        description="Type of resource (e.g. 'model', 'shield', 'vector_db', etc.)"
     )
diff --git a/llama_stack/distribution/store/tests/test_registry.py b/llama_stack/distribution/store/tests/test_registry.py
index 9c5b72f93..78d59a088 100644
--- a/llama_stack/distribution/store/tests/test_registry.py
+++ b/llama_stack/distribution/store/tests/test_registry.py
@@ -9,7 +9,7 @@ import os
 import pytest
 import pytest_asyncio
 from llama_stack.apis.inference import Model
-from llama_stack.apis.memory_banks import VectorMemoryBank
+from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.distribution.store.registry import (
     CachedDiskDistributionRegistry,
@@ -42,13 +42,12 @@ async def cached_registry(config):

 @pytest.fixture
-def sample_bank():
-    return VectorMemoryBank(
-        identifier="test_bank",
+def sample_vector_db():
+    return VectorDB(
+        identifier="test_vector_db",
         embedding_model="all-MiniLM-L6-v2",
-        chunk_size_in_tokens=512,
-        overlap_size_in_tokens=64,
-        provider_resource_id="test_bank",
+        embedding_dimension=384,
+        provider_resource_id="test_vector_db",
         provider_id="test-provider",
     )
@@ -70,19 +69,17 @@ async def test_registry_initialization(registry):

 @pytest.mark.asyncio
-async def test_basic_registration(registry, sample_bank, sample_model):
-    print(f"Registering {sample_bank}")
-    await registry.register(sample_bank)
+async def test_basic_registration(registry, sample_vector_db, sample_model):
+    print(f"Registering {sample_vector_db}")
+    await registry.register(sample_vector_db)
     print(f"Registering {sample_model}")
     await registry.register(sample_model)
-    print("Getting bank")
-    result_bank = await registry.get("memory_bank", "test_bank")
-    assert result_bank is not None
-    assert result_bank.identifier == sample_bank.identifier
-    assert result_bank.embedding_model == sample_bank.embedding_model
-    assert result_bank.chunk_size_in_tokens == sample_bank.chunk_size_in_tokens
-    assert result_bank.overlap_size_in_tokens == sample_bank.overlap_size_in_tokens
-    assert result_bank.provider_id == sample_bank.provider_id
+    print("Getting vector_db")
+    result_vector_db = await registry.get("vector_db", "test_vector_db")
+    assert result_vector_db is not None
+    assert result_vector_db.identifier == sample_vector_db.identifier
+    assert result_vector_db.embedding_model == sample_vector_db.embedding_model
+    assert result_vector_db.provider_id == sample_vector_db.provider_id

     result_model = await registry.get("model", "test_model")
     assert result_model is not None
@@ -91,24 +88,23 @@

 @pytest.mark.asyncio
-async def test_cached_registry_initialization(config, sample_bank, sample_model):
+async def test_cached_registry_initialization(config, sample_vector_db, sample_model):
     # First populate the disk registry
     disk_registry = DiskDistributionRegistry(await kvstore_impl(config))
     await disk_registry.initialize()
-    await disk_registry.register(sample_bank)
+    await disk_registry.register(sample_vector_db)
     await disk_registry.register(sample_model)

     # Test cached version loads from disk
     cached_registry = CachedDiskDistributionRegistry(await kvstore_impl(config))
     await cached_registry.initialize()
-    result_bank = await cached_registry.get("memory_bank", "test_bank")
-    assert result_bank is not None
-    assert result_bank.identifier == sample_bank.identifier
-    assert result_bank.embedding_model == sample_bank.embedding_model
-    assert result_bank.chunk_size_in_tokens == sample_bank.chunk_size_in_tokens
-    assert result_bank.overlap_size_in_tokens == sample_bank.overlap_size_in_tokens
-    assert result_bank.provider_id == sample_bank.provider_id
+    result_vector_db = await cached_registry.get("vector_db", "test_vector_db")
+    assert result_vector_db is not None
+    assert result_vector_db.identifier == sample_vector_db.identifier
+    assert result_vector_db.embedding_model == sample_vector_db.embedding_model
+    assert result_vector_db.embedding_dimension == sample_vector_db.embedding_dimension
+    assert result_vector_db.provider_id == sample_vector_db.provider_id

 @pytest.mark.asyncio
@@ -116,29 +112,28 @@ async def test_cached_registry_updates(config):
     cached_registry = CachedDiskDistributionRegistry(await kvstore_impl(config))
     await cached_registry.initialize()

-    new_bank = VectorMemoryBank(
-        identifier="test_bank_2",
+    new_vector_db = VectorDB(
+        identifier="test_vector_db_2",
         embedding_model="all-MiniLM-L6-v2",
-        chunk_size_in_tokens=256,
-        overlap_size_in_tokens=32,
-        provider_resource_id="test_bank_2",
+        embedding_dimension=384,
+        provider_resource_id="test_vector_db_2",
         provider_id="baz",
     )
-    await cached_registry.register(new_bank)
+    await cached_registry.register(new_vector_db)

     # Verify in cache
-    result_bank = await cached_registry.get("memory_bank", "test_bank_2")
-    assert result_bank is not None
-    assert result_bank.identifier == new_bank.identifier
-    assert result_bank.provider_id == new_bank.provider_id
+    result_vector_db = await cached_registry.get("vector_db", "test_vector_db_2")
+    assert result_vector_db is not None
+    assert result_vector_db.identifier == new_vector_db.identifier
+    assert result_vector_db.provider_id == new_vector_db.provider_id

     # Verify persisted to disk
     new_registry = DiskDistributionRegistry(await kvstore_impl(config))
     await new_registry.initialize()
-    result_bank = await new_registry.get("memory_bank", "test_bank_2")
-    assert result_bank is not None
-    assert result_bank.identifier == new_bank.identifier
-    assert result_bank.provider_id == new_bank.provider_id
+    result_vector_db = await new_registry.get("vector_db", "test_vector_db_2")
+    assert result_vector_db is not None
+    assert result_vector_db.identifier == new_vector_db.identifier
+    assert result_vector_db.provider_id == new_vector_db.provider_id

 @pytest.mark.asyncio
@@ -146,30 +141,28 @@ async def test_duplicate_provider_registration(config):
     cached_registry = CachedDiskDistributionRegistry(await kvstore_impl(config))
     await cached_registry.initialize()

-    original_bank = VectorMemoryBank(
-        identifier="test_bank_2",
+    original_vector_db = VectorDB(
+        identifier="test_vector_db_2",
         embedding_model="all-MiniLM-L6-v2",
-        chunk_size_in_tokens=256,
-        overlap_size_in_tokens=32,
-        provider_resource_id="test_bank_2",
+        embedding_dimension=384,
+        provider_resource_id="test_vector_db_2",
         provider_id="baz",
     )
-    await cached_registry.register(original_bank)
+    await cached_registry.register(original_vector_db)

-    duplicate_bank = VectorMemoryBank(
-        identifier="test_bank_2",
+    duplicate_vector_db = VectorDB(
+        identifier="test_vector_db_2",
         embedding_model="different-model",
-        chunk_size_in_tokens=128,
-        overlap_size_in_tokens=16,
-        provider_resource_id="test_bank_2",
+        embedding_dimension=384,
+        provider_resource_id="test_vector_db_2",
         provider_id="baz",  # Same provider_id
     )
-    await cached_registry.register(duplicate_bank)
+    await cached_registry.register(duplicate_vector_db)

-    result = await cached_registry.get("memory_bank", "test_bank_2")
+    result = await cached_registry.get("vector_db", "test_vector_db_2")
     assert result is not None
     assert (
-        result.embedding_model == original_bank.embedding_model
+        result.embedding_model == original_vector_db.embedding_model
     )  # Original values preserved
@@ -179,36 +172,35 @@ async def test_get_all_objects(config):
     await cached_registry.initialize()

     # Create multiple test banks
-    test_banks = [
-        VectorMemoryBank(
-            identifier=f"test_bank_{i}",
+    test_vector_dbs = [
+        VectorDB(
+            identifier=f"test_vector_db_{i}",
             embedding_model="all-MiniLM-L6-v2",
-            chunk_size_in_tokens=256,
-            overlap_size_in_tokens=32,
-            provider_resource_id=f"test_bank_{i}",
+            embedding_dimension=384,
+            provider_resource_id=f"test_vector_db_{i}",
             provider_id=f"provider_{i}",
         )
         for i in range(3)
     ]

-    # Register all banks
-    for bank in test_banks:
-        await cached_registry.register(bank)
+    # Register all vector_dbs
+    for vector_db in test_vector_dbs:
+        await cached_registry.register(vector_db)

     # Test get_all retrieval
     all_results = await cached_registry.get_all()
     assert len(all_results) == 3

-    # Verify each bank was stored correctly
-    for original_bank in test_banks:
-        matching_banks = [
-            b for b in all_results if b.identifier == original_bank.identifier
+    # Verify each vector_db was stored correctly
+    for original_vector_db in test_vector_dbs:
+        matching_vector_dbs = [
+            v for v in all_results if v.identifier == original_vector_db.identifier
         ]
-        assert len(matching_banks) == 1
-        stored_bank = matching_banks[0]
-        assert stored_bank.embedding_model == original_bank.embedding_model
-        assert stored_bank.provider_id == original_bank.provider_id
-        assert stored_bank.chunk_size_in_tokens == original_bank.chunk_size_in_tokens
+        assert len(matching_vector_dbs) == 1
+        stored_vector_db = matching_vector_dbs[0]
+        assert stored_vector_db.embedding_model == original_vector_db.embedding_model
+        assert stored_vector_db.provider_id == original_vector_db.provider_id
         assert (
-            stored_bank.overlap_size_in_tokens == original_bank.overlap_size_in_tokens
+            stored_vector_db.embedding_dimension
+            == original_vector_db.embedding_dimension
         )
diff --git a/llama_stack/distribution/ui/page/distribution/memory_banks.py b/llama_stack/distribution/ui/page/distribution/memory_banks.py
deleted file mode 100644
index f28010bf2..000000000
--- a/llama_stack/distribution/ui/page/distribution/memory_banks.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import streamlit as st
-from modules.api import llama_stack_api
-
-
-def memory_banks():
-    st.header("Memory Banks")
-    memory_banks_info = {
-        m.identifier: m.to_dict() for m in llama_stack_api.client.memory_banks.list()
-    }
-
-    if len(memory_banks_info) > 0:
-        selected_memory_bank = st.selectbox(
-            "Select a memory bank", list(memory_banks_info.keys())
-        )
-        st.json(memory_banks_info[selected_memory_bank])
-    else:
-        st.info("No memory banks found")
diff --git a/llama_stack/distribution/ui/page/distribution/resources.py b/llama_stack/distribution/ui/page/distribution/resources.py
index 6b3ea0e3a..38d494570 100644
--- a/llama_stack/distribution/ui/page/distribution/resources.py
+++ b/llama_stack/distribution/ui/page/distribution/resources.py
@@ -6,10 +6,10 @@
 from page.distribution.datasets import datasets
 from page.distribution.eval_tasks import eval_tasks
-from page.distribution.memory_banks import memory_banks
 from page.distribution.models import models
 from page.distribution.scoring_functions import scoring_functions
 from page.distribution.shields import shields
+from page.distribution.vector_dbs import vector_dbs

 from streamlit_option_menu import option_menu
@@ -17,7 +17,7 @@ from streamlit_option_menu import option_menu
 def resources_page():
     options = [
         "Models",
-        "Memory Banks",
+        "Vector Databases",
         "Shields",
         "Scoring Functions",
         "Datasets",
@@ -37,8 +37,8 @@ def resources_page():
     )
     if selected_resource == "Eval Tasks":
         eval_tasks()
-    elif selected_resource == "Memory Banks":
-        memory_banks()
+    elif selected_resource == "Vector Databases":
+        vector_dbs()
     elif selected_resource == "Datasets":
         datasets()
     elif selected_resource == "Models":
diff --git a/llama_stack/distribution/ui/page/distribution/vector_dbs.py b/llama_stack/distribution/ui/page/distribution/vector_dbs.py
new file mode 100644
index 000000000..9afa6de1f
--- /dev/null
+++ b/llama_stack/distribution/ui/page/distribution/vector_dbs.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import streamlit as st
+from modules.api import llama_stack_api
+
+
+def vector_dbs():
+    st.header("Vector Databases")
+    vector_dbs_info = {
+        v.identifier: v.to_dict() for v in llama_stack_api.client.vector_dbs.list()
+    }
+
+    if len(vector_dbs_info) > 0:
+        selected_vector_db = st.selectbox(
+            "Select a vector database", list(vector_dbs_info.keys())
+        )
+        st.json(vector_dbs_info[selected_vector_db])
+    else:
+        st.info("No vector databases found")
diff --git a/llama_stack/distribution/ui/page/playground/rag.py b/llama_stack/distribution/ui/page/playground/rag.py
index 11b05718d..465e11560 100644
--- a/llama_stack/distribution/ui/page/playground/rag.py
+++ b/llama_stack/distribution/ui/page/playground/rag.py
@@ -29,12 +29,12 @@ def rag_chat_page():
         if uploaded_files:
             st.success(f"Successfully uploaded {len(uploaded_files)} files")
             # Add memory bank name input field
-            memory_bank_name = st.text_input(
-                "Memory Bank Name",
-                value="rag_bank",
-                help="Enter a unique identifier for this memory bank",
+            vector_db_name = st.text_input(
+                "Vector Database Name",
+                value="rag_vector_db",
+                help="Enter a unique identifier for this vector database",
             )
-            if st.button("Create Memory Bank"):
+            if st.button("Create Vector Database"):
                 documents = [
                     Document(
                         document_id=uploaded_file.name,
@@ -44,37 +44,33 @@ def rag_chat_page():
                 ]

                 providers = llama_stack_api.client.providers.list()
-                memory_provider = None
+                vector_io_provider = None

                 for x in providers:
-                    if x.api == "memory":
-                        memory_provider = x.provider_id
+                    if x.api == "vector_io":
+                        vector_io_provider = x.provider_id

-                llama_stack_api.client.memory_banks.register(
-                    memory_bank_id=memory_bank_name,  # Use the user-provided name
-                    params={
-                        "memory_bank_type": "vector",
-                        "embedding_model": "all-MiniLM-L6-v2",
-                        "chunk_size_in_tokens": 512,
-                        "overlap_size_in_tokens": 64,
-                    },
-                    provider_id=memory_provider,
+                llama_stack_api.client.vector_dbs.register(
+                    vector_db_id=vector_db_name,  # Use the user-provided name
+                    embedding_dimension=384,
+                    embedding_model="all-MiniLM-L6-v2",
+                    provider_id=vector_io_provider,
                 )

-                # insert documents using the custom bank name
-                llama_stack_api.client.memory.insert(
-                    bank_id=memory_bank_name,  # Use the user-provided name
+                # insert documents using the custom vector db name
+                llama_stack_api.client.tool_runtime.rag_tool.insert(
+                    vector_db_id=vector_db_name,  # Use the user-provided name
                     documents=documents,
                 )
-                st.success("Memory bank created successfully!")
+                st.success("Vector database created successfully!")

     st.subheader("Configure Agent")
     # select memory banks
-    memory_banks = llama_stack_api.client.memory_banks.list()
-    memory_banks = [bank.identifier for bank in memory_banks]
-    selected_memory_banks = st.multiselect(
-        "Select Memory Banks",
-        memory_banks,
+    vector_dbs = llama_stack_api.client.vector_dbs.list()
+    vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
+    selected_vector_dbs = st.multiselect(
+        "Select Vector Databases",
+        vector_dbs,
     )

     available_models = llama_stack_api.client.models.list()
@@ -141,14 +137,14 @@ def rag_chat_page():
             dict(
                 name="builtin::memory",
                 args={
-                    "memory_bank_ids": [bank_id for bank_id in selected_memory_banks],
+                    "vector_db_ids": [
+                        vector_db_id for vector_db_id in selected_vector_dbs
+                    ],
                 },
             )
         ],
         tool_choice="auto",
         tool_prompt_format="json",
-        input_shields=[],
-        output_shields=[],
         enable_session_persistence=False,
     )
diff --git a/llama_stack/providers/inline/vector_io/chroma/__init__.py b/llama_stack/providers/inline/vector_io/chroma/__init__.py
index 80620c780..68e28da63 100644
--- a/llama_stack/providers/inline/vector_io/chroma/__init__.py
+++ b/llama_stack/providers/inline/vector_io/chroma/__init__.py
@@ -14,8 +14,10 @@ from .config import ChromaInlineImplConfig
 async def get_provider_impl(
     config: ChromaInlineImplConfig, deps: Dict[Api, ProviderSpec]
 ):
-    from llama_stack.providers.remote.memory.chroma.chroma import ChromaMemoryAdapter
+    from llama_stack.providers.remote.vector_io.chroma.chroma import (
+        ChromaVectorIOAdapter,
+    )

-    impl = ChromaMemoryAdapter(config, deps[Api.inference])
+    impl = ChromaVectorIOAdapter(config, deps[Api.inference])
     await impl.initialize()
     return impl
diff --git a/llama_stack/providers/remote/vector_io/chroma/__init__.py b/llama_stack/providers/remote/vector_io/chroma/__init__.py
index 581d60e75..d66a93ac7 100644
--- a/llama_stack/providers/remote/vector_io/chroma/__init__.py
+++ b/llama_stack/providers/remote/vector_io/chroma/__init__.py
@@ -14,8 +14,8 @@ from .config import ChromaRemoteImplConfig
 async def get_adapter_impl(
     config: ChromaRemoteImplConfig, deps: Dict[Api, ProviderSpec]
 ):
-    from .chroma import ChromaMemoryAdapter
+    from .chroma import ChromaVectorIOAdapter

-    impl = ChromaMemoryAdapter(config, deps[Api.inference])
+    impl = ChromaVectorIOAdapter(config, deps[Api.inference])
     await impl.initialize()
     return impl
diff --git a/llama_stack/providers/remote/vector_io/chroma/chroma.py b/llama_stack/providers/remote/vector_io/chroma/chroma.py
index e33980be1..724dc3f51 100644
--- a/llama_stack/providers/remote/vector_io/chroma/chroma.py
+++ b/llama_stack/providers/remote/vector_io/chroma/chroma.py
@@ -86,13 +86,13 @@ class ChromaIndex(EmbeddingIndex):
         await maybe_await(self.client.delete_collection(self.collection.name))

-class ChromaMemoryAdapter(VectorIO, VectorDBsProtocolPrivate):
+class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(
         self,
         config: Union[ChromaRemoteImplConfig, ChromaInlineImplConfig],
         inference_api: Api.inference,
     ) -> None:
-        log.info(f"Initializing ChromaMemoryAdapter with url: {config}")
+        log.info(f"Initializing ChromaVectorIOAdapter with url: {config}")
         self.config = config
         self.inference_api = inference_api
diff --git a/llama_stack/templates/bedrock/bedrock.py b/llama_stack/templates/bedrock/bedrock.py
index 668134be8..20f670891 100644
--- a/llama_stack/templates/bedrock/bedrock.py
+++ b/llama_stack/templates/bedrock/bedrock.py
@@ -10,7 +10,7 @@ from llama_models.sku_list import all_registered_models

 from llama_stack.apis.models import ModelInput
 from llama_stack.distribution.datatypes import Provider, ToolGroupInput
-from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
+from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig
 from llama_stack.providers.remote.inference.bedrock.bedrock import MODEL_ALIASES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -18,7 +18,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin
 def get_distribution_template() -> DistributionTemplate:
     providers = {
         "inference": ["remote::bedrock"],
-        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "safety": ["remote::bedrock"],
         "agents": ["inline::meta-reference"],
         "telemetry": ["inline::meta-reference"],
@@ -34,7 +34,7 @@ def get_distribution_template() -> DistributionTemplate:
         ],
     }
     name = "bedrock"
-    memory_provider = Provider(
+    vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
@@ -78,7 +78,7 @@ def get_distribution_template() -> DistributionTemplate:
         run_configs={
             "run.yaml": RunConfigSettings(
                 provider_overrides={
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=default_models,
                 default_tool_groups=default_tool_groups,
diff --git a/llama_stack/templates/bedrock/build.yaml b/llama_stack/templates/bedrock/build.yaml
index 95b8684e3..9ae11e9bb 100644
--- a/llama_stack/templates/bedrock/build.yaml
+++ b/llama_stack/templates/bedrock/build.yaml
@@ -4,7 +4,7 @@ distribution_spec:
   providers:
     inference:
     - remote::bedrock
-    memory:
+    vector_io:
     - inline::faiss
     - remote::chromadb
    - remote::pgvector
diff --git a/llama_stack/templates/bedrock/run.yaml b/llama_stack/templates/bedrock/run.yaml
index 118723bbc..577263bbf 100644
--- a/llama_stack/templates/bedrock/run.yaml
+++ b/llama_stack/templates/bedrock/run.yaml
@@ -5,17 +5,17 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: bedrock
     provider_type: remote::bedrock
     config: {}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -104,7 +104,7 @@ models:
   provider_model_id: meta.llama3-1-405b-instruct-v1:0
   model_type: llm
 shields: []
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/cerebras/build.yaml b/llama_stack/templates/cerebras/build.yaml
index 9f187d3c7..6d43ed0ca 100644
--- a/llama_stack/templates/cerebras/build.yaml
+++ b/llama_stack/templates/cerebras/build.yaml
@@ -6,7 +6,7 @@ distribution_spec:
     - remote::cerebras
     safety:
     - inline::llama-guard
-    memory:
+    vector_io:
     - inline::faiss
     - remote::chromadb
     - remote::pgvector
diff --git a/llama_stack/templates/cerebras/cerebras.py b/llama_stack/templates/cerebras/cerebras.py
index 8f6bd77af..be51e635d 100644
--- a/llama_stack/templates/cerebras/cerebras.py
+++ b/llama_stack/templates/cerebras/cerebras.py
@@ -13,7 +13,7 @@ from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupIn
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
 )
-from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
+from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig
 from llama_stack.providers.remote.inference.cerebras import CerebrasImplConfig
 from llama_stack.providers.remote.inference.cerebras.cerebras import model_aliases
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -23,7 +23,7 @@ def get_distribution_template() -> DistributionTemplate:
     providers = {
         "inference": ["remote::cerebras"],
         "safety": ["inline::llama-guard"],
-        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "agents": ["inline::meta-reference"],
         "eval": ["inline::meta-reference"],
         "datasetio": ["remote::huggingface", "inline::localfs"],
@@ -68,7 +68,7 @@ def get_distribution_template() -> DistributionTemplate:
             "embedding_dimension": 384,
         },
     )
-    memory_provider = Provider(
+    vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
@@ -100,7 +100,7 @@ def get_distribution_template() -> DistributionTemplate:
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider, embedding_provider],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=default_models + [embedding_model],
                 default_shields=[],
provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=default_models + [embedding_model], default_shields=[], diff --git a/llama_stack/templates/cerebras/run.yaml b/llama_stack/templates/cerebras/run.yaml index bfc492bda..0553f0749 100644 --- a/llama_stack/templates/cerebras/run.yaml +++ b/llama_stack/templates/cerebras/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: cerebras @@ -24,7 +24,7 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -106,7 +106,7 @@ models: provider_id: sentence-transformers model_type: embedding shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/experimental-post-training/run.yaml b/llama_stack/templates/experimental-post-training/run.yaml index 87465137f..14323573c 100644 --- a/llama_stack/templates/experimental-post-training/run.yaml +++ b/llama_stack/templates/experimental-post-training/run.yaml @@ -60,7 +60,7 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -82,7 +82,7 @@ metadata_store: db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db models: [] shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/fireworks/build.yaml b/llama_stack/templates/fireworks/build.yaml index d8e1e27ee..7e19cd5e6 100644 --- a/llama_stack/templates/fireworks/build.yaml +++ b/llama_stack/templates/fireworks/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - remote::fireworks - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py index 14fd392c4..5f1b9e8a0 100644 --- a/llama_stack/templates/fireworks/fireworks.py +++ b/llama_stack/templates/fireworks/fireworks.py @@ -18,7 +18,7 @@ from llama_stack.distribution.datatypes import ( from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) -from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig +from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig from llama_stack.providers.remote.inference.fireworks.fireworks import MODEL_ALIASES from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -27,7 +27,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::fireworks"], - "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -55,7 +55,7 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::sentence-transformers", config=SentenceTransformersInferenceConfig.sample_run_config(), ) - memory_provider = 
Provider( + vector_io_provider = Provider( provider_id="faiss", provider_type="inline::faiss", config=FaissImplConfig.sample_run_config(f"distributions/{name}"), @@ -107,7 +107,7 @@ def get_distribution_template() -> DistributionTemplate: "run.yaml": RunConfigSettings( provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=default_models + [embedding_model], default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")], @@ -119,7 +119,7 @@ def get_distribution_template() -> DistributionTemplate: inference_provider, embedding_provider, ], - "memory": [memory_provider], + "vector_io": [vector_io_provider], "safety": [ Provider( provider_id="llama-guard", diff --git a/llama_stack/templates/fireworks/run-with-safety.yaml b/llama_stack/templates/fireworks/run-with-safety.yaml index dd21120ed..659ec5191 100644 --- a/llama_stack/templates/fireworks/run-with-safety.yaml +++ b/llama_stack/templates/fireworks/run-with-safety.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: fireworks @@ -20,7 +20,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -161,7 +161,7 @@ shields: provider_id: llama-guard-vision - shield_id: CodeScanner provider_id: code-scanner -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml index 993417b50..9fb61f842 100644 --- a/llama_stack/templates/fireworks/run.yaml +++ b/llama_stack/templates/fireworks/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: fireworks @@ -20,7 +20,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -150,7 +150,7 @@ models: model_type: embedding shields: - shield_id: meta-llama/Llama-Guard-3-8B -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/hf-endpoint/build.yaml b/llama_stack/templates/hf-endpoint/build.yaml index f4fdc4a3d..82a460bd9 100644 --- a/llama_stack/templates/hf-endpoint/build.yaml +++ b/llama_stack/templates/hf-endpoint/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - remote::hf::endpoint - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/hf-endpoint/hf_endpoint.py b/llama_stack/templates/hf-endpoint/hf_endpoint.py index 1a5c23a42..f9bfe85f9 100644 --- a/llama_stack/templates/hf-endpoint/hf_endpoint.py +++ b/llama_stack/templates/hf-endpoint/hf_endpoint.py @@ -14,7 +14,7 @@ from llama_stack.distribution.datatypes import ( from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) -from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig +from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig from llama_stack.providers.remote.inference.tgi import InferenceEndpointImplConfig from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ 
-22,7 +22,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::hf::endpoint"], - "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -48,7 +48,7 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::sentence-transformers", config=SentenceTransformersInferenceConfig.sample_run_config(), ) - memory_provider = Provider( + vector_io_provider = Provider( provider_id="faiss", provider_type="inline::faiss", config=FaissImplConfig.sample_run_config(f"distributions/{name}"), @@ -97,7 +97,7 @@ def get_distribution_template() -> DistributionTemplate: "run.yaml": RunConfigSettings( provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[inference_model, embedding_model], default_tool_groups=default_tool_groups, @@ -115,7 +115,7 @@ def get_distribution_template() -> DistributionTemplate: ), ), ], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[ inference_model, diff --git a/llama_stack/templates/hf-endpoint/run-with-safety.yaml b/llama_stack/templates/hf-endpoint/run-with-safety.yaml index 537e4024f..dfa094fe6 100644 --- a/llama_stack/templates/hf-endpoint/run-with-safety.yaml +++ b/llama_stack/templates/hf-endpoint/run-with-safety.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: hf-endpoint @@ -25,7 +25,7 @@ providers: config: endpoint_name: ${env.SAFETY_INFERENCE_ENDPOINT_NAME} api_token: ${env.HF_API_TOKEN} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -113,7 +113,7 @@ models: model_type: embedding shields: - shield_id: ${env.SAFETY_MODEL} -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/hf-endpoint/run.yaml b/llama_stack/templates/hf-endpoint/run.yaml index b31f28434..fb5d7fa31 100644 --- a/llama_stack/templates/hf-endpoint/run.yaml +++ b/llama_stack/templates/hf-endpoint/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: hf-endpoint @@ -20,7 +20,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -103,7 +103,7 @@ models: provider_id: sentence-transformers model_type: embedding shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/hf-serverless/build.yaml b/llama_stack/templates/hf-serverless/build.yaml index d075a7449..0eb4e0509 100644 --- a/llama_stack/templates/hf-serverless/build.yaml +++ b/llama_stack/templates/hf-serverless/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - remote::hf::serverless - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/hf-serverless/hf_serverless.py b/llama_stack/templates/hf-serverless/hf_serverless.py index 0292f13e2..4f3c29404 100644 --- 
a/llama_stack/templates/hf-serverless/hf_serverless.py +++ b/llama_stack/templates/hf-serverless/hf_serverless.py @@ -14,7 +14,7 @@ from llama_stack.distribution.datatypes import ( from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) -from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig +from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig from llama_stack.providers.remote.inference.tgi import InferenceAPIImplConfig from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -22,7 +22,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::hf::serverless"], - "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -49,7 +49,7 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::sentence-transformers", config=SentenceTransformersInferenceConfig.sample_run_config(), ) - memory_provider = Provider( + vector_io_provider = Provider( provider_id="faiss", provider_type="inline::faiss", config=FaissImplConfig.sample_run_config(f"distributions/{name}"), @@ -98,7 +98,7 @@ def get_distribution_template() -> DistributionTemplate: "run.yaml": RunConfigSettings( provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[inference_model, embedding_model], default_tool_groups=default_tool_groups, @@ -116,7 +116,7 @@ def get_distribution_template() -> DistributionTemplate: ), ), ], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[ inference_model, diff --git a/llama_stack/templates/hf-serverless/run-with-safety.yaml b/llama_stack/templates/hf-serverless/run-with-safety.yaml index 484b2d0bd..0575efaef 100644 --- a/llama_stack/templates/hf-serverless/run-with-safety.yaml +++ b/llama_stack/templates/hf-serverless/run-with-safety.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: hf-serverless @@ -25,7 +25,7 @@ providers: config: huggingface_repo: ${env.SAFETY_MODEL} api_token: ${env.HF_API_TOKEN} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -113,7 +113,7 @@ models: model_type: embedding shields: - shield_id: ${env.SAFETY_MODEL} -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/hf-serverless/run.yaml b/llama_stack/templates/hf-serverless/run.yaml index a75baf1f9..b87edd744 100644 --- a/llama_stack/templates/hf-serverless/run.yaml +++ b/llama_stack/templates/hf-serverless/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: hf-serverless @@ -20,7 +20,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -103,7 +103,7 @@ models: provider_id: sentence-transformers model_type: embedding shields: [] -memory_banks: [] 
+vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/meta-reference-gpu/build.yaml b/llama_stack/templates/meta-reference-gpu/build.yaml index a75d3604b..f5371f0d6 100644 --- a/llama_stack/templates/meta-reference-gpu/build.yaml +++ b/llama_stack/templates/meta-reference-gpu/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - inline::meta-reference - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/meta-reference-gpu/meta_reference.py b/llama_stack/templates/meta-reference-gpu/meta_reference.py index 584d38256..dae4f0218 100644 --- a/llama_stack/templates/meta-reference-gpu/meta_reference.py +++ b/llama_stack/templates/meta-reference-gpu/meta_reference.py @@ -19,14 +19,14 @@ from llama_stack.providers.inline.inference.meta_reference import ( from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) -from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig +from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig from llama_stack.templates.template import DistributionTemplate, RunConfigSettings def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["inline::meta-reference"], - "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -55,7 +55,7 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::sentence-transformers", config=SentenceTransformersInferenceConfig.sample_run_config(), ) - memory_provider = Provider( + vector_io_provider = Provider( provider_id="faiss", provider_type="inline::faiss", config=FaissImplConfig.sample_run_config(f"distributions/{name}"), @@ -103,7 +103,7 @@ def get_distribution_template() -> DistributionTemplate: "run.yaml": RunConfigSettings( provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[inference_model, embedding_model], default_tool_groups=default_tool_groups, @@ -122,7 +122,7 @@ def get_distribution_template() -> DistributionTemplate: ), ), ], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[ inference_model, diff --git a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml index 9dbdb6fa5..54ddef155 100644 --- a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml +++ b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: meta-reference-inference @@ -27,7 +27,7 @@ providers: model: ${env.SAFETY_MODEL} max_seq_len: 4096 checkpoint_dir: ${env.SAFETY_CHECKPOINT_DIR:null} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -115,7 +115,7 @@ models: model_type: embedding shields: - shield_id: ${env.SAFETY_MODEL} -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/meta-reference-gpu/run.yaml b/llama_stack/templates/meta-reference-gpu/run.yaml index 6465215f0..cde581d19 100644 --- 
a/llama_stack/templates/meta-reference-gpu/run.yaml +++ b/llama_stack/templates/meta-reference-gpu/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: meta-reference-inference @@ -21,7 +21,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -104,7 +104,7 @@ models: provider_id: sentence-transformers model_type: embedding shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/meta-reference-quantized-gpu/build.yaml b/llama_stack/templates/meta-reference-quantized-gpu/build.yaml index 4c3e2f492..aa23ad313 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/build.yaml +++ b/llama_stack/templates/meta-reference-quantized-gpu/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - inline::meta-reference-quantized - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py b/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py index 56293f42c..4e9cbf1fe 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py +++ b/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py @@ -14,14 +14,14 @@ from llama_stack.providers.inline.inference.meta_reference import ( from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) -from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig +from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig from llama_stack.templates.template import DistributionTemplate, RunConfigSettings def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["inline::meta-reference-quantized"], - "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -64,7 +64,7 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::sentence-transformers", config=SentenceTransformersInferenceConfig.sample_run_config(), ) - memory_provider = Provider( + vector_io_provider = Provider( provider_id="faiss", provider_type="inline::faiss", config=FaissImplConfig.sample_run_config(f"distributions/{name}"), @@ -93,7 +93,7 @@ def get_distribution_template() -> DistributionTemplate: "run.yaml": RunConfigSettings( provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[inference_model, embedding_model], default_tool_groups=default_tool_groups, diff --git a/llama_stack/templates/meta-reference-quantized-gpu/run.yaml b/llama_stack/templates/meta-reference-quantized-gpu/run.yaml index 059034741..cc5793f8f 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/run.yaml +++ b/llama_stack/templates/meta-reference-quantized-gpu/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: meta-reference-inference @@ -23,7 +23,7 @@ providers: - 
provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -106,7 +106,7 @@ models: provider_id: sentence-transformers model_type: embedding shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/nvidia/build.yaml b/llama_stack/templates/nvidia/build.yaml index 7bd2a3865..d6a510e2e 100644 --- a/llama_stack/templates/nvidia/build.yaml +++ b/llama_stack/templates/nvidia/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - remote::nvidia - memory: + vector_io: - inline::faiss safety: - inline::llama-guard diff --git a/llama_stack/templates/nvidia/nvidia.py b/llama_stack/templates/nvidia/nvidia.py index e72fe359f..5693ba12d 100644 --- a/llama_stack/templates/nvidia/nvidia.py +++ b/llama_stack/templates/nvidia/nvidia.py @@ -17,7 +17,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::nvidia"], - "memory": ["inline::faiss"], + "vector_io": ["inline::faiss"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], diff --git a/llama_stack/templates/nvidia/run.yaml b/llama_stack/templates/nvidia/run.yaml index 07c901371..317aa1031 100644 --- a/llama_stack/templates/nvidia/run.yaml +++ b/llama_stack/templates/nvidia/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: nvidia @@ -17,7 +17,7 @@ providers: config: url: https://integrate.api.nvidia.com api_key: ${env.NVIDIA_API_KEY} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -136,7 +136,7 @@ models: provider_model_id: meta/llama-3.2-90b-vision-instruct model_type: llm shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml index 5f2e010ee..c3ed88fb8 100644 --- a/llama_stack/templates/ollama/build.yaml +++ b/llama_stack/templates/ollama/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - remote::ollama - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py index 2288ea3a6..bdbd1e142 100644 --- a/llama_stack/templates/ollama/ollama.py +++ b/llama_stack/templates/ollama/ollama.py @@ -16,7 +16,7 @@ from llama_stack.distribution.datatypes import ( from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) -from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig +from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig from llama_stack.providers.remote.inference.ollama import OllamaImplConfig from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -24,7 +24,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::ollama"], - "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "safety": ["inline::llama-guard"], "agents": 
["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -49,7 +49,7 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::sentence-transformers", config=SentenceTransformersInferenceConfig.sample_run_config(), ) - memory_provider = Provider( + vector_io_provider = Provider( provider_id="faiss", provider_type="inline::faiss", config=FaissImplConfig.sample_run_config(f"distributions/{name}"), @@ -98,7 +98,7 @@ def get_distribution_template() -> DistributionTemplate: "run.yaml": RunConfigSettings( provider_overrides={ "inference": [inference_provider, embedding_provider], - "memory": [memory_provider], + "vector_io": [vector_io_provider], }, default_models=[inference_model, embedding_model], default_tool_groups=default_tool_groups, @@ -109,7 +109,7 @@ def get_distribution_template() -> DistributionTemplate: inference_provider, embedding_provider, ], - "memory": [memory_provider], + "vector_io": [vector_io_provider], "safety": [ Provider( provider_id="llama-guard", diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml index a808590c3..afb0b1938 100644 --- a/llama_stack/templates/ollama/run-with-safety.yaml +++ b/llama_stack/templates/ollama/run-with-safety.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: ollama @@ -19,7 +19,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -110,7 +110,7 @@ shields: provider_id: llama-guard - shield_id: CodeScanner provider_id: code-scanner -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml index 2c69296fc..976068670 100644 --- a/llama_stack/templates/ollama/run.yaml +++ b/llama_stack/templates/ollama/run.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: ollama @@ -19,7 +19,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: @@ -99,7 +99,7 @@ models: provider_id: sentence-transformers model_type: embedding shields: [] -memory_banks: [] +vector_dbs: [] datasets: [] scoring_fns: [] eval_tasks: [] diff --git a/llama_stack/templates/remote-vllm/build.yaml b/llama_stack/templates/remote-vllm/build.yaml index 6f301914c..409b2ba10 100644 --- a/llama_stack/templates/remote-vllm/build.yaml +++ b/llama_stack/templates/remote-vllm/build.yaml @@ -4,7 +4,7 @@ distribution_spec: providers: inference: - remote::vllm - memory: + vector_io: - inline::faiss - remote::chromadb - remote::pgvector diff --git a/llama_stack/templates/remote-vllm/run-with-safety.yaml b/llama_stack/templates/remote-vllm/run-with-safety.yaml index 5e5bd6af6..e26d0f99f 100644 --- a/llama_stack/templates/remote-vllm/run-with-safety.yaml +++ b/llama_stack/templates/remote-vllm/run-with-safety.yaml @@ -5,11 +5,11 @@ apis: - datasetio - eval - inference -- memory - safety - scoring - telemetry - tool_runtime +- vector_io providers: inference: - provider_id: vllm-inference @@ -27,7 +27,7 @@ providers: - provider_id: sentence-transformers provider_type: inline::sentence-transformers 
diff --git a/llama_stack/templates/remote-vllm/run.yaml b/llama_stack/templates/remote-vllm/run.yaml
index 4eac4dad7..dc54d216d 100644
--- a/llama_stack/templates/remote-vllm/run.yaml
+++ b/llama_stack/templates/remote-vllm/run.yaml
@@ -5,11 +5,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: vllm-inference
@@ -21,7 +21,7 @@ providers:
   - provider_id: sentence-transformers
     provider_type: inline::sentence-transformers
     config: {}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -104,7 +104,7 @@ models:
   provider_id: sentence-transformers
   model_type: embedding
 shields: []
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/remote-vllm/vllm.py b/llama_stack/templates/remote-vllm/vllm.py
index 296e2b4f5..f91ad24a7 100644
--- a/llama_stack/templates/remote-vllm/vllm.py
+++ b/llama_stack/templates/remote-vllm/vllm.py
@@ -16,7 +16,7 @@ from llama_stack.distribution.datatypes import (
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
 )
-from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
+from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig
 from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings

@@ -24,7 +24,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin
 def get_distribution_template() -> DistributionTemplate:
     providers = {
         "inference": ["remote::vllm"],
-        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "safety": ["inline::llama-guard"],
         "agents": ["inline::meta-reference"],
         "eval": ["inline::meta-reference"],
@@ -52,7 +52,7 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="inline::sentence-transformers",
         config=SentenceTransformersInferenceConfig.sample_run_config(),
     )
-    memory_provider = Provider(
+    vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
@@ -100,7 +100,7 @@
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider, embedding_provider],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
@@ -118,7 +118,7 @@
                         ),
                         embedding_provider,
                     ],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=[
                     inference_model,
diff --git a/llama_stack/templates/tgi/build.yaml b/llama_stack/templates/tgi/build.yaml
index 4391ddd5d..bc31ef7e7 100644
--- a/llama_stack/templates/tgi/build.yaml
+++ b/llama_stack/templates/tgi/build.yaml
@@ -4,7 +4,7 @@ distribution_spec:
   providers:
     inference:
     - remote::tgi
-    memory:
+    vector_io:
     - inline::faiss
     - remote::chromadb
     - remote::pgvector
diff --git a/llama_stack/templates/tgi/run-with-safety.yaml b/llama_stack/templates/tgi/run-with-safety.yaml
index 9bd06d650..ea8057137 100644
--- a/llama_stack/templates/tgi/run-with-safety.yaml
+++ b/llama_stack/templates/tgi/run-with-safety.yaml
@@ -5,11 +5,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: tgi-inference
@@ -20,7 +20,7 @@ providers:
     provider_type: remote::tgi
     config:
       url: ${env.TGI_SAFETY_URL}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -103,7 +103,7 @@ models:
   model_type: llm
 shields:
 - shield_id: ${env.SAFETY_MODEL}
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/tgi/run.yaml b/llama_stack/templates/tgi/run.yaml
index 2fc1b52d9..d537d0fce 100644
--- a/llama_stack/templates/tgi/run.yaml
+++ b/llama_stack/templates/tgi/run.yaml
@@ -5,11 +5,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: tgi-inference
@@ -19,7 +19,7 @@ providers:
   - provider_id: sentence-transformers
     provider_type: inline::sentence-transformers
     config: {}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -102,7 +102,7 @@ models:
   provider_id: sentence-transformers
   model_type: embedding
 shields: []
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py
index 8ad9725e3..230fcac2a 100644
--- a/llama_stack/templates/tgi/tgi.py
+++ b/llama_stack/templates/tgi/tgi.py
@@ -16,7 +16,7 @@ from llama_stack.distribution.datatypes import (
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
 )
-from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
+from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig
 from llama_stack.providers.remote.inference.tgi import TGIImplConfig
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings

@@ -24,7 +24,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin
 def get_distribution_template() -> DistributionTemplate:
     providers = {
         "inference": ["remote::tgi"],
-        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "safety": ["inline::llama-guard"],
         "agents": ["inline::meta-reference"],
         "telemetry": ["inline::meta-reference"],
@@ -52,7 +52,7 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="inline::sentence-transformers",
         config=SentenceTransformersInferenceConfig.sample_run_config(),
     )
-    memory_provider = Provider(
+    vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
@@ -101,7 +101,7 @@
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider, embedding_provider],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
@@ -118,7 +118,7 @@
                             ),
                         ),
                     ],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=[
                     inference_model,
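The rename also surfaces in the registered-resource lists: every run.yaml above now declares vector_dbs where it previously declared memory_banks. From a client, the corresponding resource is registered against one of the vector_io providers. A minimal sketch, assuming the llama_stack_client Python package that accompanies this rename; the base URL, IDs, and embedding model below are illustrative, not taken from this diff:

    # Sketch only: register a vector DB where a memory bank was registered before.
    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # URL illustrative

    client.vector_dbs.register(
        vector_db_id="my-documents",            # illustrative ID
        embedding_model="all-MiniLM-L6-v2",     # illustrative embedding model
        embedding_dimension=384,
        provider_id="faiss",                    # matches the inline::faiss provider above
    )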
diff --git a/llama_stack/templates/together/run-with-safety.yaml b/llama_stack/templates/together/run-with-safety.yaml
index c1461d75d..54b918eea 100644
--- a/llama_stack/templates/together/run-with-safety.yaml
+++ b/llama_stack/templates/together/run-with-safety.yaml
@@ -5,11 +5,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: together
@@ -20,7 +20,7 @@ providers:
   - provider_id: sentence-transformers
     provider_type: inline::sentence-transformers
     config: {}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -156,7 +156,7 @@ shields:
   provider_id: llama-guard-vision
 - shield_id: CodeScanner
   provider_id: code-scanner
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/together/run.yaml b/llama_stack/templates/together/run.yaml
index 135b124e4..2c0475796 100644
--- a/llama_stack/templates/together/run.yaml
+++ b/llama_stack/templates/together/run.yaml
@@ -5,11 +5,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: together
@@ -145,6 +145,7 @@ models:
   model_type: embedding
 shields:
 - shield_id: meta-llama/Llama-Guard-3-8B
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py
index 1e2def3bd..ec64527d2 100644
--- a/llama_stack/templates/together/together.py
+++ b/llama_stack/templates/together/together.py
@@ -18,7 +18,7 @@ from llama_stack.distribution.datatypes import (
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
 )
-from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
+from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
 from llama_stack.providers.remote.inference.together.together import MODEL_ALIASES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -27,7 +27,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin
 def get_distribution_template() -> DistributionTemplate:
     providers = {
         "inference": ["remote::together"],
-        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "safety": ["inline::llama-guard"],
         "agents": ["inline::meta-reference"],
         "telemetry": ["inline::meta-reference"],
@@ -48,7 +48,7 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="remote::together",
         config=TogetherImplConfig.sample_run_config(),
     )
-    memory_provider = Provider(
+    vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
@@ -105,7 +105,7 @@
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider, embedding_provider],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=default_models + [embedding_model],
                 default_tool_groups=default_tool_groups,
@@ -117,7 +117,7 @@
                         inference_provider,
                         embedding_provider,
                     ],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                     "safety": [
                         Provider(
                             provider_id="llama-guard",
diff --git a/llama_stack/templates/vllm-gpu/build.yaml b/llama_stack/templates/vllm-gpu/build.yaml
index e8a1693d0..45f543071 100644
--- a/llama_stack/templates/vllm-gpu/build.yaml
+++ b/llama_stack/templates/vllm-gpu/build.yaml
@@ -4,7 +4,7 @@ distribution_spec:
   providers:
     inference:
     - inline::vllm
-    memory:
+    vector_io:
     - inline::faiss
     - remote::chromadb
     - remote::pgvector
diff --git a/llama_stack/templates/vllm-gpu/run.yaml b/llama_stack/templates/vllm-gpu/run.yaml
index cc0ff047f..2d9ec6a3f 100644
--- a/llama_stack/templates/vllm-gpu/run.yaml
+++ b/llama_stack/templates/vllm-gpu/run.yaml
@@ -5,11 +5,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: vllm
@@ -23,7 +23,7 @@ providers:
   - provider_id: sentence-transformers
     provider_type: inline::sentence-transformers
     config: {}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -106,7 +106,7 @@ models:
   provider_id: sentence-transformers
   model_type: embedding
 shields: []
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
diff --git a/llama_stack/templates/vllm-gpu/vllm.py b/llama_stack/templates/vllm-gpu/vllm.py
index 71b24482d..a8f13ce40 100644
--- a/llama_stack/templates/vllm-gpu/vllm.py
+++ b/llama_stack/templates/vllm-gpu/vllm.py
@@ -10,7 +10,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
 )
 from llama_stack.providers.inline.inference.vllm import VLLMConfig
-from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
+from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig
 from llama_stack.templates.template import (
     DistributionTemplate,
     RunConfigSettings,
@@ -21,7 +21,7 @@ from llama_stack.templates.template import (
 def get_distribution_template() -> DistributionTemplate:
     providers = {
         "inference": ["inline::vllm"],
-        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "safety": ["inline::llama-guard"],
         "agents": ["inline::meta-reference"],
         "telemetry": ["inline::meta-reference"],
@@ -43,7 +43,7 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="inline::vllm",
         config=VLLMConfig.sample_run_config(),
     )
-    memory_provider = Provider(
+    vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
@@ -93,7 +93,7 @@
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider, embedding_provider],
-                    "memory": [memory_provider],
+                    "vector_io": [vector_io_provider],
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
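Taken together, the template-side pattern is the same three renames in every file: the providers map key, the Provider wiring variable, and the faiss config import path all move from memory to vector_io. For a template not covered by this diff (for example, an out-of-tree distribution), a minimal sketch mirroring the changes above; the distribution name is illustrative:

    from llama_stack.distribution.datatypes import Provider
    from llama_stack.providers.inline.vector_io.faiss.config import FaissImplConfig

    # providers map: the "memory" key becomes "vector_io"
    providers = {"vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"]}

    # provider wiring: renamed variable, same faiss provider, new import path
    vector_io_provider = Provider(
        provider_id="faiss",
        provider_type="inline::faiss",
        config=FaissImplConfig.sample_run_config("distributions/my-distro"),  # name illustrative
    )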