migrate memory banks to Resource and new registration

Author: Dinesh Yeduguru
Date: 2024-11-08 15:45:26 -08:00
parent b4416b72fd
commit c82f13bf9e
16 changed files with 178 additions and 104 deletions
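At a glance: the `*MemoryBankDef` pydantic models become `MemoryBank` resources built on `Resource`, and `register_memory_bank` moves from accepting a fully-formed bank definition to accepting explicit registration parameters. A minimal sketch of the new client-side call shape, mirroring the examples in the diff below (the server address and import paths are assumptions, not part of this commit):

```python
import asyncio

from llama_stack.apis.memory_banks import MemoryBankType  # import path assumed
from llama_stack.apis.memory_banks.client import MemoryBanksClient  # path assumed


async def main() -> None:
    client = MemoryBanksClient("http://localhost:5000")  # placeholder address

    # Before this commit: build a VectorMemoryBankDef and pass the whole object:
    #   await client.register_memory_bank(bank)
    # After this commit: register by id + type; the server-side routing table
    # fills in provider defaults and constructs the MemoryBank resource.
    await client.register_memory_bank(
        "test_bank",
        MemoryBankType.vector,
        provider_resource_id="test_bank",
    )

    retrieved = await client.get_memory_bank("test_bank")
    assert retrieved is not None


if __name__ == "__main__":
    asyncio.run(main())
```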


@ -271,7 +271,7 @@ class Session(BaseModel):
turns: List[Turn]
started_at: datetime
memory_bank: Optional[MemoryBankDef] = None
memory_bank: Optional[MemoryBank] = None
class AgentConfigCommon(BaseModel):


@ -75,14 +75,16 @@ class MemoryClient(Memory):
async def run_main(host: str, port: int, stream: bool):
banks_client = MemoryBanksClient(f"http://{host}:{port}")
bank = VectorMemoryBankDef(
bank = VectorMemoryBank(
identifier="test_bank",
provider_id="",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,
overlap_size_in_tokens=64,
)
await banks_client.register_memory_bank(bank)
await banks_client.register_memory_bank(
bank.identifier, bank.memory_bank_type, provider_resource_id=bank.identifier
)
retrieved_bank = await banks_client.get_memory_bank(bank.identifier)
assert retrieved_bank is not None


@ -39,7 +39,7 @@ class QueryDocumentsResponse(BaseModel):
class MemoryBankStore(Protocol):
def get_memory_bank(self, bank_id: str) -> Optional[MemoryBankDef]: ...
def get_memory_bank(self, bank_id: str) -> Optional[MemoryBank]: ...
@runtime_checkable


@ -5,7 +5,6 @@
# the root directory of this source tree.
import asyncio
import json
from typing import Any, Dict, List, Optional
@ -26,13 +25,13 @@ def deserialize_memory_bank_def(
raise ValueError("Memory bank type not specified")
type = j["type"]
if type == MemoryBankType.vector.value:
return VectorMemoryBankDef(**j)
return VectorMemoryBank(**j)
elif type == MemoryBankType.keyvalue.value:
return KeyValueMemoryBankDef(**j)
return KeyValueMemoryBank(**j)
elif type == MemoryBankType.keyword.value:
return KeywordMemoryBankDef(**j)
return KeywordMemoryBank(**j)
elif type == MemoryBankType.graph.value:
return GraphMemoryBankDef(**j)
return GraphMemoryBank(**j)
else:
raise ValueError(f"Unknown memory bank type: {type}")
@ -47,7 +46,7 @@ class MemoryBanksClient(MemoryBanks):
async def shutdown(self) -> None:
pass
async def list_memory_banks(self) -> List[MemoryBankDefWithProvider]:
async def list_memory_banks(self) -> List[MemoryBank]:
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}/memory_banks/list",
@ -57,13 +56,20 @@ class MemoryBanksClient(MemoryBanks):
return [deserialize_memory_bank_def(x) for x in response.json()]
async def register_memory_bank(
self, memory_bank: MemoryBankDefWithProvider
self,
memory_bank_id: str,
memory_bank_type: MemoryBankType,
provider_resource_id: Optional[str] = None,
provider_id: Optional[str] = None,
) -> None:
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.base_url}/memory_banks/register",
json={
"memory_bank": json.loads(memory_bank.json()),
"memory_bank_id": memory_bank_id,
"memory_bank_type": memory_bank_type.value,
"provider_resource_id": provider_resource_id,
"provider_id": provider_id,
},
headers={"Content-Type": "application/json"},
)
@ -71,13 +77,13 @@ class MemoryBanksClient(MemoryBanks):
async def get_memory_bank(
self,
identifier: str,
) -> Optional[MemoryBankDefWithProvider]:
memory_bank_id: str,
) -> Optional[MemoryBank]:
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}/memory_banks/get",
params={
"identifier": identifier,
"memory_bank_id": memory_bank_id,
},
headers={"Content-Type": "application/json"},
)
@ -94,7 +100,7 @@ async def run_main(host: str, port: int, stream: bool):
# register memory bank for the first time
response = await client.register_memory_bank(
VectorMemoryBankDef(
VectorMemoryBank(
identifier="test_bank2",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,


@ -8,8 +8,10 @@ from enum import Enum
from typing import List, Literal, Optional, Protocol, runtime_checkable, Union
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from typing_extensions import Annotated
from pydantic import BaseModel
from llama_stack.apis.resource import Resource, ResourceType
@json_schema_type
@ -20,59 +22,121 @@ class MemoryBankType(Enum):
graph = "graph"
class CommonDef(BaseModel):
identifier: str
# Hack: move this out later
provider_id: str = ""
@json_schema_type
class MemoryBank(Resource):
type: Literal[ResourceType.memory_bank.value] = ResourceType.memory_bank.value
memory_bank_type: MemoryBankType
@json_schema_type
class VectorMemoryBankDef(CommonDef):
type: Literal[MemoryBankType.vector.value] = MemoryBankType.vector.value
class VectorMemoryBank(MemoryBank):
memory_bank_type: Literal[MemoryBankType.vector.value] = MemoryBankType.vector.value
embedding_model: str
chunk_size_in_tokens: int
overlap_size_in_tokens: Optional[int] = None
@json_schema_type
class KeyValueMemoryBankDef(CommonDef):
type: Literal[MemoryBankType.keyvalue.value] = MemoryBankType.keyvalue.value
class KeyValueMemoryBank(MemoryBank):
memory_bank_type: Literal[MemoryBankType.keyvalue.value] = (
MemoryBankType.keyvalue.value
)
@json_schema_type
class KeywordMemoryBankDef(CommonDef):
type: Literal[MemoryBankType.keyword.value] = MemoryBankType.keyword.value
class KeywordMemoryBank(MemoryBank):
memory_bank_type: Literal[MemoryBankType.keyword.value] = (
MemoryBankType.keyword.value
)
@json_schema_type
class GraphMemoryBankDef(CommonDef):
type: Literal[MemoryBankType.graph.value] = MemoryBankType.graph.value
class GraphMemoryBank(MemoryBank):
memory_bank_type: Literal[MemoryBankType.graph.value] = MemoryBankType.graph.value
MemoryBankDef = Annotated[
Union[
VectorMemoryBankDef,
KeyValueMemoryBankDef,
KeywordMemoryBankDef,
GraphMemoryBankDef,
],
Field(discriminator="type"),
@json_schema_type
class BaseRegistration(BaseModel):
memory_bank_id: str
provider_resource_id: Optional[str] = None
provider_id: Optional[str] = None
@json_schema_type
class VectorRegistration(BaseRegistration):
embedding_model: str
chunk_size_in_tokens: int
overlap_size_in_tokens: Optional[int] = None
@json_schema_type
class KeyValueRegistration(BaseRegistration):
pass
@json_schema_type
class KeywordRegistration(BaseRegistration):
pass
@json_schema_type
class GraphRegistration(BaseRegistration):
pass
RegistrationRequest = Union[
VectorRegistration,
KeyValueRegistration,
KeywordRegistration,
GraphRegistration,
]
MemoryBankDefWithProvider = MemoryBankDef
def registration_request_to_memory_bank(request: RegistrationRequest) -> MemoryBank:
"""Convert registration request to memory bank object"""
if isinstance(request, VectorRegistration):
return VectorMemoryBank(
identifier=request.memory_bank_id,
provider_resource_id=request.provider_resource_id,
provider_id=request.provider_id,
embedding_model=request.embedding_model,
chunk_size_in_tokens=request.chunk_size_in_tokens,
overlap_size_in_tokens=request.overlap_size_in_tokens,
)
elif isinstance(request, KeyValueRegistration):
return KeyValueMemoryBank(
identifier=request.memory_bank_id,
provider_resource_id=request.provider_resource_id,
provider_id=request.provider_id,
memory_bank_type=MemoryBankType.keyvalue,
)
elif isinstance(request, KeywordRegistration):
return KeywordMemoryBank(
identifier=request.memory_bank_id,
provider_resource_id=request.provider_resource_id,
provider_id=request.provider_id,
memory_bank_type=MemoryBankType.keyword,
)
elif isinstance(request, GraphRegistration):
return GraphMemoryBank(
identifier=request.memory_bank_id,
provider_resource_id=request.provider_resource_id,
provider_id=request.provider_id,
memory_bank_type=MemoryBankType.graph,
)
else:
raise ValueError(f"Unknown registration type: {type(request)}")
@runtime_checkable
class MemoryBanks(Protocol):
@webmethod(route="/memory_banks/list", method="GET")
async def list_memory_banks(self) -> List[MemoryBankDefWithProvider]: ...
async def list_memory_banks(self) -> List[MemoryBank]: ...
@webmethod(route="/memory_banks/get", method="GET")
async def get_memory_bank(
self, identifier: str
) -> Optional[MemoryBankDefWithProvider]: ...
async def get_memory_bank(self, memory_bank_id: str) -> Optional[MemoryBank]: ...
@webmethod(route="/memory_banks/register", method="POST")
async def register_memory_bank(
self, memory_bank: MemoryBankDefWithProvider
) -> None: ...
self, request: RegistrationRequest
) -> MemoryBank: ...
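To make the new data model above concrete: a registration request is a plain payload, and `registration_request_to_memory_bank` turns it into the matching `MemoryBank` resource. A small sketch with placeholder values (the provider fields are normally defaulted by the routing table, shown in a later file, but are filled in here so the snippet stands alone; import paths are assumptions):

```python
from llama_stack.apis.memory_banks import (  # import path assumed
    MemoryBankType,
    VectorRegistration,
    registration_request_to_memory_bank,
)
from llama_stack.apis.resource import ResourceType

request = VectorRegistration(
    memory_bank_id="test_bank",
    provider_resource_id="test_bank",
    provider_id="faiss",  # placeholder provider id
    embedding_model="all-MiniLM-L6-v2",
    chunk_size_in_tokens=512,
    overlap_size_in_tokens=64,
)

bank = registration_request_to_memory_bank(request)
assert bank.identifier == "test_bank"
assert bank.memory_bank_type == MemoryBankType.vector.value
assert bank.type == ResourceType.memory_bank.value  # inherited from Resource
```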


@ -33,7 +33,7 @@ RoutingKey = Union[str, List[str]]
RoutableObject = Union[
Model,
Shield,
MemoryBankDef,
MemoryBank,
DatasetDef,
ScoringFnDef,
]
@ -43,7 +43,7 @@ RoutableObjectWithProvider = Annotated[
Union[
Model,
Shield,
MemoryBankDefWithProvider,
MemoryBank,
DatasetDefWithProvider,
ScoringFnDefWithProvider,
],


@ -32,8 +32,11 @@ class MemoryRouter(Memory):
async def shutdown(self) -> None:
pass
async def register_memory_bank(self, memory_bank: MemoryBankDef) -> None:
await self.routing_table.register_memory_bank(memory_bank)
async def register_memory_bank(
self,
request: RegistrationRequest,
) -> None:
await self.routing_table.register_memory_bank(request)
async def insert_documents(
self,


@ -188,12 +188,6 @@ class CommonRoutingTableImpl(RoutingTable):
objs = await self.dist_registry.get_all()
return [obj for obj in objs if obj.type == type]
async def get_all_with_types(
self, types: List[str]
) -> List[RoutableObjectWithProvider]:
objs = await self.dist_registry.get_all()
return [obj for obj in objs if obj.type in types]
class ModelsRoutingTable(CommonRoutingTableImpl, Models):
async def list_models(self) -> List[Model]:
@ -233,7 +227,7 @@ class ModelsRoutingTable(CommonRoutingTableImpl, Models):
class ShieldsRoutingTable(CommonRoutingTableImpl, Shields):
async def list_shields(self) -> List[Shield]:
return await self.get_all_with_type("shield")
return await self.get_all_with_type(ResourceType.shield.value)
async def get_shield(self, identifier: str) -> Optional[Shield]:
return await self.get_object_by_identifier(identifier)
@ -270,25 +264,29 @@ class ShieldsRoutingTable(CommonRoutingTableImpl, Shields):
class MemoryBanksRoutingTable(CommonRoutingTableImpl, MemoryBanks):
async def list_memory_banks(self) -> List[MemoryBankDefWithProvider]:
return await self.get_all_with_types(
[
MemoryBankType.vector.value,
MemoryBankType.keyvalue.value,
MemoryBankType.keyword.value,
MemoryBankType.graph.value,
]
)
async def list_memory_banks(self) -> List[MemoryBank]:
return await self.get_all_with_type(ResourceType.memory_bank.value)
async def get_memory_bank(
self, identifier: str
) -> Optional[MemoryBankDefWithProvider]:
return await self.get_object_by_identifier(identifier)
async def get_memory_bank(self, memory_bank_id: str) -> Optional[MemoryBank]:
return await self.get_object_by_identifier(memory_bank_id)
async def register_memory_bank(
self, memory_bank: MemoryBankDefWithProvider
) -> None:
self,
request: RegistrationRequest,
) -> MemoryBank:
if request.provider_resource_id is None:
request.provider_resource_id = request.memory_bank_id
if request.provider_id is None:
# If provider_id not specified, use the only provider if it supports this shield type
if len(self.impls_by_provider_id) == 1:
request.provider_id = list(self.impls_by_provider_id.keys())[0]
else:
raise ValueError(
"No provider specified and multiple providers available. Please specify a provider_id."
)
memory_bank = registration_request_to_memory_bank(request)
await self.register_object(memory_bank)
return memory_bank
class DatasetsRoutingTable(CommonRoutingTableImpl, Datasets):
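The provider-defaulting rule added to `MemoryBanksRoutingTable.register_memory_bank` above, restated as a standalone sketch so it can be read in isolation (the provider ids and the `impls_by_provider_id` mapping are invented for illustration):

```python
from typing import Dict, Optional, Tuple


def resolve_registration(
    memory_bank_id: str,
    provider_resource_id: Optional[str],
    provider_id: Optional[str],
    impls_by_provider_id: Dict[str, object],
) -> Tuple[str, str]:
    """Mirrors the defaulting logic in MemoryBanksRoutingTable.register_memory_bank."""
    if provider_resource_id is None:
        provider_resource_id = memory_bank_id
    if provider_id is None:
        # Only safe to default when exactly one provider is configured.
        if len(impls_by_provider_id) == 1:
            provider_id = next(iter(impls_by_provider_id))
        else:
            raise ValueError(
                "No provider specified and multiple providers available. "
                "Please specify a provider_id."
            )
    return provider_resource_id, provider_id


# Single configured provider: provider_id can be omitted by the caller.
assert resolve_registration("bank1", None, None, {"faiss": object()}) == ("bank1", "faiss")

# Multiple providers: the caller must choose explicitly.
try:
    resolve_registration("bank1", None, None, {"faiss": object(), "weaviate": object()})
except ValueError as exc:
    print(exc)
```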


@ -10,7 +10,7 @@ import pytest
import pytest_asyncio
from llama_stack.distribution.store import * # noqa F403
from llama_stack.apis.inference import Model
from llama_stack.apis.memory_banks import VectorMemoryBankDef
from llama_stack.apis.memory_banks import VectorMemoryBank
from llama_stack.providers.utils.kvstore import kvstore_impl, SqliteKVStoreConfig
from llama_stack.distribution.datatypes import * # noqa F403
@ -39,7 +39,7 @@ async def cached_registry(config):
@pytest.fixture
def sample_bank():
return VectorMemoryBankDef(
return VectorMemoryBank(
identifier="test_bank",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,
@ -113,7 +113,7 @@ async def test_cached_registry_updates(config):
cached_registry = CachedDiskDistributionRegistry(await kvstore_impl(config))
await cached_registry.initialize()
new_bank = VectorMemoryBankDef(
new_bank = VectorMemoryBank(
identifier="test_bank_2",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=256,
@ -144,7 +144,7 @@ async def test_duplicate_provider_registration(config):
cached_registry = CachedDiskDistributionRegistry(await kvstore_impl(config))
await cached_registry.initialize()
original_bank = VectorMemoryBankDef(
original_bank = VectorMemoryBank(
identifier="test_bank_2",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=256,
@ -153,7 +153,7 @@ async def test_duplicate_provider_registration(config):
)
await cached_registry.register(original_bank)
duplicate_bank = VectorMemoryBankDef(
duplicate_bank = VectorMemoryBank(
identifier="test_bank_2",
embedding_model="different-model",
chunk_size_in_tokens=128,


@ -13,7 +13,6 @@ from pydantic import BaseModel, Field
from llama_stack.apis.datasets import DatasetDef
from llama_stack.apis.eval_tasks import EvalTaskDef
from llama_stack.apis.memory_banks import MemoryBankDef
from llama_stack.apis.models import Model
from llama_stack.apis.scoring_functions import ScoringFnDef
from llama_stack.apis.shields import Shield
@ -51,9 +50,9 @@ class ShieldsProtocolPrivate(Protocol):
class MemoryBanksProtocolPrivate(Protocol):
async def list_memory_banks(self) -> List[MemoryBankDef]: ...
async def list_memory_banks(self) -> List[MemoryBank]: ...
async def register_memory_bank(self, memory_bank: MemoryBankDef) -> None: ...
async def register_memory_bank(self, memory_bank: MemoryBank) -> None: ...
class DatasetsProtocolPrivate(Protocol):
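For provider authors, the private protocol now hands `register_memory_bank` a `MemoryBank` resource instead of a `MemoryBankDef`, and providers inspect `memory_bank_type` rather than `type`. A toy in-memory provider sketched against the protocol above (not a real provider; imports are assumed to match the repo layout):

```python
from typing import Dict, List

from llama_stack.apis.memory_banks import MemoryBank, MemoryBankType  # path assumed
from llama_stack.providers.datatypes import MemoryBanksProtocolPrivate  # path assumed


class ToyMemoryBanksProvider(MemoryBanksProtocolPrivate):
    """Keeps registered banks in a dict; just enough to show the new types."""

    def __init__(self) -> None:
        self.banks: Dict[str, MemoryBank] = {}

    async def register_memory_bank(self, memory_bank: MemoryBank) -> None:
        # Providers in this commit check memory_bank_type (the bank flavor),
        # not .type (which is now the Resource type).
        assert (
            memory_bank.memory_bank_type == MemoryBankType.vector.value
        ), f"Only vector banks are supported {memory_bank.memory_bank_type}"
        self.banks[memory_bank.identifier] = memory_bank

    async def list_memory_banks(self) -> List[MemoryBank]:
        return list(self.banks.values())
```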


@ -641,7 +641,7 @@ class ChatAgent(ShieldRunnerMixin):
if session_info.memory_bank_id is None:
bank_id = f"memory_bank_{session_id}"
memory_bank = VectorMemoryBankDef(
memory_bank = VectorMemoryBank(
identifier=bank_id,
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,


@ -83,7 +83,7 @@ class FaissMemoryImpl(Memory, MemoryBanksProtocolPrivate):
stored_banks = await self.kvstore.range(start_key, end_key)
for bank_data in stored_banks:
bank = VectorMemoryBankDef.model_validate_json(bank_data)
bank = VectorMemoryBank.model_validate_json(bank_data)
index = BankWithIndex(
bank=bank, index=FaissIndex(ALL_MINILM_L6_V2_DIMENSION)
)
@ -95,10 +95,10 @@ class FaissMemoryImpl(Memory, MemoryBanksProtocolPrivate):
async def register_memory_bank(
self,
memory_bank: MemoryBankDef,
memory_bank: MemoryBank,
) -> None:
assert (
memory_bank.type == MemoryBankType.vector.value
memory_bank.memory_bank_type == MemoryBankType.vector.value
), f"Only vector banks are supported {memory_bank.type}"
# Store in kvstore
@ -114,7 +114,7 @@ class FaissMemoryImpl(Memory, MemoryBanksProtocolPrivate):
)
self.cache[memory_bank.identifier] = index
async def list_memory_banks(self) -> List[MemoryBankDef]:
async def list_memory_banks(self) -> List[MemoryBank]:
return [i.bank for i in self.cache.values()]
async def insert_documents(
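The Faiss implementation above persists banks in its kvstore as JSON and rebuilds them at startup, so `VectorMemoryBank` needs to round-trip cleanly through the pydantic v2 serializers it uses. A quick sanity sketch with placeholder values (import path and field values are assumptions):

```python
from llama_stack.apis.memory_banks import VectorMemoryBank  # import path assumed

bank = VectorMemoryBank(
    identifier="test_bank",
    provider_resource_id="test_bank",
    provider_id="faiss",  # placeholder provider id
    embedding_model="all-MiniLM-L6-v2",
    chunk_size_in_tokens=512,
    overlap_size_in_tokens=64,
)

# Roughly what gets stored in the kvstore (the write path is elided in this hunk) ...
payload = bank.model_dump_json()

# ... and what initialize() reads back for each stored bank.
restored = VectorMemoryBank.model_validate_json(payload)
assert restored == bank
```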


@ -12,6 +12,7 @@ from numpy.typing import NDArray
from qdrant_client import AsyncQdrantClient, models
from qdrant_client.models import PointStruct
from llama_stack.apis.memory_banks import * # noqa: F403
from llama_stack.providers.datatypes import MemoryBanksProtocolPrivate
from llama_stack.apis.memory import * # noqa: F403
@ -112,11 +113,11 @@ class QdrantVectorMemoryAdapter(Memory, MemoryBanksProtocolPrivate):
async def register_memory_bank(
self,
memory_bank: MemoryBankDef,
memory_bank: MemoryBank,
) -> None:
assert (
memory_bank.type == MemoryBankType.vector.value
), f"Only vector banks are supported {memory_bank.type}"
memory_bank.memory_bank_type == MemoryBankType.vector
), f"Only vector banks are supported {memory_bank.memory_bank_type}"
index = BankWithIndex(
bank=memory_bank,
@ -125,7 +126,7 @@ class QdrantVectorMemoryAdapter(Memory, MemoryBanksProtocolPrivate):
self.cache[memory_bank.identifier] = index
async def list_memory_banks(self) -> List[MemoryBankDef]:
async def list_memory_banks(self) -> List[MemoryBank]:
# Qdrant doesn't have collection level metadata to store the bank properties
# So we only return from the cache value
return [i.bank for i in self.cache.values()]


@ -114,11 +114,11 @@ class WeaviateMemoryAdapter(
async def register_memory_bank(
self,
memory_bank: MemoryBankDef,
memory_bank: MemoryBank,
) -> None:
assert (
memory_bank.type == MemoryBankType.vector.value
), f"Only vector banks are supported {memory_bank.type}"
memory_bank.memory_bank_type == MemoryBankType.vector
), f"Only vector banks are supported {memory_bank.memory_bank_type}"
client = self._get_client()
@ -141,7 +141,7 @@ class WeaviateMemoryAdapter(
)
self.cache[memory_bank.identifier] = index
async def list_memory_banks(self) -> List[MemoryBankDef]:
async def list_memory_banks(self) -> List[MemoryBank]:
# TODO: right now the Llama Stack is the source of truth for these banks. That is
# not ideal. It should be Weaviate which is the source of truth. Unfortunately,
# list() happens at Stack startup when the Weaviate client (credentials) is not
@ -157,8 +157,8 @@ class WeaviateMemoryAdapter(
raise ValueError(f"Bank {bank_id} not found")
client = self._get_client()
if not client.collections.exists(bank_id):
raise ValueError(f"Collection with name `{bank_id}` not found")
if not client.collections.exists(bank.identifier):
raise ValueError(f"Collection with name `{bank.identifier}` not found")
index = BankWithIndex(
bank=bank,


@ -43,14 +43,15 @@ def sample_documents():
async def register_memory_bank(banks_impl: MemoryBanks):
bank = VectorMemoryBankDef(
identifier="test_bank",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,
overlap_size_in_tokens=64,
)
await banks_impl.register_memory_bank(bank)
await banks_impl.register_memory_bank(
VectorRegistration(
memory_bank_id="test_bank",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,
overlap_size_in_tokens=64,
)
)
class TestMemory:
@ -68,8 +69,8 @@ class TestMemory:
# NOTE: this needs you to ensure that you are starting from a clean state
# but so far we don't have an unregister API unfortunately, so be careful
_, banks_impl = memory_stack
bank = VectorMemoryBankDef(
identifier="test_bank_no_provider",
bank = VectorRegistration(
memory_bank_id="test_bank_no_provider",
embedding_model="all-MiniLM-L6-v2",
chunk_size_in_tokens=512,
overlap_size_in_tokens=64,


@ -148,7 +148,7 @@ class EmbeddingIndex(ABC):
@dataclass
class BankWithIndex:
bank: MemoryBankDef
bank: VectorMemoryBank
index: EmbeddingIndex
async def insert_documents(