## What does this PR do?
This is a long-pending change and particularly important to get done
now.
Specifically:
- We cannot "localize" (aka download) any URLs from media attachments
anywhere near our modeling code. It must be done within llama-stack.
- `PIL.Image` is infesting all our APIs via `ImageMedia ->
InterleavedTextMedia`, and that cannot be right at all. Anything in the
API surface must be "naturally serializable". We need a standard `{
type: "image", image_url: "<...>" }` shape, which is also more
extensible (see the sketch below).
- `UserMessage`, `SystemMessage`, etc. are moved completely to
llama-stack from the llama-models repository.
See https://github.com/meta-llama/llama-models/pull/244 for the
corresponding PR in llama-models.
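To illustrate the serializable shape, here is a minimal sketch of what such content items could look like as tagged pydantic models. The names `ImageContentItem`, `TextContentItem`, and `InterleavedContentItem` are assumptions for this sketch, not necessarily the exact types this PR introduces:
```python
# A minimal sketch, assuming pydantic discriminated unions; the class and
# field names here are illustrative, not the exact types from this PR.
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field


class ImageContentItem(BaseModel):
    # serializes naturally to {"type": "image", "image_url": "<...>"}
    type: Literal["image"] = "image"
    image_url: str


class TextContentItem(BaseModel):
    type: Literal["text"] = "text"
    text: str


# Extensible by construction: adding audio/video later just means adding
# another tagged model, with no PIL objects anywhere in the API surface.
InterleavedContentItem = Annotated[
    Union[ImageContentItem, TextContentItem],
    Field(discriminator="type"),
]
```
A message type can then carry a plain `str` or a list of such items and round-trip through JSON cleanly.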
## Test Plan
```bash
cd llama_stack/providers/tests
pytest -s -v -k "fireworks or ollama or together" inference/test_vision_inference.py
pytest -s -v -k "(fireworks or ollama or together) and llama_3b" inference/test_text_inference.py
pytest -s -v -k chroma memory/test_memory.py \
--env EMBEDDING_DIMENSION=384 --env CHROMA_DB_PATH=/tmp/foobar
pytest -s -v -k fireworks agents/test_agents.py \
--safety-shield=meta-llama/Llama-Guard-3-8B \
--inference-model=meta-llama/Llama-3.1-8B-Instruct
```
Updated the client SDK (see PR ...), installed the SDK in the same
environment, and then ran the SDK tests:
```bash
cd tests/client-sdk
LLAMA_STACK_CONFIG=together pytest -s -v agents/test_agents.py
LLAMA_STACK_CONFIG=ollama pytest -s -v memory/test_memory.py
# this one needed a bit of hacking in the run.yaml to ensure I could register the vision model correctly
INFERENCE_MODEL=llama3.2-vision:latest LLAMA_STACK_CONFIG=ollama pytest -s -v inference/test_inference.py
```
The resulting memory API definitions (Python, 67 lines):
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict, List, Optional, Protocol, runtime_checkable

from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field

from llama_stack.apis.common.content_types import URL
from llama_stack.apis.inference import InterleavedContent
from llama_stack.apis.memory_banks import MemoryBank
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol


@json_schema_type
class MemoryBankDocument(BaseModel):
    document_id: str
    content: InterleavedContent | URL
    mime_type: str | None = None
    metadata: Dict[str, Any] = Field(default_factory=dict)


class Chunk(BaseModel):
    content: InterleavedContent
    token_count: int
    document_id: str


@json_schema_type
class QueryDocumentsResponse(BaseModel):
    chunks: List[Chunk]
    scores: List[float]


class MemoryBankStore(Protocol):
    def get_memory_bank(self, bank_id: str) -> Optional[MemoryBank]: ...


@runtime_checkable
@trace_protocol
class Memory(Protocol):
    memory_bank_store: MemoryBankStore

    # this will just block now until documents are inserted, but it should
    # probably return a Job instance which can be polled for completion
    @webmethod(route="/memory/insert")
    async def insert_documents(
        self,
        bank_id: str,
        documents: List[MemoryBankDocument],
        ttl_seconds: Optional[int] = None,
    ) -> None: ...

    @webmethod(route="/memory/query")
    async def query_documents(
        self,
        bank_id: str,
        query: InterleavedContent,
        params: Optional[Dict[str, Any]] = None,
    ) -> QueryDocumentsResponse: ...
```
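As a quick orientation to the call pattern, here is a hedged sketch of a toy in-memory implementation exercising the protocol. `ToyMemory` and everything inside it are invented for illustration (real providers would rank chunks by embedding similarity), and it assumes the classes defined above are in scope:
```python
# Illustrative only: a toy implementation showing how insert_documents and
# query_documents are meant to be called. Not part of this PR.
import asyncio
from typing import Any, Dict, List, Optional


class ToyMemory:
    def __init__(self) -> None:
        self._docs: Dict[str, List[MemoryBankDocument]] = {}

    async def insert_documents(
        self,
        bank_id: str,
        documents: List[MemoryBankDocument],
        ttl_seconds: Optional[int] = None,
    ) -> None:
        self._docs.setdefault(bank_id, []).extend(documents)

    async def query_documents(
        self,
        bank_id: str,
        query: InterleavedContent,
        params: Optional[Dict[str, Any]] = None,
    ) -> QueryDocumentsResponse:
        # return every chunk with a flat score; a real provider would embed
        # the query and rank chunks by similarity
        chunks = [
            Chunk(content=doc.content, token_count=0, document_id=doc.document_id)
            for doc in self._docs.get(bank_id, [])
        ]
        return QueryDocumentsResponse(chunks=chunks, scores=[1.0] * len(chunks))


async def main() -> None:
    memory = ToyMemory()
    # assuming plain strings are valid InterleavedContent under the new model
    await memory.insert_documents(
        bank_id="bank-0",
        documents=[MemoryBankDocument(document_id="doc-1", content="hello world")],
    )
    resp = await memory.query_documents(bank_id="bank-0", query="hello")
    print(resp.chunks[0].document_id, resp.scores)


asyncio.run(main())
```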