mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-22 22:42:25 +00:00)

Merge branch 'main' into pgvector-openai-compat

commit c0022be33d
11 changed files with 95 additions and 103 deletions
.github/workflows/tests.yml (vendored, 69 deletions)
@@ -1,69 +0,0 @@
-name: auto-tests
-
-on:
-  # pull_request:
-  workflow_dispatch:
-    inputs:
-      commit_sha:
-        description: 'Specific Commit SHA to trigger on'
-        required: false
-        default: $GITHUB_SHA # default to the last commit of $GITHUB_REF branch
-
-jobs:
-  test-llama-stack-as-library:
-    runs-on: ubuntu-latest
-    env:
-      TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
-      FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
-      TAVILY_SEARCH_API_KEY: ${{ secrets.TAVILY_SEARCH_API_KEY }}
-    strategy:
-      matrix:
-        provider: [fireworks, together]
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-        with:
-          ref: ${{ github.event.inputs.commit_sha }}
-
-      - name: Echo commit SHA
-        run: |
-          echo "Triggered on commit SHA: ${{ github.event.inputs.commit_sha }}"
-          git rev-parse HEAD
-
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install -r requirements.txt pytest
-          pip install -e .
-
-      - name: Build providers
-        run: |
-          llama stack build --template ${{ matrix.provider }} --image-type venv
-
-      - name: Install the latest llama-stack-client & llama-models packages
-        run: |
-          pip install -e git+https://github.com/meta-llama/llama-stack-client-python.git#egg=llama-stack-client
-          pip install -e git+https://github.com/meta-llama/llama-models.git#egg=llama-models
-
-      - name: Run client-sdk test
-        working-directory: "${{ github.workspace }}"
-        env:
-          REPORT_OUTPUT: md_report.md
-        shell: bash
-        run: |
-          pip install --upgrade pytest-md-report
-          echo "REPORT_FILE=${REPORT_OUTPUT}" >> "$GITHUB_ENV"
-
-          export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
-          LLAMA_STACK_CONFIG=./llama_stack/templates/${{ matrix.provider }}/run.yaml pytest --md-report --md-report-verbose=1 ./tests/client-sdk/inference/ --md-report-output "$REPORT_OUTPUT"
-
-      - name: Output reports to the job summary
-        if: always()
-        shell: bash
-        run: |
-          if [ -f "$REPORT_FILE" ]; then
-            echo "<details><summary> Test Report for ${{ matrix.provider }} </summary>" >> $GITHUB_STEP_SUMMARY
-            echo "" >> $GITHUB_STEP_SUMMARY
-            cat "$REPORT_FILE" >> $GITHUB_STEP_SUMMARY
-            echo "" >> $GITHUB_STEP_SUMMARY
-            echo "</details>" >> $GITHUB_STEP_SUMMARY
-          fi
docs/_static/llama-stack-spec.html (vendored, 11 changes)
@@ -11340,6 +11340,9 @@
             },
             "embedding_dimension": {
                 "type": "integer"
+            },
+            "vector_db_name": {
+                "type": "string"
             }
         },
         "additionalProperties": false,
@@ -13590,10 +13593,6 @@
             "provider_id": {
                 "type": "string",
                 "description": "The ID of the provider to use for this vector store."
-            },
-            "provider_vector_db_id": {
-                "type": "string",
-                "description": "The provider-specific vector database ID."
             }
         },
         "additionalProperties": false,
@@ -15634,6 +15633,10 @@
                 "type": "string",
                 "description": "The identifier of the provider."
             },
+            "vector_db_name": {
+                "type": "string",
+                "description": "The name of the vector database."
+            },
             "provider_vector_db_id": {
                 "type": "string",
                 "description": "The identifier of the vector database in the provider."
docs/_static/llama-stack-spec.yaml (vendored, 9 changes)
@@ -7984,6 +7984,8 @@ components:
         type: string
       embedding_dimension:
         type: integer
+      vector_db_name:
+        type: string
     additionalProperties: false
     required:
       - identifier
@@ -9494,10 +9496,6 @@ components:
         type: string
         description: >-
           The ID of the provider to use for this vector store.
-      provider_vector_db_id:
-        type: string
-        description: >-
-          The provider-specific vector database ID.
     additionalProperties: false
     required:
       - name
@@ -10945,6 +10943,9 @@ components:
       provider_id:
         type: string
         description: The identifier of the provider.
+      vector_db_name:
+        type: string
+        description: The name of the vector database.
       provider_vector_db_id:
         type: string
         description: >-
llama_stack/apis/vector_dbs/vector_dbs.py
@@ -19,6 +19,7 @@ class VectorDB(Resource):

     embedding_model: str
     embedding_dimension: int
+    vector_db_name: str | None = None

     @property
     def vector_db_id(self) -> str:
@@ -70,6 +71,7 @@ class VectorDBs(Protocol):
         embedding_model: str,
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
+        vector_db_name: str | None = None,
         provider_vector_db_id: str | None = None,
     ) -> VectorDB:
         """Register a vector database.
@@ -78,6 +80,7 @@ class VectorDBs(Protocol):
         :param embedding_model: The embedding model to use.
         :param embedding_dimension: The dimension of the embedding model.
         :param provider_id: The identifier of the provider.
+        :param vector_db_name: The name of the vector database.
         :param provider_vector_db_id: The identifier of the vector database in the provider.
         :returns: A VectorDB.
         """
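For orientation, this is roughly how the new field surfaces through the Python client. A minimal sketch, assuming a local server on the default port, an illustrative embedding model, and that llama_stack_client plumbs through the new vector_db_name keyword:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")

    # The identifier stays the stable routing handle; vector_db_name is the
    # new, purely human-readable label introduced in this diff.
    client.vector_dbs.register(
        vector_db_id="my-kb",
        embedding_model="all-MiniLM-L6-v2",
        embedding_dimension=384,
        vector_db_name="My knowledge base",
    )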
llama_stack/apis/vector_io/vector_io.py
@@ -346,7 +346,6 @@ class VectorIO(Protocol):
         embedding_model: str | None = None,
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
-        provider_vector_db_id: str | None = None,
     ) -> VectorStoreObject:
         """Creates a vector store.

@@ -358,7 +357,6 @@ class VectorIO(Protocol):
         :param embedding_model: The embedding model to use for this vector store.
         :param embedding_dimension: The dimension of the embedding vectors (default: 384).
         :param provider_id: The ID of the provider to use for this vector store.
-        :param provider_vector_db_id: The provider-specific vector database ID.
         :returns: A VectorStoreObject representing the created vector store.
         """
         ...
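At a call site, the removal means callers can no longer pin the backing database ID when creating an OpenAI-compatible vector store. A hedged sketch of the new call shape, where vector_io stands in for any VectorIO implementation and the model name is illustrative:

    async def create_store(vector_io) -> None:
        # provider_vector_db_id is gone from this signature; the backing
        # vs_<uuid> identifier is generated server-side (see the router
        # change below).
        store = await vector_io.openai_create_vector_store(
            name="docs",
            embedding_model="all-MiniLM-L6-v2",
        )
        print(store.id)  # e.g. "vs_3f2a..."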
llama_stack/distribution/routers/vector_io.py
@@ -5,6 +5,7 @@
 # the root directory of this source tree.

 import asyncio
+import uuid
 from typing import Any

 from llama_stack.apis.common.content_types import (
@@ -81,6 +82,7 @@ class VectorIORouter(VectorIO):
         embedding_model: str,
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
+        vector_db_name: str | None = None,
         provider_vector_db_id: str | None = None,
     ) -> None:
         logger.debug(f"VectorIORouter.register_vector_db: {vector_db_id}, {embedding_model}")
@@ -89,6 +91,7 @@ class VectorIORouter(VectorIO):
             embedding_model,
             embedding_dimension,
             provider_id,
+            vector_db_name,
             provider_vector_db_id,
         )

@@ -123,7 +126,6 @@ class VectorIORouter(VectorIO):
         embedding_model: str | None = None,
         embedding_dimension: int | None = None,
         provider_id: str | None = None,
-        provider_vector_db_id: str | None = None,
     ) -> VectorStoreObject:
         logger.debug(f"VectorIORouter.openai_create_vector_store: name={name}, provider_id={provider_id}")

@@ -135,17 +137,17 @@ class VectorIORouter(VectorIO):
             embedding_model, embedding_dimension = embedding_model_info
             logger.info(f"No embedding model specified, using first available: {embedding_model}")

-        vector_db_id = name
+        vector_db_id = f"vs_{uuid.uuid4()}"
         registered_vector_db = await self.routing_table.register_vector_db(
-            vector_db_id,
-            embedding_model,
-            embedding_dimension,
-            provider_id,
-            provider_vector_db_id,
+            vector_db_id=vector_db_id,
+            embedding_model=embedding_model,
+            embedding_dimension=embedding_dimension,
+            provider_id=provider_id,
+            provider_vector_db_id=vector_db_id,
+            vector_db_name=name,
         )

         return await self.routing_table.get_provider_impl(registered_vector_db.identifier).openai_create_vector_store(
             vector_db_id,
             name=name,
             file_ids=file_ids,
             expires_after=expires_after,
             chunking_strategy=chunking_strategy,
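The behavioral core of this hunk: the store identifier is now minted from a UUID instead of reusing the user-supplied name, so two stores with the same display name can no longer collide on ID. A self-contained sketch of the scheme:

    import uuid

    def new_vector_store_id() -> str:
        # Mirrors the router's f"vs_{uuid.uuid4()}" line above.
        return f"vs_{uuid.uuid4()}"

    a = new_vector_store_id()
    b = new_vector_store_id()
    assert a.startswith("vs_") and a != b  # unique on every call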
llama_stack/distribution/routing_tables/vector_dbs.py
@@ -36,6 +36,7 @@ class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
         provider_vector_db_id: str | None = None,
+        vector_db_name: str | None = None,
     ) -> VectorDB:
         if provider_vector_db_id is None:
             provider_vector_db_id = vector_db_id
@@ -62,6 +63,7 @@ class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
             "provider_resource_id": provider_vector_db_id,
             "embedding_model": embedding_model,
             "embedding_dimension": model.metadata["embedding_dimension"],
+            "vector_db_name": vector_db_name,
         }
         vector_db = TypeAdapter(VectorDBWithOwner).validate_python(vector_db_data)
         await self.register_object(vector_db)
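Concretely, the record the routing table validates and persists now carries the display name next to the embedding settings. A sketch of the shape with illustrative values; only the keys visible in the hunk above plus the obvious identifier are shown:

    vector_db_data = {
        "identifier": "vs_3f2a...",            # canonical vector_db_id
        "provider_resource_id": "vs_3f2a...",  # defaults to vector_db_id
        "embedding_model": "all-MiniLM-L6-v2",
        "embedding_dimension": 384,
        "vector_db_name": "my_documents",      # new human-readable label
    }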
llama_stack/providers/remote/vector_io/chroma/chroma.py
@@ -217,7 +217,6 @@ class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         embedding_model: str | None = None,
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
-        provider_vector_db_id: str | None = None,
     ) -> VectorStoreObject:
         raise NotImplementedError("OpenAI Vector Stores API is not supported in Chroma")
llama_stack/providers/remote/vector_io/qdrant/qdrant.py
@@ -214,7 +214,6 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         embedding_model: str | None = None,
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
-        provider_vector_db_id: str | None = None,
     ) -> VectorStoreObject:
         raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
llama_stack/providers/utils/memory/openai_vector_store_mixin.py
@@ -172,8 +172,9 @@ class OpenAIVectorStoreMixin(ABC):
         provider_vector_db_id: str | None = None,
     ) -> VectorStoreObject:
         """Creates a vector store."""
-        store_id = name or str(uuid.uuid4())
         created_at = int(time.time())
+        # Derive the canonical vector_db_id (allow override, else generate)
+        vector_db_id = provider_vector_db_id or f"vs_{uuid.uuid4()}"

         if provider_id is None:
             raise ValueError("Provider ID is required")
@@ -181,19 +182,19 @@ class OpenAIVectorStoreMixin(ABC):
         if embedding_model is None:
             raise ValueError("Embedding model is required")

-        # Use provided embedding dimension or default to 384
+        # Embedding dimension is required (defaulted to 384 if not provided)
         if embedding_dimension is None:
             raise ValueError("Embedding dimension is required")

-        provider_vector_db_id = provider_vector_db_id or store_id
+        # Register the VectorDB backing this vector store
         vector_db = VectorDB(
-            identifier=store_id,
+            identifier=vector_db_id,
             embedding_dimension=embedding_dimension,
             embedding_model=embedding_model,
             provider_id=provider_id,
-            provider_resource_id=provider_vector_db_id,
+            provider_resource_id=vector_db_id,
+            vector_db_name=name,
         )
-        # Register the vector DB
         await self.register_vector_db(vector_db)

         # Create OpenAI vector store metadata
@@ -207,11 +208,11 @@ class OpenAIVectorStoreMixin(ABC):
             in_progress=0,
             total=0,
         )
-        store_info = {
-            "id": store_id,
+        store_info: dict[str, Any] = {
+            "id": vector_db_id,
             "object": "vector_store",
             "created_at": created_at,
-            "name": store_id,
+            "name": name,
             "usage_bytes": 0,
             "file_counts": file_counts.model_dump(),
             "status": status,
@@ -231,18 +232,18 @@ class OpenAIVectorStoreMixin(ABC):
             store_info["metadata"] = metadata

         # Save to persistent storage (provider-specific)
-        await self._save_openai_vector_store(store_id, store_info)
+        await self._save_openai_vector_store(vector_db_id, store_info)

         # Store in memory cache
-        self.openai_vector_stores[store_id] = store_info
+        self.openai_vector_stores[vector_db_id] = store_info

         # Now that our vector store is created, attach any files that were provided
         file_ids = file_ids or []
-        tasks = [self.openai_attach_file_to_vector_store(store_id, file_id) for file_id in file_ids]
+        tasks = [self.openai_attach_file_to_vector_store(vector_db_id, file_id) for file_id in file_ids]
         await asyncio.gather(*tasks)

         # Get the updated store info and return it
-        store_info = self.openai_vector_stores[store_id]
+        store_info = self.openai_vector_stores[vector_db_id]
         return VectorStoreObject.model_validate(store_info)

     async def openai_list_vector_stores(
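End to end, the mixin now keeps "id" (the generated vs_<uuid>) separate from "name" (the label), which is what makes duplicate names safe. A hedged usage sketch against the OpenAI-compatible endpoint; the base URL follows the usual local-server convention, and an openai client version with top-level vector_stores support is assumed:

    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

    vs1 = client.vector_stores.create(name="docs")
    vs2 = client.vector_stores.create(name="docs")  # duplicate name is fine
    assert vs1.id != vs2.id                # unique vs_<uuid> identifiers
    assert vs1.name == vs2.name == "docs"  # label preserved in "name"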
tests/integration/vector_io/test_openai_vector_stores.py
@@ -821,6 +821,59 @@ def test_openai_vector_store_update_file(compat_client_with_empty_stores, client_with_models):
     assert retrieved_file.attributes["foo"] == "baz"


+def test_create_vector_store_files_duplicate_vector_store_name(compat_client_with_empty_stores, client_with_models):
+    """
+    This test confirms that client.vector_stores.create() creates a unique ID
+    """
+    skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
+    skip_if_provider_doesnt_support_openai_vector_store_files_api(client_with_models)
+
+    if isinstance(compat_client_with_empty_stores, LlamaStackClient):
+        pytest.skip("Vector Store Files create is not yet supported with LlamaStackClient")
+
+    compat_client = compat_client_with_empty_stores
+
+    # Create a vector store with files
+    file_ids = []
+    for i in range(3):
+        with BytesIO(f"This is a test file {i}".encode()) as file_buffer:
+            file_buffer.name = f"openai_test_{i}.txt"
+            file = compat_client.files.create(file=file_buffer, purpose="assistants")
+        file_ids.append(file.id)
+
+    vector_store = compat_client.vector_stores.create(
+        name="test_store_with_files",
+    )
+    assert vector_store.file_counts.completed == 0
+    assert vector_store.file_counts.total == 0
+    assert vector_store.file_counts.cancelled == 0
+    assert vector_store.file_counts.failed == 0
+    assert vector_store.file_counts.in_progress == 0
+
+    vector_store2 = compat_client.vector_stores.create(
+        name="test_store_with_files",
+    )
+
+    vector_stores_list = compat_client.vector_stores.list()
+    assert len(vector_stores_list.data) == 2
+
+    created_file = compat_client.vector_stores.files.create(
+        vector_store_id=vector_store.id,
+        file_id=file_ids[0],
+    )
+    assert created_file.status == "completed"
+
+    _ = compat_client.vector_stores.delete(vector_store2.id)
+    created_file_from_non_deleted_vector_store = compat_client.vector_stores.files.create(
+        vector_store_id=vector_store.id,
+        file_id=file_ids[1],
+    )
+    assert created_file_from_non_deleted_vector_store.status == "completed"
+
+    vector_stores_list_post_delete = compat_client.vector_stores.list()
+    assert len(vector_stores_list_post_delete.data) == 1
+
+
 @pytest.mark.skip(reason="Client library needs to be scaffolded to support search_mode parameter")
 def test_openai_vector_store_search_modes():
     """Test OpenAI vector store search with different search modes.