Merge upstream/main and resolve conflicts

Resolved merge conflicts in:
- Documentation files: updated vector IO provider docs to include both kvstore fields and embedding model configuration
- Config files: merged kvstore requirements from upstream with embedding model fields
- Dependencies: updated to latest client versions while preserving the llama-models dependency
- Regenerated lockfiles to ensure consistency

All embedding model configuration features preserved while incorporating upstream changes.
Author: skamenan7
Date:   2025-07-16 19:57:02 -04:00
Commit: 6634b21a76

92 changed files with 3069 additions and 2481 deletions
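
As context for the config merge described in the message above, here is a minimal sketch of what a merged vector IO provider config could look like, expressed as pydantic models. The class and field names are illustrative assumptions, not the exact llama-stack schema; the point is that a provider config now carries both the kvstore section required by upstream and the embedding model defaults preserved on this branch.

from pydantic import BaseModel


class KVStoreReference(BaseModel):
    # Persistence settings required by upstream providers (illustrative fields).
    type: str = "sqlite"
    db_path: str = "~/.llama/vector_io.db"


class VectorIOProviderConfig(BaseModel):
    # kvstore comes from upstream; the embedding defaults are the fields
    # preserved from this branch.
    kvstore: KVStoreReference
    embedding_model: str = "all-MiniLM-L6-v2"
    embedding_dimension: int = 384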

@@ -5,6 +5,7 @@
 # the root directory of this source tree.
 import asyncio
+import uuid
 from typing import Any
 from llama_stack.apis.common.content_types import InterleavedContent
@@ -105,6 +106,7 @@ class VectorIORouter(VectorIO):
         embedding_model: str,
         embedding_dimension: int | None = 384,
         provider_id: str | None = None,
+        vector_db_name: str | None = None,
         provider_vector_db_id: str | None = None,
     ) -> None:
         logger.debug(f"VectorIORouter.register_vector_db: {vector_db_id}, {embedding_model}")
@@ -113,6 +115,7 @@ class VectorIORouter(VectorIO):
             embedding_model,
             embedding_dimension,
             provider_id,
+            vector_db_name,
             provider_vector_db_id,
         )
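
For illustration (not part of the diff), a minimal sketch of a call against the updated register_vector_db signature; the router instance and the identifier, model, and provider values below are placeholders:

async def register_example(router) -> None:
    # "router" stands in for a configured VectorIORouter.
    await router.register_vector_db(
        vector_db_id="vs_example",
        embedding_model="all-MiniLM-L6-v2",
        embedding_dimension=384,
        provider_id="faiss",
        vector_db_name="my-documents",  # new: display name, now separate from the ID
    )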
@@ -147,7 +150,6 @@ class VectorIORouter(VectorIO):
         embedding_model: str | None = None,
         embedding_dimension: int | None = None,
         provider_id: str | None = None,
-        provider_vector_db_id: str | None = None,
     ) -> VectorStoreObject:
         logger.debug(f"VectorIORouter.openai_create_vector_store: name={name}, provider_id={provider_id}")
@@ -210,17 +212,17 @@
             )
             raise ValueError(f"Unable to determine embedding model for vector store '{name}': {e}") from e
-        vector_db_id = name
+        vector_db_id = f"vs_{uuid.uuid4()}"
         registered_vector_db = await self.routing_table.register_vector_db(
-            vector_db_id,
-            embedding_model,
-            embedding_dimension,
-            provider_id,
-            provider_vector_db_id,
+            vector_db_id=vector_db_id,
+            embedding_model=embedding_model,
+            embedding_dimension=embedding_dimension,
+            provider_id=provider_id,
+            provider_vector_db_id=vector_db_id,
+            vector_db_name=name,
         )
         return await self.routing_table.get_provider_impl(registered_vector_db.identifier).openai_create_vector_store(
             vector_db_id,
             name=name,
             file_ids=file_ids,
             expires_after=expires_after,
             chunking_strategy=chunking_strategy,
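
The last hunk switches the store identifier from the user-supplied name to a generated value and forwards the name as vector_db_name instead. A minimal sketch of that ID scheme (the helper name is illustrative, not from the diff):

import uuid


def new_vector_store_id() -> str:
    # IDs are generated per store, so display names can repeat without
    # colliding while each identifier stays unique.
    return f"vs_{uuid.uuid4()}"


print(new_vector_store_id())  # e.g. vs_8f5b2c1e-...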