diff --git a/docs/_static/js/keyboard_shortcuts.js b/docs/_static/js/keyboard_shortcuts.js
new file mode 100644
index 000000000..81d0b7c65
--- /dev/null
+++ b/docs/_static/js/keyboard_shortcuts.js
@@ -0,0 +1,14 @@
+document.addEventListener('keydown', function(event) {
+    // command+K or ctrl+K
+    if ((event.metaKey || event.ctrlKey) && event.key === 'k') {
+        event.preventDefault();
+        document.querySelector('.search-input, .search-field, input[name="q"]').focus();
+    }
+
+    // forward slash
+    if (event.key === '/' &&
+        !event.target.matches('input, textarea, select')) {
+        event.preventDefault();
+        document.querySelector('.search-input, .search-field, input[name="q"]').focus();
+    }
+});
diff --git a/docs/source/apis/external.md b/docs/source/apis/external.md
index cc13deb9b..5831990b0 100644
--- a/docs/source/apis/external.md
+++ b/docs/source/apis/external.md
@@ -111,7 +111,7 @@ name = "llama-stack-api-weather"
 version = "0.1.0"
 description = "Weather API for Llama Stack"
 readme = "README.md"
-requires-python = ">=3.10"
+requires-python = ">=3.12"
 dependencies = ["llama-stack", "pydantic"]
 
 [build-system]
@@ -231,7 +231,7 @@ name = "llama-stack-provider-kaze"
 version = "0.1.0"
 description = "Kaze weather provider for Llama Stack"
 readme = "README.md"
-requires-python = ">=3.10"
+requires-python = ">=3.12"
 dependencies = ["llama-stack", "pydantic", "aiohttp"]
 
 [build-system]
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 20f1abf00..3f84d1310 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -131,6 +131,7 @@ html_static_path = ["../_static"]
 def setup(app):
     app.add_css_file("css/my_theme.css")
     app.add_js_file("js/detect_theme.js")
+    app.add_js_file("js/keyboard_shortcuts.js")
 
 def dockerhub_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
     url = f"https://hub.docker.com/r/llamastack/{text}"
diff --git a/docs/source/contributing/index.md b/docs/source/contributing/index.md
index 1e067ea6c..79c3861ea 100644
--- a/docs/source/contributing/index.md
+++ b/docs/source/contributing/index.md
@@ -2,14 +2,28 @@
 
 ```{include} ../../../CONTRIBUTING.md
 ```
 
-See the [Adding a New API Provider](new_api_provider.md) which describes how to add new API providers to the Stack.
+## Testing
+See the [Test Page](testing.md) which describes how to test your changes.
+```{toctree}
+:maxdepth: 1
+:hidden:
+:caption: Testing
+testing
+```
+## Adding a New Provider
+
+See the [Adding a New API Provider Page](new_api_provider.md) which describes how to add new API providers to the Stack.
+
+See the [Vector Database Page](new_vector_database.md) which describes how to add a new vector database to Llama Stack.
+
+See the [External Provider Page](../providers/external/index.md) which describes how to add external providers to the Stack.
 
 ```{toctree}
 :maxdepth: 1
 :hidden:
 
 new_api_provider
-testing
+new_vector_database
 ```

diff --git a/docs/source/contributing/new_vector_database.md b/docs/source/contributing/new_vector_database.md
new file mode 100644
index 000000000..83c0f55bc
--- /dev/null
+++ b/docs/source/contributing/new_vector_database.md
@@ -0,0 +1,75 @@
+# Adding a New Vector Database
+
+This guide will walk you through the process of adding a new vector database to Llama Stack.
+
+> **_NOTE:_** Here's an example Pull Request of the [Milvus Vector Database Provider](https://github.com/meta-llama/llama-stack/pull/1467).
+
+Vector Database providers are used to store and retrieve vector embeddings. Vector databases are not limited to vector
+search but can support keyword and hybrid search. Additionally, vector databases can also support operations like
+filtering, sorting, and aggregating vectors.
+
+## Steps to Add a New Vector Database Provider
+1. **Choose the Database Type**: Determine if your vector database is a remote service, inline, or both.
+   - Remote databases make requests to external services, while inline databases execute locally. Some providers support both.
+2. **Implement the Provider**: Create a new provider class that inherits from `VectorDatabaseProvider` and implements the required methods.
+   - Implement methods for vector storage, retrieval, search, and any additional features your database supports.
+   - You will need to implement the following methods for `YourVectorIndex`:
+     - `YourVectorIndex.create()`
+     - `YourVectorIndex.initialize()`
+     - `YourVectorIndex.add_chunks()`
+     - `YourVectorIndex.delete_chunk()`
+     - `YourVectorIndex.query_vector()`
+     - `YourVectorIndex.query_keyword()`
+     - `YourVectorIndex.query_hybrid()`
+   - You will need to implement the following methods for `YourVectorIOAdapter`:
+     - `YourVectorIOAdapter.initialize()`
+     - `YourVectorIOAdapter.shutdown()`
+     - `YourVectorIOAdapter.list_vector_dbs()`
+     - `YourVectorIOAdapter.register_vector_db()`
+     - `YourVectorIOAdapter.unregister_vector_db()`
+     - `YourVectorIOAdapter.insert_chunks()`
+     - `YourVectorIOAdapter.query_chunks()`
+     - `YourVectorIOAdapter.delete_chunks()`
+3. **Add to Registry**: Register your provider in the appropriate registry file.
+   - Update {repopath}`llama_stack/providers/registry/vector_io.py` to include your new provider.
+```python
+from llama_stack.providers.registry.specs import InlineProviderSpec
+from llama_stack.providers.registry.api import Api
+
+InlineProviderSpec(
+    api=Api.vector_io,
+    provider_type="inline::milvus",
+    pip_packages=["pymilvus>=2.4.10"],
+    module="llama_stack.providers.inline.vector_io.milvus",
+    config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig",
+    api_dependencies=[Api.inference],
+    optional_api_dependencies=[Api.files],
+    description="",
+),
+```
+4. **Add Tests**: Create unit tests and integration tests for your provider in the `tests/` directory.
+   - Unit Tests
+     - By following the structure of the class methods, you will be able to easily run unit and integration tests for your database.
+       1. You have to configure the tests for your provider in `/tests/unit/providers/vector_io/conftest.py`.
+       2. Update the `vector_provider` fixture to include your provider if it is an inline provider.
+       3. Create a `your_vectorprovider_index` fixture that initializes your vector index.
+       4. Create a `your_vectorprovider_adapter` fixture that initializes your vector adapter.
+       5. Add your provider to the `vector_io_providers` fixture dictionary.
+         - Please follow the naming convention of `your_vectorprovider_index` and `your_vectorprovider_adapter` as the tests require this to execute properly.
+   - Integration Tests
+     - Integration tests are located in {repopath}`tests/integration`. These tests use the Python client SDK APIs (from the `llama_stack_client` package) to test functionality.
+     - The two sets of integration tests are:
+       - `tests/integration/vector_io/test_vector_io.py`: This file tests registration, insertion, and retrieval.
+       - `tests/integration/vector_io/test_openai_vector_stores.py`: These tests are for OpenAI-compatible vector stores and test the OpenAI API compatibility.
+       - You will need to update `skip_if_provider_doesnt_support_openai_vector_stores` to include your provider as well as `skip_if_provider_doesnt_support_openai_vector_stores_search` to test the appropriate search functionality.
+   - Running the tests in the GitHub CI
+     - You will need to update the `.github/workflows/integration-vector-io-tests.yml` file to include your provider.
+     - If your provider is a remote provider, you will also have to add a container to spin up and run it in the action.
+   - Updating the `pyproject.toml`
+     - If you are adding tests for the `inline` provider you will have to update the `unit` group.
+       - `uv add new_pip_package --group unit`
+     - If you are adding tests for the `remote` provider you will have to update the `test` group, which is used in the GitHub CI for integration tests.
+       - `uv add new_pip_package --group test`
+5. **Update Documentation**: Please update the documentation for end users.
+   - Generate the provider documentation by running {repopath}`./scripts/provider_codegen.py`.
+   - Update the autogenerated content in the registry/vector_io.py file with information about your provider. Please see other providers for examples.
\ No newline at end of file
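The method lists in the new guide above map onto two classes. As a rough, non-authoritative skeleton: the method names below are taken straight from the guide, while the signatures, parameter types, return types, and base classes are placeholders rather than the real llama-stack vector-io interfaces.

```python
# Skeleton only: method names follow the guide above; the signatures and types
# are placeholders, not the actual llama-stack vector-io interfaces.
from typing import Any


class YourVectorIndex:
    """Wraps one index/collection in the backing vector database."""

    @classmethod
    async def create(cls, **kwargs: Any) -> "YourVectorIndex":
        raise NotImplementedError

    async def initialize(self) -> None: ...
    async def add_chunks(self, chunks: list[Any], embeddings: Any) -> None: ...
    async def delete_chunk(self, chunk_id: str) -> None: ...
    async def query_vector(self, embedding: Any, k: int, score_threshold: float) -> Any: ...
    async def query_keyword(self, query_string: str, k: int, score_threshold: float) -> Any: ...
    async def query_hybrid(self, embedding: Any, query_string: str, k: int, score_threshold: float) -> Any: ...


class YourVectorIOAdapter:
    """Routes vector-io requests to the indexes managed by this provider."""

    async def initialize(self) -> None: ...
    async def shutdown(self) -> None: ...
    async def list_vector_dbs(self) -> list[Any]: ...
    async def register_vector_db(self, vector_db: Any) -> None: ...
    async def unregister_vector_db(self, vector_db_id: str) -> None: ...
    async def insert_chunks(self, vector_db_id: str, chunks: list[Any]) -> None: ...
    async def query_chunks(self, vector_db_id: str, query: Any, params: dict[str, Any] | None = None) -> Any: ...
    async def delete_chunks(self, vector_db_id: str, chunk_ids: list[str]) -> None: ...
```

The `your_vectorprovider_index` and `your_vectorprovider_adapter` fixtures described in step 4 of the guide are the pieces expected to construct and initialize these two objects for the unit tests.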
diff --git a/docs/source/contributing/testing.md b/docs/source/contributing/testing.md
index 47bf9dea7..454ded266 100644
--- a/docs/source/contributing/testing.md
+++ b/docs/source/contributing/testing.md
@@ -1,6 +1,8 @@
-# Testing Llama Stack
+```{include} ../../../tests/README.md
+```
 
-Tests are of three different kinds:
-- Unit tests
-- Provider focused integration tests
-- Client SDK tests
+```{include} ../../../tests/unit/README.md
+```
+
+```{include} ../../../tests/integration/README.md
+```
diff --git a/docs/source/providers/external/external-providers-guide.md b/docs/source/providers/external/external-providers-guide.md
index 2479d406f..e2d4ebea9 100644
--- a/docs/source/providers/external/external-providers-guide.md
+++ b/docs/source/providers/external/external-providers-guide.md
@@ -226,7 +226,7 @@ uv init
 name = "llama-stack-provider-ollama"
 version = "0.1.0"
 description = "Ollama provider for Llama Stack"
-requires-python = ">=3.10"
+requires-python = ">=3.12"
 dependencies = ["llama-stack", "pydantic", "ollama", "aiohttp"]
 ```

diff --git a/llama_stack/apis/common/errors.py b/llama_stack/apis/common/errors.py
index 95d6ac18e..6e0fa0b3c 100644
--- a/llama_stack/apis/common/errors.py
+++ b/llama_stack/apis/common/errors.py
@@ -62,3 +62,13 @@ class SessionNotFoundError(ValueError):
     def __init__(self, session_name: str) -> None:
         message = f"Session '{session_name}' not found or access denied."
         super().__init__(message)
+
+
+class ModelTypeError(TypeError):
+    """raised when a model is present but not the correct type"""
+
+    def __init__(self, model_name: str, model_type: str, expected_model_type: str) -> None:
+        message = (
+            f"Model '{model_name}' is of type '{model_type}' rather than the expected type '{expected_model_type}'"
+        )
+        super().__init__(message)
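For reference, this is the message the new exception produces; the model name and types below are made-up values chosen only to show the format.

```python
# Illustration only: hypothetical model name and types, showing the message
# format of the ModelTypeError added above.
from llama_stack.apis.common.errors import ModelTypeError

err = ModelTypeError("my-embedding-model", "embedding", "llm")
print(err)
# Model 'my-embedding-model' is of type 'embedding' rather than the expected type 'llm'
```

Note that `ModelTypeError` subclasses `TypeError`, while the checks it replaces in the router raised `ValueError`, so callers that matched on `ValueError` for the wrong-model-type case need to catch the new exception instead.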
diff --git a/llama_stack/core/routers/inference.py b/llama_stack/core/routers/inference.py
index 79ab7c34f..52581cc9d 100644
--- a/llama_stack/core/routers/inference.py
+++ b/llama_stack/core/routers/inference.py
@@ -18,7 +18,7 @@ from llama_stack.apis.common.content_types import (
     InterleavedContent,
     InterleavedContentItem,
 )
-from llama_stack.apis.common.errors import ModelNotFoundError
+from llama_stack.apis.common.errors import ModelNotFoundError, ModelTypeError
 from llama_stack.apis.inference import (
     BatchChatCompletionResponse,
     BatchCompletionResponse,
@@ -177,6 +177,15 @@
         encoded = self.formatter.encode_content(messages)
         return len(encoded.tokens) if encoded and encoded.tokens else 0
 
+    async def _get_model(self, model_id: str, expected_model_type: str) -> Model:
+        """takes a model id and gets model after ensuring that it is accessible and of the correct type"""
+        model = await self.routing_table.get_model(model_id)
+        if model is None:
+            raise ModelNotFoundError(model_id)
+        if model.model_type != expected_model_type:
+            raise ModelTypeError(model_id, model.model_type, expected_model_type)
+        return model
+
     async def chat_completion(
         self,
         model_id: str,
@@ -195,11 +204,7 @@
         )
         if sampling_params is None:
             sampling_params = SamplingParams()
-        model = await self.routing_table.get_model(model_id)
-        if model is None:
-            raise ModelNotFoundError(model_id)
-        if model.model_type == ModelType.embedding:
-            raise ValueError(f"Model '{model_id}' is an embedding model and does not support chat completions")
+        model = await self._get_model(model_id, ModelType.llm)
         if tool_config:
             if tool_choice and tool_choice != tool_config.tool_choice:
                 raise ValueError("tool_choice and tool_config.tool_choice must match")
@@ -301,11 +306,7 @@
         logger.debug(
             f"InferenceRouter.completion: {model_id=}, {stream=}, {content=}, {sampling_params=}, {response_format=}",
         )
-        model = await self.routing_table.get_model(model_id)
-        if model is None:
-            raise ModelNotFoundError(model_id)
-        if model.model_type == ModelType.embedding:
-            raise ValueError(f"Model '{model_id}' is an embedding model and does not support chat completions")
+        model = await self._get_model(model_id, ModelType.llm)
         provider = await self.routing_table.get_provider_impl(model_id)
         params = dict(
             model_id=model_id,
@@ -355,11 +356,7 @@
         task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         logger.debug(f"InferenceRouter.embeddings: {model_id}")
-        model = await self.routing_table.get_model(model_id)
-        if model is None:
-            raise ModelNotFoundError(model_id)
-        if model.model_type == ModelType.llm:
-            raise ValueError(f"Model '{model_id}' is an LLM model and does not support embeddings")
+        await self._get_model(model_id, ModelType.embedding)
         provider = await self.routing_table.get_provider_impl(model_id)
         return await provider.embeddings(
             model_id=model_id,
@@ -395,12 +392,7 @@
         logger.debug(
             f"InferenceRouter.openai_completion: {model=}, {stream=}, {prompt=}",
         )
-        model_obj = await self.routing_table.get_model(model)
-        if model_obj is None:
-            raise ModelNotFoundError(model)
-        if model_obj.model_type == ModelType.embedding:
-            raise ValueError(f"Model '{model}' is an embedding model and does not support completions")
-
+        model_obj = await self._get_model(model, ModelType.llm)
         params = dict(
             model=model_obj.identifier,
             prompt=prompt,
@@ -476,11 +468,7 @@
         logger.debug(
             f"InferenceRouter.openai_chat_completion: {model=}, {stream=}, {messages=}",
         )
-        model_obj = await self.routing_table.get_model(model)
-        if model_obj is None:
-            raise ModelNotFoundError(model)
-        if model_obj.model_type == ModelType.embedding:
-            raise ValueError(f"Model '{model}' is an embedding model and does not support chat completions")
+        model_obj = await self._get_model(model, ModelType.llm)
 
         # Use the OpenAI client for a bit of extra input validation without
         # exposing the OpenAI client itself as part of our API surface
@@ -567,12 +555,7 @@
         logger.debug(
             f"InferenceRouter.openai_embeddings: {model=}, input_type={type(input)}, {encoding_format=}, {dimensions=}",
         )
-        model_obj = await self.routing_table.get_model(model)
-        if model_obj is None:
-            raise ModelNotFoundError(model)
-        if model_obj.model_type != ModelType.embedding:
-            raise ValueError(f"Model '{model}' is not an embedding model")
-
+        model_obj = await self._get_model(model, ModelType.embedding)
         params = dict(
             model=model_obj.identifier,
             input=input,
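Every endpoint above now resolves its model through the same `_get_model` helper, so the not-found and wrong-type failures are uniform across chat completions, completions, embeddings, and the OpenAI-compatible routes. A self-contained sketch of that validation pattern follows; `FakeModel`, the registry dict, and the identifiers are stand-ins for illustration, not llama-stack types.

```python
# Stand-alone sketch of the lookup-and-validate pattern used by the router's
# new _get_model helper; FakeModel and REGISTRY are illustrative stand-ins.
import asyncio
from dataclasses import dataclass

from llama_stack.apis.common.errors import ModelNotFoundError, ModelTypeError


@dataclass
class FakeModel:
    identifier: str
    model_type: str  # "llm" or "embedding" in the real ModelType enum


REGISTRY = {"my-embedding-model": FakeModel("my-embedding-model", "embedding")}


async def get_model(model_id: str, expected_model_type: str) -> FakeModel:
    model = REGISTRY.get(model_id)
    if model is None:
        raise ModelNotFoundError(model_id)
    if model.model_type != expected_model_type:
        raise ModelTypeError(model_id, model.model_type, expected_model_type)
    return model


async def main() -> None:
    await get_model("my-embedding-model", "embedding")  # succeeds
    try:
        await get_model("my-embedding-model", "llm")  # e.g. the chat-completion path
    except ModelTypeError as err:
        print(err)
    try:
        await get_model("unknown-model", "llm")
    except ModelNotFoundError as err:
        print(err)


asyncio.run(main())
```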
diff --git a/llama_stack/core/routing_tables/toolgroups.py b/llama_stack/core/routing_tables/toolgroups.py
index e172af991..6910b3906 100644
--- a/llama_stack/core/routing_tables/toolgroups.py
+++ b/llama_stack/core/routing_tables/toolgroups.py
@@ -124,10 +124,7 @@ class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
         return toolgroup
 
     async def unregister_toolgroup(self, toolgroup_id: str) -> None:
-        tool_group = await self.get_tool_group(toolgroup_id)
-        if tool_group is None:
-            raise ToolGroupNotFoundError(toolgroup_id)
-        await self.unregister_object(tool_group)
+        await self.unregister_object(await self.get_tool_group(toolgroup_id))
 
     async def shutdown(self) -> None:
         pass
diff --git a/llama_stack/core/routing_tables/vector_dbs.py b/llama_stack/core/routing_tables/vector_dbs.py
index c81a27a3b..e8dc46997 100644
--- a/llama_stack/core/routing_tables/vector_dbs.py
+++ b/llama_stack/core/routing_tables/vector_dbs.py
@@ -8,7 +8,7 @@ from typing import Any
 
 from pydantic import TypeAdapter
 
-from llama_stack.apis.common.errors import ModelNotFoundError, VectorStoreNotFoundError
+from llama_stack.apis.common.errors import ModelNotFoundError, ModelTypeError, VectorStoreNotFoundError
 from llama_stack.apis.models import ModelType
 from llama_stack.apis.resource import ResourceType
 from llama_stack.apis.vector_dbs import ListVectorDBsResponse, VectorDB, VectorDBs
@@ -66,7 +66,7 @@ class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
         if model is None:
             raise ModelNotFoundError(embedding_model)
         if model.model_type != ModelType.embedding:
-            raise ValueError(f"Model {embedding_model} is not an embedding model")
+            raise ModelTypeError(embedding_model, model.model_type, ModelType.embedding)
         if "embedding_dimension" not in model.metadata:
             raise ValueError(f"Model {embedding_model} does not have an embedding dimension")
         vector_db_data = {
diff --git a/llama_stack/log.py b/llama_stack/log.py
index ab53e08c0..0a2d63ef6 100644
--- a/llama_stack/log.py
+++ b/llama_stack/log.py
@@ -99,7 +99,8 @@ def parse_environment_config(env_config: str) -> dict[str, int]:
         Dict[str, int]: A dictionary mapping categories to their log levels.
     """
 
     category_levels = {}
-    for pair in env_config.split(";"):
+    delimiter = ","
+    for pair in env_config.split(delimiter):
         if not pair.strip():
             continue
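With this change the per-category log levels in the environment configuration are parsed as a comma-separated list instead of a semicolon-separated one. A standalone sketch of the resulting behaviour, assuming the usual `category=level` pairs; the real `parse_environment_config` also validates category and level names and returns numeric levels.

```python
# Sketch of the new comma-delimited parsing; the real function in
# llama_stack/log.py validates categories/levels and maps them to ints.
def parse_levels(env_config: str) -> dict[str, str]:
    category_levels: dict[str, str] = {}
    for pair in env_config.split(","):  # previously split on ";"
        if not pair.strip():
            continue
        category, _, level = pair.partition("=")
        category_levels[category.strip()] = level.strip().lower()
    return category_levels


print(parse_levels("core=debug,server=info"))
# {'core': 'debug', 'server': 'info'}
```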
diff --git a/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py b/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
index 796771ee1..801500dee 100644
--- a/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
+++ b/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
@@ -15,6 +15,7 @@ from llama_stack.apis.safety import (
     RunShieldResponse,
     Safety,
     SafetyViolation,
+    ShieldStore,
     ViolationLevel,
 )
 from llama_stack.apis.shields import Shield
@@ -32,6 +33,8 @@ PROMPT_GUARD_MODEL = "Prompt-Guard-86M"
 
 
 class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
+    shield_store: ShieldStore
+
     def __init__(self, config: PromptGuardConfig, _deps) -> None:
         self.config = config
 
@@ -53,7 +56,7 @@ class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
         self,
         shield_id: str,
         messages: list[Message],
-        params: dict[str, Any] = None,
+        params: dict[str, Any],
     ) -> RunShieldResponse:
         shield = await self.shield_store.get_shield(shield_id)
         if not shield:
@@ -61,6 +64,9 @@ class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
 
         return await self.shield.run(messages)
 
+    async def run_moderation(self, input: str | list[str], model: str):
+        raise NotImplementedError("run_moderation not implemented for PromptGuard")
+
 
 class PromptGuardShield:
     def __init__(
@@ -117,8 +123,10 @@
         elif self.config.guard_type == PromptGuardType.jailbreak.value and score_malicious > self.threshold:
             violation = SafetyViolation(
                 violation_level=ViolationLevel.ERROR,
-                violation_type=f"prompt_injection:malicious={score_malicious}",
-                violation_return_message="Sorry, I cannot do this.",
+                user_message="Sorry, I cannot do this.",
+                metadata={
+                    "violation_type": f"prompt_injection:malicious={score_malicious}",
+                },
             )
 
         return RunShieldResponse(violation=violation)
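With the `SafetyViolation` change above, the human-readable refusal now travels in `user_message` and the scoring detail moves into `metadata`. A sketch of what a caller receives follows; the score is a made-up example value.

```python
# Sketch of the violation shape returned after this change; the score value is
# invented for illustration.
from llama_stack.apis.safety import RunShieldResponse, SafetyViolation, ViolationLevel

score_malicious = 0.97
violation = SafetyViolation(
    violation_level=ViolationLevel.ERROR,
    user_message="Sorry, I cannot do this.",
    metadata={"violation_type": f"prompt_injection:malicious={score_malicious}"},
)
response = RunShieldResponse(violation=violation)

print(response.violation.user_message)                # Sorry, I cannot do this.
print(response.violation.metadata["violation_type"])  # prompt_injection:malicious=0.97
```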
diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index 26b4dec76..a93421536 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -457,9 +457,6 @@ class OllamaInferenceAdapter(
         user: str | None = None,
     ) -> OpenAIEmbeddingsResponse:
         model_obj = await self._get_model(model)
-        if model_obj.model_type != ModelType.embedding:
-            raise ValueError(f"Model {model} is not an embedding model")
-
         if model_obj.provider_resource_id is None:
             raise ValueError(f"Model {model} has no provider_resource_id set")
 
diff --git a/llama_stack/providers/utils/inference/openai_compat.py b/llama_stack/providers/utils/inference/openai_compat.py
index e6e5ccc8a..9a77c8cc4 100644
--- a/llama_stack/providers/utils/inference/openai_compat.py
+++ b/llama_stack/providers/utils/inference/openai_compat.py
@@ -70,7 +70,7 @@ from openai.types.chat.chat_completion_chunk import (
 from openai.types.chat.chat_completion_content_part_image_param import (
     ImageURL as OpenAIImageURL,
 )
-from openai.types.chat.chat_completion_message_tool_call_param import (
+from openai.types.chat.chat_completion_message_tool_call import (
     Function as OpenAIFunction,
 )
 from pydantic import BaseModel
diff --git a/llama_stack/ui/app/chat-playground/page.tsx b/llama_stack/ui/app/chat-playground/page.tsx
index c31248b78..d8094af85 100644
--- a/llama_stack/ui/app/chat-playground/page.tsx
+++ b/llama_stack/ui/app/chat-playground/page.tsx
@@ -175,7 +175,7 @@ const handleSubmitWithContent = async (content: string) => {
   return (