diff --git a/.github/workflows/python-build-test.yml b/.github/workflows/python-build-test.yml index 00f0950c7..ea8e6a66a 100644 --- a/.github/workflows/python-build-test.yml +++ b/.github/workflows/python-build-test.yml @@ -24,7 +24,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v6.6.1 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v6.7.0 with: python-version: ${{ matrix.python-version }} activate-environment: true diff --git a/benchmarking/k8s-benchmark/apply.sh b/benchmarking/k8s-benchmark/apply.sh index 4f2270da8..6e6607663 100755 --- a/benchmarking/k8s-benchmark/apply.sh +++ b/benchmarking/k8s-benchmark/apply.sh @@ -17,11 +17,8 @@ export POSTGRES_PASSWORD=llamastack export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B -export MOCK_INFERENCE_MODEL=mock-inference - -export MOCK_INFERENCE_URL=openai-mock-service:8080 - export BENCHMARK_INFERENCE_MODEL=$INFERENCE_MODEL +export LLAMA_STACK_WORKERS=4 set -euo pipefail set -x diff --git a/benchmarking/k8s-benchmark/stack-configmap.yaml b/benchmarking/k8s-benchmark/stack-configmap.yaml index bf6109b68..286ba5f77 100644 --- a/benchmarking/k8s-benchmark/stack-configmap.yaml +++ b/benchmarking/k8s-benchmark/stack-configmap.yaml @@ -5,6 +5,7 @@ data: image_name: kubernetes-benchmark-demo apis: - agents + - files - inference - files - safety @@ -23,6 +24,14 @@ data: - provider_id: sentence-transformers provider_type: inline::sentence-transformers config: {} + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files} + metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db vector_io: - provider_id: ${env.ENABLE_CHROMADB:+chromadb} provider_type: remote::chromadb diff --git a/benchmarking/k8s-benchmark/stack-k8s.yaml.template b/benchmarking/k8s-benchmark/stack-k8s.yaml.template index 9cb1e5be3..8842c0bea 100644 --- a/benchmarking/k8s-benchmark/stack-k8s.yaml.template +++ b/benchmarking/k8s-benchmark/stack-k8s.yaml.template @@ -52,9 +52,20 @@ spec: value: http://vllm-server-safety.default.svc.cluster.local:8001/v1 - name: VLLM_TLS_VERIFY value: "false" - command: ["python", "-m", "llama_stack.core.server.server", "/etc/config/stack_run_config.yaml", "--port", "8323"] + - name: LLAMA_STACK_LOGGING + value: "all=WARNING" + - name: LLAMA_STACK_CONFIG + value: "/etc/config/stack_run_config.yaml" + - name: LLAMA_STACK_WORKERS + value: "${LLAMA_STACK_WORKERS}" + command: ["uvicorn", "llama_stack.core.server.server:create_app", "--host", "0.0.0.0", "--port", "8323", "--workers", "$LLAMA_STACK_WORKERS", "--factory"] ports: - containerPort: 8323 + resources: + requests: + cpu: "${LLAMA_STACK_WORKERS}" + limits: + cpu: "${LLAMA_STACK_WORKERS}" volumeMounts: - name: llama-storage mountPath: /root/.llama diff --git a/docs/source/distributions/self_hosted_distro/nvidia.md b/docs/source/distributions/self_hosted_distro/nvidia.md index 86d025ce7..d4f070075 100644 --- a/docs/source/distributions/self_hosted_distro/nvidia.md +++ b/docs/source/distributions/self_hosted_distro/nvidia.md @@ -11,6 +11,7 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov | agents | `inline::meta-reference` | | datasetio | `inline::localfs`, `remote::nvidia` | | eval | `remote::nvidia` | +| 
files | `inline::localfs` | | inference | `remote::nvidia` | | post_training | `remote::nvidia` | | safety | `remote::nvidia` | diff --git a/docs/source/getting_started/detailed_tutorial.md b/docs/source/getting_started/detailed_tutorial.md index 14f888628..77a899c48 100644 --- a/docs/source/getting_started/detailed_tutorial.md +++ b/docs/source/getting_started/detailed_tutorial.md @@ -460,10 +460,12 @@ client = LlamaStackClient(base_url="http://localhost:8321") embed_lm = next(m for m in client.models.list() if m.model_type == "embedding") embedding_model = embed_lm.identifier vector_db_id = f"v{uuid.uuid4().hex}" -client.vector_dbs.register( +# The VectorDB API is deprecated; the server now returns its own authoritative ID. +# We capture the correct ID from the response's .identifier attribute. +vector_db_id = client.vector_dbs.register( vector_db_id=vector_db_id, embedding_model=embedding_model, -) +).identifier # Create Documents urls = [ diff --git a/docs/source/providers/vector_io/remote_milvus.md b/docs/source/providers/vector_io/remote_milvus.md index 075423d04..8974ada10 100644 --- a/docs/source/providers/vector_io/remote_milvus.md +++ b/docs/source/providers/vector_io/remote_milvus.md @@ -23,7 +23,13 @@ To use Milvus in your Llama Stack project, follow these steps: ## Installation -You can install Milvus using pymilvus: +If you want to use inline Milvus, you can install: + +```bash +pip install pymilvus[milvus-lite] +``` + +If you want to use remote Milvus, you can install: ```bash pip install pymilvus diff --git a/llama_stack/core/build_container.sh b/llama_stack/core/build_container.sh index 424b40a9d..8e47fc592 100755 --- a/llama_stack/core/build_container.sh +++ b/llama_stack/core/build_container.sh @@ -147,7 +147,7 @@ WORKDIR /app RUN dnf -y update && dnf install -y iputils git net-tools wget \ vim-minimal python3.12 python3.12-pip python3.12-wheel \ - python3.12-setuptools python3.12-devel gcc make && \ + python3.12-setuptools python3.12-devel gcc gcc-c++ make && \ ln -s /bin/pip3.12 /bin/pip && ln -s /bin/python3.12 /bin/python && dnf clean all ENV UV_SYSTEM_PYTHON=1 @@ -164,7 +164,7 @@ RUN apt-get update && apt-get install -y \ procps psmisc lsof \ traceroute \ bubblewrap \ - gcc \ + gcc g++ \ && rm -rf /var/lib/apt/lists/* ENV UV_SYSTEM_PYTHON=1 diff --git a/llama_stack/core/datatypes.py b/llama_stack/core/datatypes.py index faaeefd01..b5558c66f 100644 --- a/llama_stack/core/datatypes.py +++ b/llama_stack/core/datatypes.py @@ -121,10 +121,6 @@ class AutoRoutedProviderSpec(ProviderSpec): default=None, ) - @property - def pip_packages(self) -> list[str]: - raise AssertionError("Should not be called on AutoRoutedProviderSpec") - # Example: /models, /shields class RoutingTableProviderSpec(ProviderSpec): diff --git a/llama_stack/core/distribution.py b/llama_stack/core/distribution.py index c104b6764..302ecb960 100644 --- a/llama_stack/core/distribution.py +++ b/llama_stack/core/distribution.py @@ -16,11 +16,10 @@ from llama_stack.core.datatypes import BuildConfig, DistributionSpec from llama_stack.core.external import load_external_apis from llama_stack.log import get_logger from llama_stack.providers.datatypes import ( - AdapterSpec, Api, InlineProviderSpec, ProviderSpec, - remote_provider_spec, + RemoteProviderSpec, ) logger = get_logger(name=__name__, category="core") @@ -77,27 +76,12 @@ def providable_apis() -> list[Api]: def _load_remote_provider_spec(spec_data: dict[str, Any], api: Api) -> ProviderSpec: - adapter = AdapterSpec(**spec_data["adapter"]) - spec = 
remote_provider_spec( - api=api, - adapter=adapter, - api_dependencies=[Api(dep) for dep in spec_data.get("api_dependencies", [])], - ) + spec = RemoteProviderSpec(api=api, provider_type=f"remote::{spec_data['adapter_type']}", **spec_data) return spec def _load_inline_provider_spec(spec_data: dict[str, Any], api: Api, provider_name: str) -> ProviderSpec: - spec = InlineProviderSpec( - api=api, - provider_type=f"inline::{provider_name}", - pip_packages=spec_data.get("pip_packages", []), - module=spec_data["module"], - config_class=spec_data["config_class"], - api_dependencies=[Api(dep) for dep in spec_data.get("api_dependencies", [])], - optional_api_dependencies=[Api(dep) for dep in spec_data.get("optional_api_dependencies", [])], - provider_data_validator=spec_data.get("provider_data_validator"), - container_image=spec_data.get("container_image"), - ) + spec = InlineProviderSpec(api=api, provider_type=f"inline::{provider_name}", **spec_data) return spec diff --git a/llama_stack/core/library_client.py b/llama_stack/core/library_client.py index ea5a2ac8e..e722e4de6 100644 --- a/llama_stack/core/library_client.py +++ b/llama_stack/core/library_client.py @@ -40,7 +40,7 @@ from llama_stack.core.request_headers import ( from llama_stack.core.resolver import ProviderRegistry from llama_stack.core.server.routes import RouteImpls, find_matching_route, initialize_route_impls from llama_stack.core.stack import ( - construct_stack, + Stack, get_stack_run_config_from_distro, replace_env_vars, ) @@ -252,7 +252,10 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient): try: self.route_impls = None - self.impls = await construct_stack(self.config, self.custom_provider_registry) + + stack = Stack(self.config, self.custom_provider_registry) + await stack.initialize() + self.impls = stack.impls except ModuleNotFoundError as _e: cprint(_e.msg, color="red", file=sys.stderr) cprint( @@ -289,6 +292,7 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient): ) raise _e + assert self.impls is not None if Api.telemetry in self.impls: setup_logger(self.impls[Api.telemetry]) diff --git a/llama_stack/core/routing_tables/models.py b/llama_stack/core/routing_tables/models.py index b6141efa9..641c73c16 100644 --- a/llama_stack/core/routing_tables/models.py +++ b/llama_stack/core/routing_tables/models.py @@ -33,7 +33,7 @@ class ModelsRoutingTable(CommonRoutingTableImpl, Models): try: models = await provider.list_models() except Exception as e: - logger.exception(f"Model refresh failed for provider {provider_id}: {e}") + logger.warning(f"Model refresh failed for provider {provider_id}: {e}") continue self.listed_providers.add(provider_id) diff --git a/llama_stack/core/server/server.py b/llama_stack/core/server/server.py index d3e875fec..9cca42268 100644 --- a/llama_stack/core/server/server.py +++ b/llama_stack/core/server/server.py @@ -6,6 +6,7 @@ import argparse import asyncio +import concurrent.futures import functools import inspect import json @@ -50,17 +51,15 @@ from llama_stack.core.request_headers import ( request_provider_data_context, user_from_scope, ) -from llama_stack.core.resolver import InvalidProviderError from llama_stack.core.server.routes import ( find_matching_route, get_all_api_routes, initialize_route_impls, ) from llama_stack.core.stack import ( + Stack, cast_image_name_to_string, - construct_stack, replace_env_vars, - shutdown_stack, validate_env_pair, ) from llama_stack.core.utils.config import redact_sensitive_fields @@ -156,21 +155,34 @@ def translate_exception(exc: Exception) -> 
HTTPException | RequestValidationErro ) -async def shutdown(app): - """Initiate a graceful shutdown of the application. - - Handled by the lifespan context manager. The shutdown process involves - shutting down all implementations registered in the application. +class StackApp(FastAPI): """ - await shutdown_stack(app.__llama_stack_impls__) + A wrapper around the FastAPI application to hold a reference to the Stack instance so that we can + start background tasks (e.g. refresh model registry periodically) from the lifespan context manager. + """ + + def __init__(self, config: StackRunConfig, *args, **kwargs): + super().__init__(*args, **kwargs) + self.stack: Stack = Stack(config) + + # This code is called from a running event loop managed by uvicorn so we cannot simply call + # asyncio.run() to initialize the stack. We cannot await either since this is not an async + # function. + # As a workaround, we use a thread pool executor to run the initialize() method + # in a separate thread. + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit(asyncio.run, self.stack.initialize()) + future.result() @asynccontextmanager -async def lifespan(app: FastAPI): +async def lifespan(app: StackApp): logger.info("Starting up") + assert app.stack is not None + app.stack.create_registry_refresh_task() yield logger.info("Shutting down") - await shutdown(app) + await app.stack.shutdown() def is_streaming_request(func_name: str, request: Request, **kwargs): @@ -386,73 +398,61 @@ class ClientVersionMiddleware: return await self.app(scope, receive, send) -def main(args: argparse.Namespace | None = None): - """Start the LlamaStack server.""" - parser = argparse.ArgumentParser(description="Start the LlamaStack server.") +def create_app( + config_file: str | None = None, + env_vars: list[str] | None = None, +) -> StackApp: + """Create and configure the FastAPI application. - add_config_distro_args(parser) - parser.add_argument( - "--port", - type=int, - default=int(os.getenv("LLAMA_STACK_PORT", 8321)), - help="Port to listen on", - ) - parser.add_argument( - "--env", - action="append", - help="Environment variables in KEY=value format. Can be specified multiple times.", - ) + Args: + config_file: Path to config file. If None, uses LLAMA_STACK_CONFIG env var or default resolution. + env_vars: List of environment variables in KEY=value format. + disable_version_check: Whether to disable version checking. If None, uses LLAMA_STACK_DISABLE_VERSION_CHECK env var. - # Determine whether the server args are being passed by the "run" command, if this is the case - # the args will be passed as a Namespace object to the main function, otherwise they will be - # parsed from the command line - if args is None: - args = parser.parse_args() + Returns: + Configured StackApp instance. 
+ """ + config_file = config_file or os.getenv("LLAMA_STACK_CONFIG") + if config_file is None: + raise ValueError("No config file provided and LLAMA_STACK_CONFIG env var is not set") - config_or_distro = get_config_from_args(args) - config_file = resolve_config_or_distro(config_or_distro, Mode.RUN) + config_file = resolve_config_or_distro(config_file, Mode.RUN) + # Load and process configuration logger_config = None with open(config_file) as fp: config_contents = yaml.safe_load(fp) if isinstance(config_contents, dict) and (cfg := config_contents.get("logging_config")): logger_config = LoggingConfig(**cfg) logger = get_logger(name=__name__, category="core::server", config=logger_config) - if args.env: - for env_pair in args.env: + + if env_vars: + for env_pair in env_vars: try: key, value = validate_env_pair(env_pair) - logger.info(f"Setting CLI environment variable {key} => {value}") + logger.info(f"Setting environment variable {key} => {value}") os.environ[key] = value except ValueError as e: logger.error(f"Error: {str(e)}") - sys.exit(1) + raise ValueError(f"Invalid environment variable format: {env_pair}") from e + config = replace_env_vars(config_contents) config = StackRunConfig(**cast_image_name_to_string(config)) _log_run_config(run_config=config) - app = FastAPI( + app = StackApp( lifespan=lifespan, docs_url="/docs", redoc_url="/redoc", openapi_url="/openapi.json", + config=config, ) if not os.environ.get("LLAMA_STACK_DISABLE_VERSION_CHECK"): app.add_middleware(ClientVersionMiddleware) - try: - # Create and set the event loop that will be used for both construction and server runtime - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - - # Construct the stack in the persistent event loop - impls = loop.run_until_complete(construct_stack(config)) - - except InvalidProviderError as e: - logger.error(f"Error: {str(e)}") - sys.exit(1) + impls = app.stack.impls if config.server.auth: logger.info(f"Enabling authentication with provider: {config.server.auth.provider_config.type.value}") @@ -553,9 +553,54 @@ def main(args: argparse.Namespace | None = None): app.exception_handler(RequestValidationError)(global_exception_handler) app.exception_handler(Exception)(global_exception_handler) - app.__llama_stack_impls__ = impls app.add_middleware(TracingMiddleware, impls=impls, external_apis=external_apis) + return app + + +def main(args: argparse.Namespace | None = None): + """Start the LlamaStack server.""" + parser = argparse.ArgumentParser(description="Start the LlamaStack server.") + + add_config_distro_args(parser) + parser.add_argument( + "--port", + type=int, + default=int(os.getenv("LLAMA_STACK_PORT", 8321)), + help="Port to listen on", + ) + parser.add_argument( + "--env", + action="append", + help="Environment variables in KEY=value format. 
Can be specified multiple times.", + ) + + # Determine whether the server args are being passed by the "run" command, if this is the case + # the args will be passed as a Namespace object to the main function, otherwise they will be + # parsed from the command line + if args is None: + args = parser.parse_args() + + config_or_distro = get_config_from_args(args) + + try: + app = create_app( + config_file=config_or_distro, + env_vars=args.env, + ) + except Exception as e: + logger.error(f"Error creating app: {str(e)}") + sys.exit(1) + + config_file = resolve_config_or_distro(config_or_distro, Mode.RUN) + with open(config_file) as fp: + config_contents = yaml.safe_load(fp) + if isinstance(config_contents, dict) and (cfg := config_contents.get("logging_config")): + logger_config = LoggingConfig(**cfg) + else: + logger_config = None + config = StackRunConfig(**cast_image_name_to_string(replace_env_vars(config_contents))) + import uvicorn # Configure SSL if certificates are provided @@ -593,7 +638,6 @@ def main(args: argparse.Namespace | None = None): if ssl_config: uvicorn_config.update(ssl_config) - # Run uvicorn in the existing event loop to preserve background tasks # We need to catch KeyboardInterrupt because uvicorn's signal handling # re-raises SIGINT signals using signal.raise_signal(), which Python # converts to KeyboardInterrupt. Without this catch, we'd get a confusing @@ -604,13 +648,9 @@ def main(args: argparse.Namespace | None = None): # Another approach would be to ignore SIGINT entirely - let uvicorn handle it through its own # signal handling but this is quite intrusive and not worth the effort. try: - loop.run_until_complete(uvicorn.Server(uvicorn.Config(**uvicorn_config)).serve()) + asyncio.run(uvicorn.Server(uvicorn.Config(**uvicorn_config)).serve()) except (KeyboardInterrupt, SystemExit): logger.info("Received interrupt signal, shutting down gracefully...") - finally: - if not loop.is_closed(): - logger.debug("Closing event loop") - loop.close() def _log_run_config(run_config: StackRunConfig): diff --git a/llama_stack/core/stack.py b/llama_stack/core/stack.py index 7ab8d2c64..a6c5093eb 100644 --- a/llama_stack/core/stack.py +++ b/llama_stack/core/stack.py @@ -315,78 +315,84 @@ def add_internal_implementations(impls: dict[Api, Any], run_config: StackRunConf impls[Api.prompts] = prompts_impl -# Produces a stack of providers for the given run config. Not all APIs may be -# asked for in the run config. -async def construct_stack( - run_config: StackRunConfig, provider_registry: ProviderRegistry | None = None -) -> dict[Api, Any]: - if "LLAMA_STACK_TEST_INFERENCE_MODE" in os.environ: - from llama_stack.testing.inference_recorder import setup_inference_recording +class Stack: + def __init__(self, run_config: StackRunConfig, provider_registry: ProviderRegistry | None = None): + self.run_config = run_config + self.provider_registry = provider_registry + self.impls = None + + # Produces a stack of providers for the given run config. Not all APIs may be + # asked for in the run config. 
+ async def initialize(self): + if "LLAMA_STACK_TEST_INFERENCE_MODE" in os.environ: + from llama_stack.testing.inference_recorder import setup_inference_recording + + global TEST_RECORDING_CONTEXT + TEST_RECORDING_CONTEXT = setup_inference_recording() + if TEST_RECORDING_CONTEXT: + TEST_RECORDING_CONTEXT.__enter__() + logger.info(f"Inference recording enabled: mode={os.environ.get('LLAMA_STACK_TEST_INFERENCE_MODE')}") + + dist_registry, _ = await create_dist_registry(self.run_config.metadata_store, self.run_config.image_name) + policy = self.run_config.server.auth.access_policy if self.run_config.server.auth else [] + impls = await resolve_impls( + self.run_config, self.provider_registry or get_provider_registry(self.run_config), dist_registry, policy + ) + + # Add internal implementations after all other providers are resolved + add_internal_implementations(impls, self.run_config) + + if Api.prompts in impls: + await impls[Api.prompts].initialize() + + await register_resources(self.run_config, impls) + + await refresh_registry_once(impls) + self.impls = impls + + def create_registry_refresh_task(self): + assert self.impls is not None, "Must call initialize() before starting" + + global REGISTRY_REFRESH_TASK + REGISTRY_REFRESH_TASK = asyncio.create_task(refresh_registry_task(self.impls)) + + def cb(task): + import traceback + + if task.cancelled(): + logger.error("Model refresh task cancelled") + elif task.exception(): + logger.error(f"Model refresh task failed: {task.exception()}") + traceback.print_exception(task.exception()) + else: + logger.debug("Model refresh task completed") + + REGISTRY_REFRESH_TASK.add_done_callback(cb) + + async def shutdown(self): + for impl in self.impls.values(): + impl_name = impl.__class__.__name__ + logger.info(f"Shutting down {impl_name}") + try: + if hasattr(impl, "shutdown"): + await asyncio.wait_for(impl.shutdown(), timeout=5) + else: + logger.warning(f"No shutdown method for {impl_name}") + except TimeoutError: + logger.exception(f"Shutdown timeout for {impl_name}") + except (Exception, asyncio.CancelledError) as e: + logger.exception(f"Failed to shutdown {impl_name}: {e}") global TEST_RECORDING_CONTEXT - TEST_RECORDING_CONTEXT = setup_inference_recording() if TEST_RECORDING_CONTEXT: - TEST_RECORDING_CONTEXT.__enter__() - logger.info(f"Inference recording enabled: mode={os.environ.get('LLAMA_STACK_TEST_INFERENCE_MODE')}") + try: + TEST_RECORDING_CONTEXT.__exit__(None, None, None) + except Exception as e: + logger.error(f"Error during inference recording cleanup: {e}") - dist_registry, _ = await create_dist_registry(run_config.metadata_store, run_config.image_name) - policy = run_config.server.auth.access_policy if run_config.server.auth else [] - impls = await resolve_impls( - run_config, provider_registry or get_provider_registry(run_config), dist_registry, policy - ) - - # Add internal implementations after all other providers are resolved - add_internal_implementations(impls, run_config) - - if Api.prompts in impls: - await impls[Api.prompts].initialize() - - await register_resources(run_config, impls) - - await refresh_registry_once(impls) - - global REGISTRY_REFRESH_TASK - REGISTRY_REFRESH_TASK = asyncio.create_task(refresh_registry_task(impls)) - - def cb(task): - import traceback - - if task.cancelled(): - logger.error("Model refresh task cancelled") - elif task.exception(): - logger.error(f"Model refresh task failed: {task.exception()}") - traceback.print_exception(task.exception()) - else: - logger.debug("Model refresh task completed") - - 
REGISTRY_REFRESH_TASK.add_done_callback(cb) - return impls - - -async def shutdown_stack(impls: dict[Api, Any]): - for impl in impls.values(): - impl_name = impl.__class__.__name__ - logger.info(f"Shutting down {impl_name}") - try: - if hasattr(impl, "shutdown"): - await asyncio.wait_for(impl.shutdown(), timeout=5) - else: - logger.warning(f"No shutdown method for {impl_name}") - except TimeoutError: - logger.exception(f"Shutdown timeout for {impl_name}") - except (Exception, asyncio.CancelledError) as e: - logger.exception(f"Failed to shutdown {impl_name}: {e}") - - global TEST_RECORDING_CONTEXT - if TEST_RECORDING_CONTEXT: - try: - TEST_RECORDING_CONTEXT.__exit__(None, None, None) - except Exception as e: - logger.error(f"Error during inference recording cleanup: {e}") - - global REGISTRY_REFRESH_TASK - if REGISTRY_REFRESH_TASK: - REGISTRY_REFRESH_TASK.cancel() + global REGISTRY_REFRESH_TASK + if REGISTRY_REFRESH_TASK: + REGISTRY_REFRESH_TASK.cancel() async def refresh_registry_once(impls: dict[Api, Any]): diff --git a/llama_stack/distributions/nvidia/build.yaml b/llama_stack/distributions/nvidia/build.yaml index f3e73a2c1..bc78756d2 100644 --- a/llama_stack/distributions/nvidia/build.yaml +++ b/llama_stack/distributions/nvidia/build.yaml @@ -23,6 +23,8 @@ distribution_spec: - provider_type: inline::basic tool_runtime: - provider_type: inline::rag-runtime + files: + - provider_type: inline::localfs image_type: venv additional_pip_packages: - aiosqlite diff --git a/llama_stack/distributions/nvidia/nvidia.py b/llama_stack/distributions/nvidia/nvidia.py index aedda0ae9..779fabf2c 100644 --- a/llama_stack/distributions/nvidia/nvidia.py +++ b/llama_stack/distributions/nvidia/nvidia.py @@ -8,6 +8,7 @@ from pathlib import Path from llama_stack.core.datatypes import BuildProvider, ModelInput, Provider, ShieldInput, ToolGroupInput from llama_stack.distributions.template import DistributionTemplate, RunConfigSettings, get_model_registry +from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig from llama_stack.providers.remote.datasetio.nvidia import NvidiaDatasetIOConfig from llama_stack.providers.remote.eval.nvidia import NVIDIAEvalConfig from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig @@ -15,7 +16,7 @@ from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES from llama_stack.providers.remote.safety.nvidia import NVIDIASafetyConfig -def get_distribution_template() -> DistributionTemplate: +def get_distribution_template(name: str = "nvidia") -> DistributionTemplate: providers = { "inference": [BuildProvider(provider_type="remote::nvidia")], "vector_io": [BuildProvider(provider_type="inline::faiss")], @@ -30,6 +31,7 @@ def get_distribution_template() -> DistributionTemplate: ], "scoring": [BuildProvider(provider_type="inline::basic")], "tool_runtime": [BuildProvider(provider_type="inline::rag-runtime")], + "files": [BuildProvider(provider_type="inline::localfs")], } inference_provider = Provider( @@ -52,6 +54,11 @@ def get_distribution_template() -> DistributionTemplate: provider_type="remote::nvidia", config=NVIDIAEvalConfig.sample_run_config(), ) + files_provider = Provider( + provider_id="meta-reference-files", + provider_type="inline::localfs", + config=LocalfsFilesImplConfig.sample_run_config(f"~/.llama/distributions/{name}"), + ) inference_model = ModelInput( model_id="${env.INFERENCE_MODEL}", provider_id="nvidia", @@ -73,7 +80,7 @@ def get_distribution_template() -> DistributionTemplate: default_models, _ = 
get_model_registry(available_models) return DistributionTemplate( - name="nvidia", + name=name, distro_type="self_hosted", description="Use NVIDIA NIM for running LLM inference, evaluation and safety", container_image=None, @@ -86,6 +93,7 @@ def get_distribution_template() -> DistributionTemplate: "inference": [inference_provider], "datasetio": [datasetio_provider], "eval": [eval_provider], + "files": [files_provider], }, default_models=default_models, default_tool_groups=default_tool_groups, @@ -97,6 +105,7 @@ def get_distribution_template() -> DistributionTemplate: safety_provider, ], "eval": [eval_provider], + "files": [files_provider], }, default_models=[inference_model, safety_model], default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}", provider_id="nvidia")], diff --git a/llama_stack/distributions/nvidia/run-with-safety.yaml b/llama_stack/distributions/nvidia/run-with-safety.yaml index 015724050..5a958116e 100644 --- a/llama_stack/distributions/nvidia/run-with-safety.yaml +++ b/llama_stack/distributions/nvidia/run-with-safety.yaml @@ -4,6 +4,7 @@ apis: - agents - datasetio - eval +- files - inference - post_training - safety @@ -88,6 +89,14 @@ providers: tool_runtime: - provider_id: rag-runtime provider_type: inline::rag-runtime + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/nvidia/files} + metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/files_metadata.db metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/registry.db diff --git a/llama_stack/distributions/nvidia/run.yaml b/llama_stack/distributions/nvidia/run.yaml index 9fd6b0404..362970d2e 100644 --- a/llama_stack/distributions/nvidia/run.yaml +++ b/llama_stack/distributions/nvidia/run.yaml @@ -4,6 +4,7 @@ apis: - agents - datasetio - eval +- files - inference - post_training - safety @@ -77,6 +78,14 @@ providers: tool_runtime: - provider_id: rag-runtime provider_type: inline::rag-runtime + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/nvidia/files} + metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/files_metadata.db metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/registry.db diff --git a/llama_stack/distributions/starter/starter.py b/llama_stack/distributions/starter/starter.py index c2dfe95ad..6bee51ff0 100644 --- a/llama_stack/distributions/starter/starter.py +++ b/llama_stack/distributions/starter/starter.py @@ -78,12 +78,12 @@ def get_remote_inference_providers() -> list[Provider]: remote_providers = [ provider for provider in available_providers() - if isinstance(provider, RemoteProviderSpec) and provider.adapter.adapter_type in ENABLED_INFERENCE_PROVIDERS + if isinstance(provider, RemoteProviderSpec) and provider.adapter_type in ENABLED_INFERENCE_PROVIDERS ] inference_providers = [] for provider_spec in remote_providers: - provider_type = provider_spec.adapter.adapter_type + provider_type = provider_spec.adapter_type if provider_type in INFERENCE_PROVIDER_IDS: provider_id = INFERENCE_PROVIDER_IDS[provider_type] diff --git a/llama_stack/providers/datatypes.py b/llama_stack/providers/datatypes.py index 5e15dd8e1..c8ff9cecb 100644 --- a/llama_stack/providers/datatypes.py +++ b/llama_stack/providers/datatypes.py @@ -131,6 +131,15 @@ 
class ProviderSpec(BaseModel): """, ) + pip_packages: list[str] = Field( + default_factory=list, + description="The pip dependencies needed for this implementation", + ) + + provider_data_validator: str | None = Field( + default=None, + ) + is_external: bool = Field(default=False, description="Notes whether this provider is an external provider.") # used internally by the resolver; this is a hack for now @@ -145,45 +154,8 @@ class RoutingTable(Protocol): async def get_provider_impl(self, routing_key: str) -> Any: ... -# TODO: this can now be inlined into RemoteProviderSpec -@json_schema_type -class AdapterSpec(BaseModel): - adapter_type: str = Field( - ..., - description="Unique identifier for this adapter", - ) - module: str = Field( - default_factory=str, - description=""" -Fully-qualified name of the module to import. The module is expected to have: - - - `get_adapter_impl(config, deps)`: returns the adapter implementation -""", - ) - pip_packages: list[str] = Field( - default_factory=list, - description="The pip dependencies needed for this implementation", - ) - config_class: str = Field( - description="Fully-qualified classname of the config for this provider", - ) - provider_data_validator: str | None = Field( - default=None, - ) - description: str | None = Field( - default=None, - description=""" -A description of the provider. This is used to display in the documentation. -""", - ) - - @json_schema_type class InlineProviderSpec(ProviderSpec): - pip_packages: list[str] = Field( - default_factory=list, - description="The pip dependencies needed for this implementation", - ) container_image: str | None = Field( default=None, description=""" @@ -191,10 +163,6 @@ The container image to use for this implementation. If one is provided, pip_pack If a provider depends on other providers, the dependencies MUST NOT specify a container image. """, ) - # module field is inherited from ProviderSpec - provider_data_validator: str | None = Field( - default=None, - ) description: str | None = Field( default=None, description=""" @@ -223,10 +191,15 @@ class RemoteProviderConfig(BaseModel): @json_schema_type class RemoteProviderSpec(ProviderSpec): - adapter: AdapterSpec = Field( + adapter_type: str = Field( + ..., + description="Unique identifier for this adapter", + ) + + description: str | None = Field( + default=None, description=""" -If some code is needed to convert the remote responses into Llama Stack compatible -API responses, specify the adapter here. +A description of the provider. This is used to display in the documentation. """, ) @@ -234,33 +207,6 @@ API responses, specify the adapter here. 
def container_image(self) -> str | None: return None - # module field is inherited from ProviderSpec - - @property - def pip_packages(self) -> list[str]: - return self.adapter.pip_packages - - @property - def provider_data_validator(self) -> str | None: - return self.adapter.provider_data_validator - - -def remote_provider_spec( - api: Api, - adapter: AdapterSpec, - api_dependencies: list[Api] | None = None, - optional_api_dependencies: list[Api] | None = None, -) -> RemoteProviderSpec: - return RemoteProviderSpec( - api=api, - provider_type=f"remote::{adapter.adapter_type}", - config_class=adapter.config_class, - module=adapter.module, - adapter=adapter, - api_dependencies=api_dependencies or [], - optional_api_dependencies=optional_api_dependencies or [], - ) - class HealthStatus(StrEnum): OK = "OK" diff --git a/llama_stack/providers/inline/files/localfs/files.py b/llama_stack/providers/inline/files/localfs/files.py index 9c610c1ba..65cf8d815 100644 --- a/llama_stack/providers/inline/files/localfs/files.py +++ b/llama_stack/providers/inline/files/localfs/files.py @@ -44,7 +44,7 @@ class LocalfsFilesImpl(Files): storage_path.mkdir(parents=True, exist_ok=True) # Initialize SQL store for metadata - self.sql_store = AuthorizedSqlStore(sqlstore_impl(self.config.metadata_store)) + self.sql_store = AuthorizedSqlStore(sqlstore_impl(self.config.metadata_store), self.policy) await self.sql_store.create_table( "openai_files", { @@ -74,7 +74,7 @@ class LocalfsFilesImpl(Files): if not self.sql_store: raise RuntimeError("Files provider not initialized") - row = await self.sql_store.fetch_one("openai_files", policy=self.policy, where={"id": file_id}) + row = await self.sql_store.fetch_one("openai_files", where={"id": file_id}) if not row: raise ResourceNotFoundError(file_id, "File", "client.files.list()") @@ -150,7 +150,6 @@ class LocalfsFilesImpl(Files): paginated_result = await self.sql_store.fetch_all( table="openai_files", - policy=self.policy, where=where_conditions if where_conditions else None, order_by=[("created_at", order.value)], cursor=("id", after) if after else None, diff --git a/llama_stack/providers/registry/datasetio.py b/llama_stack/providers/registry/datasetio.py index f641b4ce3..a9feb0bac 100644 --- a/llama_stack/providers/registry/datasetio.py +++ b/llama_stack/providers/registry/datasetio.py @@ -6,11 +6,10 @@ from llama_stack.providers.datatypes import ( - AdapterSpec, Api, InlineProviderSpec, ProviderSpec, - remote_provider_spec, + RemoteProviderSpec, ) @@ -25,28 +24,26 @@ def available_providers() -> list[ProviderSpec]: api_dependencies=[], description="Local filesystem-based dataset I/O provider for reading and writing datasets to local storage.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.datasetio, - adapter=AdapterSpec( - adapter_type="huggingface", - pip_packages=[ - "datasets>=4.0.0", - ], - module="llama_stack.providers.remote.datasetio.huggingface", - config_class="llama_stack.providers.remote.datasetio.huggingface.HuggingfaceDatasetIOConfig", - description="HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub.", - ), + adapter_type="huggingface", + provider_type="remote::huggingface", + pip_packages=[ + "datasets>=4.0.0", + ], + module="llama_stack.providers.remote.datasetio.huggingface", + config_class="llama_stack.providers.remote.datasetio.huggingface.HuggingfaceDatasetIOConfig", + description="HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub.", ), - remote_provider_spec( + 
RemoteProviderSpec( api=Api.datasetio, - adapter=AdapterSpec( - adapter_type="nvidia", - pip_packages=[ - "datasets>=4.0.0", - ], - module="llama_stack.providers.remote.datasetio.nvidia", - config_class="llama_stack.providers.remote.datasetio.nvidia.NvidiaDatasetIOConfig", - description="NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform.", - ), + adapter_type="nvidia", + provider_type="remote::nvidia", + module="llama_stack.providers.remote.datasetio.nvidia", + config_class="llama_stack.providers.remote.datasetio.nvidia.NvidiaDatasetIOConfig", + pip_packages=[ + "datasets>=4.0.0", + ], + description="NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform.", ), ] diff --git a/llama_stack/providers/registry/eval.py b/llama_stack/providers/registry/eval.py index 9f0d17916..4ef0bb41f 100644 --- a/llama_stack/providers/registry/eval.py +++ b/llama_stack/providers/registry/eval.py @@ -5,7 +5,7 @@ # the root directory of this source tree. -from llama_stack.providers.datatypes import AdapterSpec, Api, InlineProviderSpec, ProviderSpec, remote_provider_spec +from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec def available_providers() -> list[ProviderSpec]: @@ -25,17 +25,16 @@ def available_providers() -> list[ProviderSpec]: ], description="Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.eval, - adapter=AdapterSpec( - adapter_type="nvidia", - pip_packages=[ - "requests", - ], - module="llama_stack.providers.remote.eval.nvidia", - config_class="llama_stack.providers.remote.eval.nvidia.NVIDIAEvalConfig", - description="NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform.", - ), + adapter_type="nvidia", + pip_packages=[ + "requests", + ], + provider_type="remote::nvidia", + module="llama_stack.providers.remote.eval.nvidia", + config_class="llama_stack.providers.remote.eval.nvidia.NVIDIAEvalConfig", + description="NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform.", api_dependencies=[ Api.datasetio, Api.datasets, diff --git a/llama_stack/providers/registry/files.py b/llama_stack/providers/registry/files.py index ebe90310c..9acabfacd 100644 --- a/llama_stack/providers/registry/files.py +++ b/llama_stack/providers/registry/files.py @@ -4,13 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
-from llama_stack.providers.datatypes import ( - AdapterSpec, - Api, - InlineProviderSpec, - ProviderSpec, - remote_provider_spec, -) +from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec from llama_stack.providers.utils.sqlstore.sqlstore import sql_store_pip_packages @@ -25,14 +19,13 @@ def available_providers() -> list[ProviderSpec]: config_class="llama_stack.providers.inline.files.localfs.config.LocalfsFilesImplConfig", description="Local filesystem-based file storage provider for managing files and documents locally.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.files, - adapter=AdapterSpec( - adapter_type="s3", - pip_packages=["boto3"] + sql_store_pip_packages, - module="llama_stack.providers.remote.files.s3", - config_class="llama_stack.providers.remote.files.s3.config.S3FilesImplConfig", - description="AWS S3-based file storage provider for scalable cloud file management with metadata persistence.", - ), + provider_type="remote::s3", + adapter_type="s3", + pip_packages=["boto3"] + sql_store_pip_packages, + module="llama_stack.providers.remote.files.s3", + config_class="llama_stack.providers.remote.files.s3.config.S3FilesImplConfig", + description="AWS S3-based file storage provider for scalable cloud file management with metadata persistence.", ), ] diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py index 0eb4cf104..658611698 100644 --- a/llama_stack/providers/registry/inference.py +++ b/llama_stack/providers/registry/inference.py @@ -6,11 +6,10 @@ from llama_stack.providers.datatypes import ( - AdapterSpec, Api, InlineProviderSpec, ProviderSpec, - remote_provider_spec, + RemoteProviderSpec, ) META_REFERENCE_DEPS = [ @@ -49,177 +48,167 @@ def available_providers() -> list[ProviderSpec]: config_class="llama_stack.providers.inline.inference.sentence_transformers.config.SentenceTransformersInferenceConfig", description="Sentence Transformers inference provider for text embeddings and similarity search.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="cerebras", - pip_packages=[ - "cerebras_cloud_sdk", - ], - module="llama_stack.providers.remote.inference.cerebras", - config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig", - description="Cerebras inference provider for running models on Cerebras Cloud platform.", - ), + adapter_type="cerebras", + provider_type="remote::cerebras", + pip_packages=[ + "cerebras_cloud_sdk", + ], + module="llama_stack.providers.remote.inference.cerebras", + config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig", + description="Cerebras inference provider for running models on Cerebras Cloud platform.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="ollama", - pip_packages=["ollama", "aiohttp", "h11>=0.16.0"], - config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig", - module="llama_stack.providers.remote.inference.ollama", - description="Ollama inference provider for running local models through the Ollama runtime.", - ), + adapter_type="ollama", + provider_type="remote::ollama", + pip_packages=["ollama", "aiohttp", "h11>=0.16.0"], + config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig", + module="llama_stack.providers.remote.inference.ollama", + description="Ollama inference provider for running local models through the Ollama 
runtime.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="vllm", - pip_packages=[], - module="llama_stack.providers.remote.inference.vllm", - config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig", - provider_data_validator="llama_stack.providers.remote.inference.vllm.VLLMProviderDataValidator", - description="Remote vLLM inference provider for connecting to vLLM servers.", - ), + adapter_type="vllm", + provider_type="remote::vllm", + pip_packages=[], + module="llama_stack.providers.remote.inference.vllm", + config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig", + provider_data_validator="llama_stack.providers.remote.inference.vllm.VLLMProviderDataValidator", + description="Remote vLLM inference provider for connecting to vLLM servers.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="tgi", - pip_packages=["huggingface_hub", "aiohttp"], - module="llama_stack.providers.remote.inference.tgi", - config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig", - description="Text Generation Inference (TGI) provider for HuggingFace model serving.", - ), + adapter_type="tgi", + provider_type="remote::tgi", + pip_packages=["huggingface_hub", "aiohttp"], + module="llama_stack.providers.remote.inference.tgi", + config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig", + description="Text Generation Inference (TGI) provider for HuggingFace model serving.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="hf::serverless", - pip_packages=["huggingface_hub", "aiohttp"], - module="llama_stack.providers.remote.inference.tgi", - config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig", - description="HuggingFace Inference API serverless provider for on-demand model inference.", - ), + adapter_type="hf::serverless", + provider_type="remote::hf::serverless", + pip_packages=["huggingface_hub", "aiohttp"], + module="llama_stack.providers.remote.inference.tgi", + config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig", + description="HuggingFace Inference API serverless provider for on-demand model inference.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="hf::endpoint", - pip_packages=["huggingface_hub", "aiohttp"], - module="llama_stack.providers.remote.inference.tgi", - config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig", - description="HuggingFace Inference Endpoints provider for dedicated model serving.", - ), + provider_type="remote::hf::endpoint", + adapter_type="hf::endpoint", + pip_packages=["huggingface_hub", "aiohttp"], + module="llama_stack.providers.remote.inference.tgi", + config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig", + description="HuggingFace Inference Endpoints provider for dedicated model serving.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="fireworks", - pip_packages=[ - "fireworks-ai<=0.17.16", - ], - module="llama_stack.providers.remote.inference.fireworks", - config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig", - provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator", - description="Fireworks AI inference 
provider for Llama models and other AI models on the Fireworks platform.", - ), + adapter_type="fireworks", + provider_type="remote::fireworks", + pip_packages=[ + "fireworks-ai<=0.17.16", + ], + module="llama_stack.providers.remote.inference.fireworks", + config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig", + provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator", + description="Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="together", - pip_packages=[ - "together", - ], - module="llama_stack.providers.remote.inference.together", - config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig", - provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator", - description="Together AI inference provider for open-source models and collaborative AI development.", - ), + adapter_type="together", + provider_type="remote::together", + pip_packages=[ + "together", + ], + module="llama_stack.providers.remote.inference.together", + config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig", + provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator", + description="Together AI inference provider for open-source models and collaborative AI development.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="bedrock", - pip_packages=["boto3"], - module="llama_stack.providers.remote.inference.bedrock", - config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig", - description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.", - ), + adapter_type="bedrock", + provider_type="remote::bedrock", + pip_packages=["boto3"], + module="llama_stack.providers.remote.inference.bedrock", + config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig", + description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="databricks", - pip_packages=[], - module="llama_stack.providers.remote.inference.databricks", - config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig", - description="Databricks inference provider for running models on Databricks' unified analytics platform.", - ), + adapter_type="databricks", + provider_type="remote::databricks", + pip_packages=[], + module="llama_stack.providers.remote.inference.databricks", + config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig", + description="Databricks inference provider for running models on Databricks' unified analytics platform.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="nvidia", - pip_packages=[], - module="llama_stack.providers.remote.inference.nvidia", - config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig", - description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.", - ), + adapter_type="nvidia", + provider_type="remote::nvidia", + pip_packages=[], + module="llama_stack.providers.remote.inference.nvidia", + 
config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig", + description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="runpod", - pip_packages=[], - module="llama_stack.providers.remote.inference.runpod", - config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig", - description="RunPod inference provider for running models on RunPod's cloud GPU platform.", - ), + adapter_type="runpod", + provider_type="remote::runpod", + pip_packages=[], + module="llama_stack.providers.remote.inference.runpod", + config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig", + description="RunPod inference provider for running models on RunPod's cloud GPU platform.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="openai", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.openai", - config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig", - provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator", - description="OpenAI inference provider for accessing GPT models and other OpenAI services.", - ), + adapter_type="openai", + provider_type="remote::openai", + pip_packages=["litellm"], + module="llama_stack.providers.remote.inference.openai", + config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig", + provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator", + description="OpenAI inference provider for accessing GPT models and other OpenAI services.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="anthropic", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.anthropic", - config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig", - provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator", - description="Anthropic inference provider for accessing Claude models and Anthropic's AI services.", - ), + adapter_type="anthropic", + provider_type="remote::anthropic", + pip_packages=["litellm"], + module="llama_stack.providers.remote.inference.anthropic", + config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig", + provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator", + description="Anthropic inference provider for accessing Claude models and Anthropic's AI services.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="gemini", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.gemini", - config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig", - provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator", - description="Google Gemini inference provider for accessing Gemini models and Google's AI services.", - ), + adapter_type="gemini", + provider_type="remote::gemini", + pip_packages=[ + "litellm", + ], + module="llama_stack.providers.remote.inference.gemini", + config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig", + 
provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator", + description="Google Gemini inference provider for accessing Gemini models and Google's AI services.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="vertexai", - pip_packages=["litellm", "google-cloud-aiplatform"], - module="llama_stack.providers.remote.inference.vertexai", - config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig", - provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator", - description="""Google Vertex AI inference provider enables you to use Google's Gemini models through Google Cloud's Vertex AI platform, providing several advantages: + adapter_type="vertexai", + provider_type="remote::vertexai", + pip_packages=[ + "litellm", + "google-cloud-aiplatform", + ], + module="llama_stack.providers.remote.inference.vertexai", + config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig", + provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator", + description="""Google Vertex AI inference provider enables you to use Google's Gemini models through Google Cloud's Vertex AI platform, providing several advantages: • Enterprise-grade security: Uses Google Cloud's security controls and IAM • Better integration: Seamless integration with other Google Cloud services @@ -239,76 +228,73 @@ Available Models: - vertex_ai/gemini-2.0-flash - vertex_ai/gemini-2.5-flash - vertex_ai/gemini-2.5-pro""", - ), ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="groq", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.groq", - config_class="llama_stack.providers.remote.inference.groq.GroqConfig", - provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator", - description="Groq inference provider for ultra-fast inference using Groq's LPU technology.", - ), + adapter_type="groq", + provider_type="remote::groq", + pip_packages=[ + "litellm", + ], + module="llama_stack.providers.remote.inference.groq", + config_class="llama_stack.providers.remote.inference.groq.GroqConfig", + provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator", + description="Groq inference provider for ultra-fast inference using Groq's LPU technology.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="llama-openai-compat", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.llama_openai_compat", - config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig", - provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator", - description="Llama OpenAI-compatible provider for using Llama models with OpenAI API format.", - ), + adapter_type="llama-openai-compat", + provider_type="remote::llama-openai-compat", + pip_packages=["litellm"], + module="llama_stack.providers.remote.inference.llama_openai_compat", + config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig", + provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator", + description="Llama OpenAI-compatible provider for 
using Llama models with OpenAI API format.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="sambanova", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.sambanova", - config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig", - provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator", - description="SambaNova inference provider for running models on SambaNova's dataflow architecture.", - ), + adapter_type="sambanova", + provider_type="remote::sambanova", + pip_packages=[ + "litellm", + ], + module="llama_stack.providers.remote.inference.sambanova", + config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig", + provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator", + description="SambaNova inference provider for running models on SambaNova's dataflow architecture.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="passthrough", - pip_packages=[], - module="llama_stack.providers.remote.inference.passthrough", - config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig", - provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator", - description="Passthrough inference provider for connecting to any external inference service not directly supported.", - ), + adapter_type="passthrough", + provider_type="remote::passthrough", + pip_packages=[], + module="llama_stack.providers.remote.inference.passthrough", + config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig", + provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator", + description="Passthrough inference provider for connecting to any external inference service not directly supported.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="watsonx", - pip_packages=["ibm_watsonx_ai"], - module="llama_stack.providers.remote.inference.watsonx", - config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig", - provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator", - description="IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform.", - ), + adapter_type="watsonx", + provider_type="remote::watsonx", + pip_packages=["ibm_watsonx_ai"], + module="llama_stack.providers.remote.inference.watsonx", + config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig", + provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator", + description="IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.inference, - adapter=AdapterSpec( - adapter_type="azure", - pip_packages=["litellm"], - module="llama_stack.providers.remote.inference.azure", - config_class="llama_stack.providers.remote.inference.azure.AzureConfig", - provider_data_validator="llama_stack.providers.remote.inference.azure.config.AzureProviderDataValidator", - description=""" + provider_type="remote::azure", + adapter_type="azure", + pip_packages=["litellm"], + module="llama_stack.providers.remote.inference.azure", + 
config_class="llama_stack.providers.remote.inference.azure.AzureConfig", + provider_data_validator="llama_stack.providers.remote.inference.azure.config.AzureProviderDataValidator", + description=""" Azure OpenAI inference provider for accessing GPT models and other Azure services. Provider documentation https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview """, - ), ), ] diff --git a/llama_stack/providers/registry/post_training.py b/llama_stack/providers/registry/post_training.py index 47aeb401e..2092e3b2d 100644 --- a/llama_stack/providers/registry/post_training.py +++ b/llama_stack/providers/registry/post_training.py @@ -7,7 +7,7 @@ from typing import cast -from llama_stack.providers.datatypes import AdapterSpec, Api, InlineProviderSpec, ProviderSpec, remote_provider_spec +from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec # We provide two versions of these providers so that distributions can package the appropriate version of torch. # The CPU version is used for distributions that don't have GPU support -- they result in smaller container images. @@ -57,14 +57,13 @@ def available_providers() -> list[ProviderSpec]: ], description="HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.post_training, - adapter=AdapterSpec( - adapter_type="nvidia", - pip_packages=["requests", "aiohttp"], - module="llama_stack.providers.remote.post_training.nvidia", - config_class="llama_stack.providers.remote.post_training.nvidia.NvidiaPostTrainingConfig", - description="NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.", - ), + adapter_type="nvidia", + provider_type="remote::nvidia", + pip_packages=["requests", "aiohttp"], + module="llama_stack.providers.remote.post_training.nvidia", + config_class="llama_stack.providers.remote.post_training.nvidia.NvidiaPostTrainingConfig", + description="NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.", ), ] diff --git a/llama_stack/providers/registry/safety.py b/llama_stack/providers/registry/safety.py index 9dd791bd8..b30074398 100644 --- a/llama_stack/providers/registry/safety.py +++ b/llama_stack/providers/registry/safety.py @@ -6,11 +6,10 @@ from llama_stack.providers.datatypes import ( - AdapterSpec, Api, InlineProviderSpec, ProviderSpec, - remote_provider_spec, + RemoteProviderSpec, ) @@ -48,35 +47,32 @@ def available_providers() -> list[ProviderSpec]: config_class="llama_stack.providers.inline.safety.code_scanner.CodeScannerConfig", description="Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.safety, - adapter=AdapterSpec( - adapter_type="bedrock", - pip_packages=["boto3"], - module="llama_stack.providers.remote.safety.bedrock", - config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig", - description="AWS Bedrock safety provider for content moderation using AWS's safety services.", - ), + adapter_type="bedrock", + provider_type="remote::bedrock", + pip_packages=["boto3"], + module="llama_stack.providers.remote.safety.bedrock", + config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig", + description="AWS Bedrock safety provider for content moderation using AWS's safety services.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.safety, - adapter=AdapterSpec( - adapter_type="nvidia", - 
pip_packages=["requests"], - module="llama_stack.providers.remote.safety.nvidia", - config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig", - description="NVIDIA's safety provider for content moderation and safety filtering.", - ), + adapter_type="nvidia", + provider_type="remote::nvidia", + pip_packages=["requests"], + module="llama_stack.providers.remote.safety.nvidia", + config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig", + description="NVIDIA's safety provider for content moderation and safety filtering.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.safety, - adapter=AdapterSpec( - adapter_type="sambanova", - pip_packages=["litellm", "requests"], - module="llama_stack.providers.remote.safety.sambanova", - config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig", - provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator", - description="SambaNova's safety provider for content moderation and safety filtering.", - ), + adapter_type="sambanova", + provider_type="remote::sambanova", + pip_packages=["litellm", "requests"], + module="llama_stack.providers.remote.safety.sambanova", + config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig", + provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator", + description="SambaNova's safety provider for content moderation and safety filtering.", ), ] diff --git a/llama_stack/providers/registry/tool_runtime.py b/llama_stack/providers/registry/tool_runtime.py index 5a58fa7af..ad8c31dfd 100644 --- a/llama_stack/providers/registry/tool_runtime.py +++ b/llama_stack/providers/registry/tool_runtime.py @@ -6,11 +6,10 @@ from llama_stack.providers.datatypes import ( - AdapterSpec, Api, InlineProviderSpec, ProviderSpec, - remote_provider_spec, + RemoteProviderSpec, ) @@ -35,59 +34,54 @@ def available_providers() -> list[ProviderSpec]: api_dependencies=[Api.vector_io, Api.inference, Api.files], description="RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.tool_runtime, - adapter=AdapterSpec( - adapter_type="brave-search", - module="llama_stack.providers.remote.tool_runtime.brave_search", - config_class="llama_stack.providers.remote.tool_runtime.brave_search.config.BraveSearchToolConfig", - pip_packages=["requests"], - provider_data_validator="llama_stack.providers.remote.tool_runtime.brave_search.BraveSearchToolProviderDataValidator", - description="Brave Search tool for web search capabilities with privacy-focused results.", - ), + adapter_type="brave-search", + provider_type="remote::brave-search", + module="llama_stack.providers.remote.tool_runtime.brave_search", + config_class="llama_stack.providers.remote.tool_runtime.brave_search.config.BraveSearchToolConfig", + pip_packages=["requests"], + provider_data_validator="llama_stack.providers.remote.tool_runtime.brave_search.BraveSearchToolProviderDataValidator", + description="Brave Search tool for web search capabilities with privacy-focused results.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.tool_runtime, - adapter=AdapterSpec( - adapter_type="bing-search", - module="llama_stack.providers.remote.tool_runtime.bing_search", - config_class="llama_stack.providers.remote.tool_runtime.bing_search.config.BingSearchToolConfig", - pip_packages=["requests"], - 
provider_data_validator="llama_stack.providers.remote.tool_runtime.bing_search.BingSearchToolProviderDataValidator", - description="Bing Search tool for web search capabilities using Microsoft's search engine.", - ), + adapter_type="bing-search", + provider_type="remote::bing-search", + module="llama_stack.providers.remote.tool_runtime.bing_search", + config_class="llama_stack.providers.remote.tool_runtime.bing_search.config.BingSearchToolConfig", + pip_packages=["requests"], + provider_data_validator="llama_stack.providers.remote.tool_runtime.bing_search.BingSearchToolProviderDataValidator", + description="Bing Search tool for web search capabilities using Microsoft's search engine.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.tool_runtime, - adapter=AdapterSpec( - adapter_type="tavily-search", - module="llama_stack.providers.remote.tool_runtime.tavily_search", - config_class="llama_stack.providers.remote.tool_runtime.tavily_search.config.TavilySearchToolConfig", - pip_packages=["requests"], - provider_data_validator="llama_stack.providers.remote.tool_runtime.tavily_search.TavilySearchToolProviderDataValidator", - description="Tavily Search tool for AI-optimized web search with structured results.", - ), + adapter_type="tavily-search", + provider_type="remote::tavily-search", + module="llama_stack.providers.remote.tool_runtime.tavily_search", + config_class="llama_stack.providers.remote.tool_runtime.tavily_search.config.TavilySearchToolConfig", + pip_packages=["requests"], + provider_data_validator="llama_stack.providers.remote.tool_runtime.tavily_search.TavilySearchToolProviderDataValidator", + description="Tavily Search tool for AI-optimized web search with structured results.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.tool_runtime, - adapter=AdapterSpec( - adapter_type="wolfram-alpha", - module="llama_stack.providers.remote.tool_runtime.wolfram_alpha", - config_class="llama_stack.providers.remote.tool_runtime.wolfram_alpha.config.WolframAlphaToolConfig", - pip_packages=["requests"], - provider_data_validator="llama_stack.providers.remote.tool_runtime.wolfram_alpha.WolframAlphaToolProviderDataValidator", - description="Wolfram Alpha tool for computational knowledge and mathematical calculations.", - ), + adapter_type="wolfram-alpha", + provider_type="remote::wolfram-alpha", + module="llama_stack.providers.remote.tool_runtime.wolfram_alpha", + config_class="llama_stack.providers.remote.tool_runtime.wolfram_alpha.config.WolframAlphaToolConfig", + pip_packages=["requests"], + provider_data_validator="llama_stack.providers.remote.tool_runtime.wolfram_alpha.WolframAlphaToolProviderDataValidator", + description="Wolfram Alpha tool for computational knowledge and mathematical calculations.", ), - remote_provider_spec( + RemoteProviderSpec( api=Api.tool_runtime, - adapter=AdapterSpec( - adapter_type="model-context-protocol", - module="llama_stack.providers.remote.tool_runtime.model_context_protocol", - config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig", - pip_packages=["mcp>=1.8.1"], - provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator", - description="Model Context Protocol (MCP) tool for standardized tool calling and context management.", - ), + adapter_type="model-context-protocol", + provider_type="remote::model-context-protocol", + module="llama_stack.providers.remote.tool_runtime.model_context_protocol", + 
config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig", + pip_packages=["mcp>=1.8.1"], + provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator", + description="Model Context Protocol (MCP) tool for standardized tool calling and context management.", ), ] diff --git a/llama_stack/providers/registry/vector_io.py b/llama_stack/providers/registry/vector_io.py index 511734d57..e8237bc62 100644 --- a/llama_stack/providers/registry/vector_io.py +++ b/llama_stack/providers/registry/vector_io.py @@ -6,11 +6,10 @@ from llama_stack.providers.datatypes import ( - AdapterSpec, Api, InlineProviderSpec, ProviderSpec, - remote_provider_spec, + RemoteProviderSpec, ) @@ -300,14 +299,16 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f Please refer to the sqlite-vec provider documentation. """, ), - remote_provider_spec( - Api.vector_io, - AdapterSpec( - adapter_type="chromadb", - pip_packages=["chromadb-client"], - module="llama_stack.providers.remote.vector_io.chroma", - config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig", - description=""" + RemoteProviderSpec( + api=Api.vector_io, + adapter_type="chromadb", + provider_type="remote::chromadb", + pip_packages=["chromadb-client"], + module="llama_stack.providers.remote.vector_io.chroma", + config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig", + api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], + description=""" [Chroma](https://www.trychroma.com/) is an inline and remote vector database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database. That means you're not limited to storing vectors in memory or in a separate service. @@ -340,9 +341,6 @@ pip install chromadb ## Documentation See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introduction) for more details about Chroma in general. """, - ), - api_dependencies=[Api.inference], - optional_api_dependencies=[Api.files], ), InlineProviderSpec( api=Api.vector_io, @@ -387,14 +385,16 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti """, ), - remote_provider_spec( - Api.vector_io, - AdapterSpec( - adapter_type="pgvector", - pip_packages=["psycopg2-binary"], - module="llama_stack.providers.remote.vector_io.pgvector", - config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig", - description=""" + RemoteProviderSpec( + api=Api.vector_io, + adapter_type="pgvector", + provider_type="remote::pgvector", + pip_packages=["psycopg2-binary"], + module="llama_stack.providers.remote.vector_io.pgvector", + config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig", + api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], + description=""" [PGVector](https://github.com/pgvector/pgvector) is a remote vector database provider for Llama Stack. It allows you to store and query vectors directly in memory. That means you'll get fast and efficient vector retrieval. @@ -495,19 +495,18 @@ docker pull pgvector/pgvector:pg17 ## Documentation See [PGVector's documentation](https://github.com/pgvector/pgvector) for more details about PGVector in general. 
""", - ), + ), + RemoteProviderSpec( + api=Api.vector_io, + adapter_type="weaviate", + provider_type="remote::weaviate", + pip_packages=["weaviate-client"], + module="llama_stack.providers.remote.vector_io.weaviate", + config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig", + provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData", api_dependencies=[Api.inference], optional_api_dependencies=[Api.files], - ), - remote_provider_spec( - Api.vector_io, - AdapterSpec( - adapter_type="weaviate", - pip_packages=["weaviate-client"], - module="llama_stack.providers.remote.vector_io.weaviate", - config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig", - provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData", - description=""" + description=""" [Weaviate](https://weaviate.io/) is a vector database provider for Llama Stack. It allows you to store and query vectors directly within a Weaviate database. That means you're not limited to storing vectors in memory or in a separate service. @@ -538,9 +537,6 @@ To install Weaviate see the [Weaviate quickstart documentation](https://weaviate ## Documentation See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more details about Weaviate in general. """, - ), - api_dependencies=[Api.inference], - optional_api_dependencies=[Api.files], ), InlineProviderSpec( api=Api.vector_io, @@ -594,28 +590,29 @@ docker pull qdrant/qdrant See the [Qdrant documentation](https://qdrant.tech/documentation/) for more details about Qdrant in general. """, ), - remote_provider_spec( - Api.vector_io, - AdapterSpec( - adapter_type="qdrant", - pip_packages=["qdrant-client"], - module="llama_stack.providers.remote.vector_io.qdrant", - config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig", - description=""" -Please refer to the inline provider documentation. -""", - ), + RemoteProviderSpec( + api=Api.vector_io, + adapter_type="qdrant", + provider_type="remote::qdrant", + pip_packages=["qdrant-client"], + module="llama_stack.providers.remote.vector_io.qdrant", + config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig", api_dependencies=[Api.inference], optional_api_dependencies=[Api.files], + description=""" +Please refer to the inline provider documentation. +""", ), - remote_provider_spec( - Api.vector_io, - AdapterSpec( - adapter_type="milvus", - pip_packages=["pymilvus>=2.4.10"], - module="llama_stack.providers.remote.vector_io.milvus", - config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig", - description=""" + RemoteProviderSpec( + api=Api.vector_io, + adapter_type="milvus", + provider_type="remote::milvus", + pip_packages=["pymilvus>=2.4.10"], + module="llama_stack.providers.remote.vector_io.milvus", + config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig", + api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], + description=""" [Milvus](https://milvus.io/) is an inline and remote vector database provider for Llama Stack. It allows you to store and query vectors directly within a Milvus database. That means you're not limited to storing vectors in memory or in a separate service. 
@@ -636,7 +633,13 @@ To use Milvus in your Llama Stack project, follow these steps: ## Installation -You can install Milvus using pymilvus: +If you want to use inline Milvus, you can install: + +```bash +pip install pymilvus[milvus-lite] +``` + +If you want to use remote Milvus, you can install: ```bash pip install pymilvus @@ -806,14 +809,11 @@ See the [Milvus documentation](https://milvus.io/docs/install-overview.md) for m For more details on TLS configuration, refer to the [TLS setup guide](https://milvus.io/docs/tls.md). """, - ), - api_dependencies=[Api.inference], - optional_api_dependencies=[Api.files], ), InlineProviderSpec( api=Api.vector_io, provider_type="inline::milvus", - pip_packages=["pymilvus>=2.4.10"], + pip_packages=["pymilvus[milvus-lite]>=2.4.10"], module="llama_stack.providers.inline.vector_io.milvus", config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig", api_dependencies=[Api.inference], diff --git a/llama_stack/providers/remote/files/s3/files.py b/llama_stack/providers/remote/files/s3/files.py index 54742d900..8ea96af9e 100644 --- a/llama_stack/providers/remote/files/s3/files.py +++ b/llama_stack/providers/remote/files/s3/files.py @@ -137,7 +137,7 @@ class S3FilesImpl(Files): where: dict[str, str | dict] = {"id": file_id} if not return_expired: where["expires_at"] = {">": self._now()} - if not (row := await self.sql_store.fetch_one("openai_files", policy=self.policy, where=where)): + if not (row := await self.sql_store.fetch_one("openai_files", where=where)): raise ResourceNotFoundError(file_id, "File", "files.list()") return row @@ -164,7 +164,7 @@ class S3FilesImpl(Files): self._client = _create_s3_client(self._config) await _create_bucket_if_not_exists(self._client, self._config) - self._sql_store = AuthorizedSqlStore(sqlstore_impl(self._config.metadata_store)) + self._sql_store = AuthorizedSqlStore(sqlstore_impl(self._config.metadata_store), self.policy) await self._sql_store.create_table( "openai_files", { @@ -268,7 +268,6 @@ class S3FilesImpl(Files): paginated_result = await self.sql_store.fetch_all( table="openai_files", - policy=self.policy, where=where_conditions, order_by=[("created_at", order.value)], cursor=("id", after) if after else None, diff --git a/llama_stack/providers/remote/inference/anthropic/__init__.py b/llama_stack/providers/remote/inference/anthropic/__init__.py index 8b420a5a0..30d986808 100644 --- a/llama_stack/providers/remote/inference/anthropic/__init__.py +++ b/llama_stack/providers/remote/inference/anthropic/__init__.py @@ -4,15 +4,9 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from pydantic import BaseModel - from .config import AnthropicConfig -class AnthropicProviderDataValidator(BaseModel): - anthropic_api_key: str | None = None - - async def get_adapter_impl(config: AnthropicConfig, _deps): from .anthropic import AnthropicInferenceAdapter diff --git a/llama_stack/providers/remote/inference/fireworks/fireworks.py b/llama_stack/providers/remote/inference/fireworks/fireworks.py index e907e8ec6..2fcf1be2e 100644 --- a/llama_stack/providers/remote/inference/fireworks/fireworks.py +++ b/llama_stack/providers/remote/inference/fireworks/fireworks.py @@ -4,11 +4,9 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
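The inline/remote install split documented in the Milvus hunk above corresponds to two client shapes in `pymilvus`: pointing `MilvusClient` at a local file uses the embedded Milvus Lite engine (which is what the `pymilvus[milvus-lite]` extra provides), while a URI targets a deployed server. A hedged sketch; the path and URI are placeholders:

```python
# Sketch only: what the two install flavours buy you. The local-file form
# needs the pymilvus[milvus-lite] extra; the URI form needs a running server.
from pymilvus import MilvusClient

inline_client = MilvusClient("./milvus_demo.db")            # embedded Milvus Lite
remote_client = MilvusClient(uri="http://localhost:19530")  # standalone Milvus
```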
-from collections.abc import AsyncGenerator, AsyncIterator -from typing import Any +from collections.abc import AsyncGenerator from fireworks.client import Fireworks -from openai import AsyncOpenAI from llama_stack.apis.common.content_types import ( InterleavedContent, @@ -24,12 +22,6 @@ from llama_stack.apis.inference import ( Inference, LogProbConfig, Message, - OpenAIChatCompletion, - OpenAIChatCompletionChunk, - OpenAICompletion, - OpenAIEmbeddingsResponse, - OpenAIMessageParam, - OpenAIResponseFormatParam, ResponseFormat, ResponseFormatType, SamplingParams, @@ -45,15 +37,14 @@ from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, ) from llama_stack.providers.utils.inference.openai_compat import ( - OpenAIChatCompletionToLlamaStackMixin, convert_message_to_openai_dict, get_sampling_options, - prepare_openai_completion_params, process_chat_completion_response, process_chat_completion_stream_response, process_completion_response, process_completion_stream_response, ) +from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_prompt, completion_request_to_prompt, @@ -68,7 +59,7 @@ from .models import MODEL_ENTRIES logger = get_logger(name=__name__, category="inference::fireworks") -class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProviderData): +class FireworksInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, NeedsRequestProviderData): def __init__(self, config: FireworksImplConfig) -> None: ModelRegistryHelper.__init__(self, MODEL_ENTRIES, config.allowed_models) self.config = config @@ -79,7 +70,7 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv async def shutdown(self) -> None: pass - def _get_api_key(self) -> str: + def get_api_key(self) -> str: config_api_key = self.config.api_key.get_secret_value() if self.config.api_key else None if config_api_key: return config_api_key @@ -91,15 +82,18 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv ) return provider_data.fireworks_api_key - def _get_base_url(self) -> str: + def get_base_url(self) -> str: return "https://api.fireworks.ai/inference/v1" def _get_client(self) -> Fireworks: - fireworks_api_key = self._get_api_key() + fireworks_api_key = self.get_api_key() return Fireworks(api_key=fireworks_api_key) - def _get_openai_client(self) -> AsyncOpenAI: - return AsyncOpenAI(base_url=self._get_base_url(), api_key=self._get_api_key()) + def _preprocess_prompt_for_fireworks(self, prompt: str) -> str: + """Remove BOS token as Fireworks automatically prepends it""" + if prompt.startswith("<|begin_of_text|>"): + return prompt[len("<|begin_of_text|>") :] + return prompt async def completion( self, @@ -285,153 +279,3 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv embeddings = [data.embedding for data in response.data] return EmbeddingsResponse(embeddings=embeddings) - - async def openai_embeddings( - self, - model: str, - input: str | list[str], - encoding_format: str | None = "float", - dimensions: int | None = None, - user: str | None = None, - ) -> OpenAIEmbeddingsResponse: - raise NotImplementedError() - - async def openai_completion( - self, - model: str, - prompt: str | list[str] | list[int] | list[list[int]], - best_of: int | None = None, - echo: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str, float] | None = None, - 
logprobs: bool | None = None, - max_tokens: int | None = None, - n: int | None = None, - presence_penalty: float | None = None, - seed: int | None = None, - stop: str | list[str] | None = None, - stream: bool | None = None, - stream_options: dict[str, Any] | None = None, - temperature: float | None = None, - top_p: float | None = None, - user: str | None = None, - guided_choice: list[str] | None = None, - prompt_logprobs: int | None = None, - suffix: str | None = None, - ) -> OpenAICompletion: - model_obj = await self.model_store.get_model(model) - - # Fireworks always prepends with BOS - if isinstance(prompt, str) and prompt.startswith("<|begin_of_text|>"): - prompt = prompt[len("<|begin_of_text|>") :] - - params = await prepare_openai_completion_params( - model=model_obj.provider_resource_id, - prompt=prompt, - best_of=best_of, - echo=echo, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - presence_penalty=presence_penalty, - seed=seed, - stop=stop, - stream=stream, - stream_options=stream_options, - temperature=temperature, - top_p=top_p, - user=user, - ) - - return await self._get_openai_client().completions.create(**params) - - async def openai_chat_completion( - self, - model: str, - messages: list[OpenAIMessageParam], - frequency_penalty: float | None = None, - function_call: str | dict[str, Any] | None = None, - functions: list[dict[str, Any]] | None = None, - logit_bias: dict[str, float] | None = None, - logprobs: bool | None = None, - max_completion_tokens: int | None = None, - max_tokens: int | None = None, - n: int | None = None, - parallel_tool_calls: bool | None = None, - presence_penalty: float | None = None, - response_format: OpenAIResponseFormatParam | None = None, - seed: int | None = None, - stop: str | list[str] | None = None, - stream: bool | None = None, - stream_options: dict[str, Any] | None = None, - temperature: float | None = None, - tool_choice: str | dict[str, Any] | None = None, - tools: list[dict[str, Any]] | None = None, - top_logprobs: int | None = None, - top_p: float | None = None, - user: str | None = None, - ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]: - model_obj = await self.model_store.get_model(model) - - # Divert Llama Models through Llama Stack inference APIs because - # Fireworks chat completions OpenAI-compatible API does not support - # tool calls properly. 
- llama_model = self.get_llama_model(model_obj.provider_resource_id) - - if llama_model: - return await OpenAIChatCompletionToLlamaStackMixin.openai_chat_completion( - self, - model=model, - messages=messages, - frequency_penalty=frequency_penalty, - function_call=function_call, - functions=functions, - logit_bias=logit_bias, - logprobs=logprobs, - max_completion_tokens=max_completion_tokens, - max_tokens=max_tokens, - n=n, - parallel_tool_calls=parallel_tool_calls, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - stream=stream, - stream_options=stream_options, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_logprobs=top_logprobs, - top_p=top_p, - user=user, - ) - - params = await prepare_openai_completion_params( - messages=messages, - frequency_penalty=frequency_penalty, - function_call=function_call, - functions=functions, - logit_bias=logit_bias, - logprobs=logprobs, - max_completion_tokens=max_completion_tokens, - max_tokens=max_tokens, - n=n, - parallel_tool_calls=parallel_tool_calls, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - stream=stream, - stream_options=stream_options, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_logprobs=top_logprobs, - top_p=top_p, - user=user, - ) - - logger.debug(f"fireworks params: {params}") - return await self._get_openai_client().chat.completions.create(model=model_obj.provider_resource_id, **params) diff --git a/llama_stack/providers/remote/inference/gemini/__init__.py b/llama_stack/providers/remote/inference/gemini/__init__.py index 9d35da893..bda2f52d4 100644 --- a/llama_stack/providers/remote/inference/gemini/__init__.py +++ b/llama_stack/providers/remote/inference/gemini/__init__.py @@ -4,15 +4,9 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
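In place of the roughly 150 lines removed from the Fireworks adapter, the OpenAI-compatible surface (`openai_completion`, `openai_chat_completion`, `openai_embeddings`) is now inherited from `OpenAIMixin`, with the adapter contributing only credentials and a base URL through the renamed `get_api_key` / `get_base_url` hooks. A sketch of the resulting contract; the import path is the one this diff adds, and the assumption that the mixin builds its client from just these two hooks follows from the renames above:

```python
# Hypothetical adapter showing the pattern the inference adapters converge on;
# real adapters also mix in ModelRegistryHelper, Inference, etc.
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin

class ExampleOpenAICompatAdapter(OpenAIMixin):
    def get_api_key(self) -> str:
        # Real adapters read this from config or per-request provider data.
        return "example-api-key"

    def get_base_url(self) -> str:
        return "https://api.fireworks.ai/inference/v1"
```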
-from pydantic import BaseModel - from .config import GeminiConfig -class GeminiProviderDataValidator(BaseModel): - gemini_api_key: str | None = None - - async def get_adapter_impl(config: GeminiConfig, _deps): from .gemini import GeminiInferenceAdapter diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py index d3d107e1d..67a22cbe3 100644 --- a/llama_stack/providers/remote/inference/ollama/ollama.py +++ b/llama_stack/providers/remote/inference/ollama/ollama.py @@ -7,12 +7,10 @@ import asyncio import base64 -import uuid from collections.abc import AsyncGenerator, AsyncIterator from typing import Any -from ollama import AsyncClient # type: ignore[attr-defined] -from openai import AsyncOpenAI +from ollama import AsyncClient as AsyncOllamaClient from llama_stack.apis.common.content_types import ( ImageContentItem, @@ -37,9 +35,6 @@ from llama_stack.apis.inference import ( Message, OpenAIChatCompletion, OpenAIChatCompletionChunk, - OpenAICompletion, - OpenAIEmbeddingsResponse, - OpenAIEmbeddingUsage, OpenAIMessageParam, OpenAIResponseFormatParam, ResponseFormat, @@ -64,15 +59,14 @@ from llama_stack.providers.utils.inference.model_registry import ( from llama_stack.providers.utils.inference.openai_compat import ( OpenAICompatCompletionChoice, OpenAICompatCompletionResponse, - b64_encode_openai_embeddings_response, get_sampling_options, prepare_openai_completion_params, - prepare_openai_embeddings_params, process_chat_completion_response, process_chat_completion_stream_response, process_completion_response, process_completion_stream_response, ) +from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_prompt, completion_request_to_prompt, @@ -89,6 +83,7 @@ logger = get_logger(name=__name__, category="inference::ollama") class OllamaInferenceAdapter( + OpenAIMixin, InferenceProvider, ModelsProtocolPrivate, ): @@ -98,23 +93,21 @@ class OllamaInferenceAdapter( def __init__(self, config: OllamaImplConfig) -> None: self.register_helper = ModelRegistryHelper(MODEL_ENTRIES) self.config = config - self._clients: dict[asyncio.AbstractEventLoop, AsyncClient] = {} - self._openai_client = None + self._clients: dict[asyncio.AbstractEventLoop, AsyncOllamaClient] = {} @property - def client(self) -> AsyncClient: + def ollama_client(self) -> AsyncOllamaClient: # ollama client attaches itself to the current event loop (sadly?) 
loop = asyncio.get_running_loop() if loop not in self._clients: - self._clients[loop] = AsyncClient(host=self.config.url) + self._clients[loop] = AsyncOllamaClient(host=self.config.url) return self._clients[loop] - @property - def openai_client(self) -> AsyncOpenAI: - if self._openai_client is None: - url = self.config.url.rstrip("/") - self._openai_client = AsyncOpenAI(base_url=f"{url}/v1", api_key="ollama") - return self._openai_client + def get_api_key(self): + return "NO_KEY" + + def get_base_url(self): + return self.config.url.rstrip("/") + "/v1" async def initialize(self) -> None: logger.info(f"checking connectivity to Ollama at `{self.config.url}`...") @@ -129,7 +122,7 @@ class OllamaInferenceAdapter( async def list_models(self) -> list[Model] | None: provider_id = self.__provider_id__ - response = await self.client.list() + response = await self.ollama_client.list() # always add the two embedding models which can be pulled on demand models = [ @@ -189,7 +182,7 @@ class OllamaInferenceAdapter( HealthResponse: A dictionary containing the health status. """ try: - await self.client.ps() + await self.ollama_client.ps() return HealthResponse(status=HealthStatus.OK) except Exception as e: return HealthResponse(status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}") @@ -238,7 +231,7 @@ class OllamaInferenceAdapter( params = await self._get_params(request) async def _generate_and_convert_to_openai_compat(): - s = await self.client.generate(**params) + s = await self.ollama_client.generate(**params) async for chunk in s: choice = OpenAICompatCompletionChoice( finish_reason=chunk["done_reason"] if chunk["done"] else None, @@ -254,7 +247,7 @@ class OllamaInferenceAdapter( async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: params = await self._get_params(request) - r = await self.client.generate(**params) + r = await self.ollama_client.generate(**params) choice = OpenAICompatCompletionChoice( finish_reason=r["done_reason"] if r["done"] else None, @@ -346,9 +339,9 @@ class OllamaInferenceAdapter( async def _nonstream_chat_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse: params = await self._get_params(request) if "messages" in params: - r = await self.client.chat(**params) + r = await self.ollama_client.chat(**params) else: - r = await self.client.generate(**params) + r = await self.ollama_client.generate(**params) if "message" in r: choice = OpenAICompatCompletionChoice( @@ -372,9 +365,9 @@ class OllamaInferenceAdapter( async def _generate_and_convert_to_openai_compat(): if "messages" in params: - s = await self.client.chat(**params) + s = await self.ollama_client.chat(**params) else: - s = await self.client.generate(**params) + s = await self.ollama_client.generate(**params) async for chunk in s: if "message" in chunk: choice = OpenAICompatCompletionChoice( @@ -407,7 +400,7 @@ class OllamaInferenceAdapter( assert all(not content_has_media(content) for content in contents), ( "Ollama does not support media for embeddings" ) - response = await self.client.embed( + response = await self.ollama_client.embed( model=model.provider_resource_id, input=[interleaved_content_as_str(content) for content in contents], ) @@ -422,14 +415,14 @@ class OllamaInferenceAdapter( pass # Ignore statically unknown model, will check live listing if model.model_type == ModelType.embedding: - response = await self.client.list() + response = await self.ollama_client.list() if model.provider_resource_id not in [m.model for m in 
response.models]: - await self.client.pull(model.provider_resource_id) + await self.ollama_client.pull(model.provider_resource_id) # we use list() here instead of ps() - # - ps() only lists running models, not available models # - models not currently running are run by the ollama server as needed - response = await self.client.list() + response = await self.ollama_client.list() available_models = [m.model for m in response.models] provider_resource_id = model.provider_resource_id @@ -448,90 +441,6 @@ class OllamaInferenceAdapter( return model - async def openai_embeddings( - self, - model: str, - input: str | list[str], - encoding_format: str | None = "float", - dimensions: int | None = None, - user: str | None = None, - ) -> OpenAIEmbeddingsResponse: - model_obj = await self._get_model(model) - if model_obj.provider_resource_id is None: - raise ValueError(f"Model {model} has no provider_resource_id set") - - # Note, at the moment Ollama does not support encoding_format, dimensions, and user parameters - params = prepare_openai_embeddings_params( - model=model_obj.provider_resource_id, - input=input, - encoding_format=encoding_format, - dimensions=dimensions, - user=user, - ) - - response = await self.openai_client.embeddings.create(**params) - data = b64_encode_openai_embeddings_response(response.data, encoding_format) - - usage = OpenAIEmbeddingUsage( - prompt_tokens=response.usage.prompt_tokens, - total_tokens=response.usage.total_tokens, - ) - # TODO: Investigate why model_obj.identifier is used instead of response.model - return OpenAIEmbeddingsResponse( - data=data, - model=model_obj.identifier, - usage=usage, - ) - - async def openai_completion( - self, - model: str, - prompt: str | list[str] | list[int] | list[list[int]], - best_of: int | None = None, - echo: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str, float] | None = None, - logprobs: bool | None = None, - max_tokens: int | None = None, - n: int | None = None, - presence_penalty: float | None = None, - seed: int | None = None, - stop: str | list[str] | None = None, - stream: bool | None = None, - stream_options: dict[str, Any] | None = None, - temperature: float | None = None, - top_p: float | None = None, - user: str | None = None, - guided_choice: list[str] | None = None, - prompt_logprobs: int | None = None, - suffix: str | None = None, - ) -> OpenAICompletion: - if not isinstance(prompt, str): - raise ValueError("Ollama does not support non-string prompts for completion") - - model_obj = await self._get_model(model) - params = await prepare_openai_completion_params( - model=model_obj.provider_resource_id, - prompt=prompt, - best_of=best_of, - echo=echo, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - presence_penalty=presence_penalty, - seed=seed, - stop=stop, - stream=stream, - stream_options=stream_options, - temperature=temperature, - top_p=top_p, - user=user, - suffix=suffix, - ) - return await self.openai_client.completions.create(**params) # type: ignore - async def openai_chat_completion( self, model: str, @@ -599,25 +508,7 @@ class OllamaInferenceAdapter( top_p=top_p, user=user, ) - response = await self.openai_client.chat.completions.create(**params) - return await self._adjust_ollama_chat_completion_response_ids(response) - - async def _adjust_ollama_chat_completion_response_ids( - self, - response: OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk], - ) -> OpenAIChatCompletion | 
AsyncIterator[OpenAIChatCompletionChunk]: - id = f"chatcmpl-{uuid.uuid4()}" - if isinstance(response, AsyncIterator): - - async def stream_with_chunk_ids() -> AsyncIterator[OpenAIChatCompletionChunk]: - async for chunk in response: - chunk.id = id - yield chunk - - return stream_with_chunk_ids() - else: - response.id = id - return response + return await OpenAIMixin.openai_chat_completion(self, **params) async def convert_message_to_openai_dict_for_ollama(message: Message) -> list[dict]: diff --git a/llama_stack/providers/remote/inference/openai/__init__.py b/llama_stack/providers/remote/inference/openai/__init__.py index c245dbe10..bd3daeb9a 100644 --- a/llama_stack/providers/remote/inference/openai/__init__.py +++ b/llama_stack/providers/remote/inference/openai/__init__.py @@ -4,15 +4,9 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from pydantic import BaseModel - from .config import OpenAIConfig -class OpenAIProviderDataValidator(BaseModel): - openai_api_key: str | None = None - - async def get_adapter_impl(config: OpenAIConfig, _deps): from .openai import OpenAIInferenceAdapter diff --git a/llama_stack/providers/utils/inference/inference_store.py b/llama_stack/providers/utils/inference/inference_store.py index 17f4c6268..ffc9f3e11 100644 --- a/llama_stack/providers/utils/inference/inference_store.py +++ b/llama_stack/providers/utils/inference/inference_store.py @@ -54,7 +54,7 @@ class InferenceStore: async def initialize(self): """Create the necessary tables if they don't exist.""" - self.sql_store = AuthorizedSqlStore(sqlstore_impl(self.sql_store_config)) + self.sql_store = AuthorizedSqlStore(sqlstore_impl(self.sql_store_config), self.policy) await self.sql_store.create_table( "chat_completions", { @@ -202,7 +202,6 @@ class InferenceStore: order_by=[("created", order.value)], cursor=("id", after) if after else None, limit=limit, - policy=self.policy, ) data = [ @@ -229,7 +228,6 @@ class InferenceStore: row = await self.sql_store.fetch_one( table="chat_completions", where={"id": completion_id}, - policy=self.policy, ) if not row: diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py index 44add8f9e..b6b06c0b6 100644 --- a/llama_stack/providers/utils/inference/model_registry.py +++ b/llama_stack/providers/utils/inference/model_registry.py @@ -103,7 +103,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate): Model( identifier=id, provider_resource_id=entry.provider_model_id, - model_type=ModelType.llm, + model_type=entry.model_type, metadata=entry.metadata, provider_id=self.__provider_id__, ) diff --git a/llama_stack/providers/utils/responses/responses_store.py b/llama_stack/providers/utils/responses/responses_store.py index 04778ed1c..829cd8a62 100644 --- a/llama_stack/providers/utils/responses/responses_store.py +++ b/llama_stack/providers/utils/responses/responses_store.py @@ -28,8 +28,7 @@ class ResponsesStore: sql_store_config = SqliteSqlStoreConfig( db_path=(RUNTIME_BASE_DIR / "sqlstore.db").as_posix(), ) - self.sql_store = AuthorizedSqlStore(sqlstore_impl(sql_store_config)) - self.policy = policy + self.sql_store = AuthorizedSqlStore(sqlstore_impl(sql_store_config), policy) async def initialize(self): """Create the necessary tables if they don't exist.""" @@ -87,7 +86,6 @@ class ResponsesStore: order_by=[("created_at", order.value)], cursor=("id", after) if after else None, limit=limit, - policy=self.policy, ) data = 
[OpenAIResponseObjectWithInput(**row["response_object"]) for row in paginated_result.data] @@ -105,7 +103,6 @@ class ResponsesStore: row = await self.sql_store.fetch_one( "openai_responses", where={"id": response_id}, - policy=self.policy, ) if not row: @@ -116,7 +113,7 @@ class ResponsesStore: return OpenAIResponseObjectWithInput(**row["response_object"]) async def delete_response_object(self, response_id: str) -> OpenAIDeleteResponseObject: - row = await self.sql_store.fetch_one("openai_responses", where={"id": response_id}, policy=self.policy) + row = await self.sql_store.fetch_one("openai_responses", where={"id": response_id}) if not row: raise ValueError(f"Response with id {response_id} not found") await self.sql_store.delete("openai_responses", where={"id": response_id}) diff --git a/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py b/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py index acb688f96..ab67f7052 100644 --- a/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +++ b/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py @@ -53,13 +53,15 @@ class AuthorizedSqlStore: access control policies, user attribute capture, and SQL filtering optimization. """ - def __init__(self, sql_store: SqlStore): + def __init__(self, sql_store: SqlStore, policy: list[AccessRule]): """ Initialize the authorization layer. :param sql_store: Base SqlStore implementation to wrap + :param policy: Access control policy to use for authorization """ self.sql_store = sql_store + self.policy = policy self._detect_database_type() self._validate_sql_optimized_policy() @@ -117,14 +119,13 @@ class AuthorizedSqlStore: async def fetch_all( self, table: str, - policy: list[AccessRule], where: Mapping[str, Any] | None = None, limit: int | None = None, order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None, cursor: tuple[str, str] | None = None, ) -> PaginatedResponse: """Fetch all rows with automatic access control filtering.""" - access_where = self._build_access_control_where_clause(policy) + access_where = self._build_access_control_where_clause(self.policy) rows = await self.sql_store.fetch_all( table=table, where=where, @@ -146,7 +147,7 @@ class AuthorizedSqlStore: str(record_id), table, User(principal=stored_owner_principal, attributes=stored_access_attrs) ) - if is_action_allowed(policy, Action.READ, sql_record, current_user): + if is_action_allowed(self.policy, Action.READ, sql_record, current_user): filtered_rows.append(row) return PaginatedResponse( @@ -157,14 +158,12 @@ class AuthorizedSqlStore: async def fetch_one( self, table: str, - policy: list[AccessRule], where: Mapping[str, Any] | None = None, order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None, ) -> dict[str, Any] | None: """Fetch one row with automatic access control checking.""" results = await self.fetch_all( table=table, - policy=policy, where=where, limit=1, order_by=order_by, diff --git a/llama_stack/testing/inference_recorder.py b/llama_stack/testing/inference_recorder.py index 674016fb1..1071da13f 100644 --- a/llama_stack/testing/inference_recorder.py +++ b/llama_stack/testing/inference_recorder.py @@ -203,7 +203,12 @@ def _model_identifiers_digest(endpoint: str, response: dict[str, Any]) -> str: - '/v1/models' (OpenAI): response body is: [ { id: ... }, ... ] Returns a list of unique identifiers or None if structure doesn't match. 
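The thread running through `files.py`, `inference_store.py`, `responses_store.py`, and `authorized_sqlstore.py` above: the access-control policy is now bound once, when the `AuthorizedSqlStore` is constructed, so every `fetch_all` / `fetch_one` call site drops its `policy=` argument. A sketch under the signatures shown in those hunks (the `sqlstore_impl` import path and the argument values are assumptions for illustration):

```python
# Sketch: policy moves from each query to the constructor.
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl  # assumed path

async def lookup_file(config, policy, file_id: str):
    store = AuthorizedSqlStore(sqlstore_impl(config), policy)  # policy bound once
    # ...so no per-call policy= kwarg is needed any more:
    return await store.fetch_one("openai_files", where={"id": file_id})
```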
""" - items = response["body"] + if "models" in response["body"]: + # ollama + items = response["body"]["models"] + else: + # openai + items = response["body"] idents = [m.model if endpoint == "/api/tags" else m.id for m in items] return sorted(set(idents)) diff --git a/llama_stack/ui/package-lock.json b/llama_stack/ui/package-lock.json index 3f22716c6..638655639 100644 --- a/llama_stack/ui/package-lock.json +++ b/llama_stack/ui/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "@radix-ui/react-collapsible": "^1.1.12", - "@radix-ui/react-dialog": "^1.1.13", + "@radix-ui/react-dialog": "^1.1.15", "@radix-ui/react-dropdown-menu": "^2.1.16", "@radix-ui/react-select": "^2.2.6", "@radix-ui/react-separator": "^1.1.7", @@ -27,7 +27,7 @@ "react-dom": "^19.1.1", "react-markdown": "^10.1.0", "remark-gfm": "^4.0.1", - "remeda": "^2.30.0", + "remeda": "^2.32.0", "shiki": "^1.29.2", "sonner": "^2.0.7", "tailwind-merge": "^3.3.1" @@ -47,7 +47,7 @@ "eslint-config-prettier": "^10.1.8", "eslint-plugin-prettier": "^5.5.4", "jest": "^29.7.0", - "jest-environment-jsdom": "^29.7.0", + "jest-environment-jsdom": "^30.1.2", "prettier": "3.6.2", "tailwindcss": "^4", "ts-node": "^10.9.2", @@ -89,6 +89,27 @@ "node": ">=6.0.0" } }, + "node_modules/@asamuzakjp/css-color": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz", + "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.3", + "@csstools/css-color-parser": "^3.0.9", + "@csstools/css-parser-algorithms": "^3.0.4", + "@csstools/css-tokenizer": "^3.0.3", + "lru-cache": "^10.4.3" + } + }, + "node_modules/@asamuzakjp/css-color/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, "node_modules/@babel/code-frame": { "version": "7.27.1", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", @@ -651,6 +672,121 @@ "@jridgewell/sourcemap-codec": "^1.4.10" } }, + "node_modules/@csstools/color-helpers": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@emnapi/core": { "version": "1.4.3", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.4.3.tgz", @@ -1591,6 +1727,235 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/@jest/environment-jsdom-abstract": { + "version": "30.1.2", + "resolved": "https://registry.npmjs.org/@jest/environment-jsdom-abstract/-/environment-jsdom-abstract-30.1.2.tgz", + "integrity": "sha512-u8kTh/ZBl97GOmnGJLYK/1GuwAruMC4hoP6xuk/kwltmVWsA9u/6fH1/CsPVGt2O+Wn2yEjs8n1B1zZJ62Cx0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "30.1.2", + "@jest/fake-timers": "30.1.2", + "@jest/types": "30.0.5", + "@types/jsdom": "^21.1.7", + "@types/node": "*", + "jest-mock": "30.0.5", + "jest-util": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "peerDependencies": { + "canvas": "^3.0.0", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/@jest/environment": { + "version": "30.1.2", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-30.1.2.tgz", + "integrity": "sha512-N8t1Ytw4/mr9uN28OnVf0SYE2dGhaIxOVYcwsf9IInBKjvofAjbFRvedvBBlyTYk2knbJTiEjEJ2PyyDIBnd9w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "30.1.2", + "@jest/types": "30.0.5", + "@types/node": "*", + "jest-mock": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/@jest/fake-timers": { + "version": "30.1.2", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-30.1.2.tgz", + "integrity": 
"sha512-Beljfv9AYkr9K+ETX9tvV61rJTY706BhBUtiaepQHeEGfe0DbpvUA5Z3fomwc5Xkhns6NWrcFDZn+72fLieUnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.0.5", + "@sinonjs/fake-timers": "^13.0.0", + "@types/node": "*", + "jest-message-util": "30.1.0", + "jest-mock": "30.0.5", + "jest-util": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/@jest/types": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.5.tgz", + "integrity": "sha512-aREYa3aku9SSnea4aX6bhKn4bgv3AXkgijoQgbYV3yvbiGt6z+MQ85+6mIhx9DsKW2BuB/cLR/A+tcMThx+KLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/pattern": "30.0.1", + "@jest/schemas": "30.0.5", + "@types/istanbul-lib-coverage": "^2.0.6", + "@types/istanbul-reports": "^3.0.4", + "@types/node": "*", + "@types/yargs": "^17.0.33", + "chalk": "^4.1.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/@sinonjs/fake-timers": { + "version": "13.0.5", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz", + "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.1" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/ci-info": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.3.0.tgz", + "integrity": "sha512-l+2bNRMiQgcfILUi33labAZYIWlH1kWDp+ecNo5iisRKrbm0xcRyCww71/YU0Fkw0mAFpz9bJayXPjey6vkmaQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/jest-message-util": { + "version": "30.1.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.1.0.tgz", + "integrity": "sha512-HizKDGG98cYkWmaLUHChq4iN+oCENohQLb7Z5guBPumYs+/etonmNFlg1Ps6yN9LTPyZn+M+b/9BbnHx3WTMDg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@babel/code-frame": "^7.27.1", + "@jest/types": "30.0.5", + "@types/stack-utils": "^2.0.3", + "chalk": "^4.1.2", + "graceful-fs": "^4.2.11", + "micromatch": "^4.0.8", + "pretty-format": "30.0.5", + "slash": "^3.0.0", + "stack-utils": "^2.0.6" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/jest-mock": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-30.0.5.tgz", + "integrity": "sha512-Od7TyasAAQX/6S+QCbN6vZoWOMwlTtzzGuxJku1GhGanAjz9y+QsQkpScDmETvdc9aSXyJ/Op4rhpMYBWW91wQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.0.5", + "@types/node": "*", + "jest-util": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/jest-util": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.5.tgz", + "integrity": "sha512-pvyPWssDZR0FlfMxCBoc0tvM8iUEskaRFALUtGQYzVEAqisAztmy+R8LnU14KT4XA0H/a5HMVTXat1jLne010g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.0.5", + "@types/node": "*", + "chalk": "^4.1.2", + "ci-info": "^4.2.0", + "graceful-fs": "^4.2.11", + "picomatch": "^4.0.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/pretty-format": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.5.tgz", + "integrity": "sha512-D1tKtYvByrBkFLe2wHJl2bwMJIiT8rW+XA+TiataH79/FszLQMrpGEvzUVkzPau7OCO0Qnrhpe87PqtOAIB8Yw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "30.0.5", + "ansi-styles": "^5.2.0", + "react-is": "^18.3.1" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment-jsdom-abstract/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, "node_modules/@jest/expect": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", @@ -1652,6 +2017,30 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/@jest/pattern": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/@jest/pattern/-/pattern-30.0.1.tgz", + "integrity": "sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-regex-util": "30.0.1" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/pattern/node_modules/jest-regex-util": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-30.0.1.tgz", + "integrity": 
"sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, "node_modules/@jest/reporters": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", @@ -2110,9 +2499,9 @@ "license": "MIT" }, "node_modules/@radix-ui/primitive": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.2.tgz", - "integrity": "sha512-XnbHrrprsNqZKQhStrSwgRUQzoCI1glLzdw79xiZPoofhGICeZRSQ3dIxAKH1gb3OHfNf4d6f+vAv3kil2eggA==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", "license": "MIT" }, "node_modules/@radix-ui/react-arrow": { @@ -2138,29 +2527,6 @@ } } }, - "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-collapsible": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.12.tgz", @@ -2191,59 +2557,6 @@ } } }, - "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/primitive": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", - "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", - "license": "MIT" - }, - "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/react-presence": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", - "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": 
"^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-collection": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", @@ -2270,29 +2583,6 @@ } } }, - "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-compose-refs": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", @@ -2324,22 +2614,22 @@ } }, "node_modules/@radix-ui/react-dialog": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.13.tgz", - "integrity": "sha512-ARFmqUyhIVS3+riWzwGTe7JLjqwqgnODBUZdqpWar/z1WFs9z76fuOs/2BOWCR+YboRn4/WN9aoaGVwqNRr8VA==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", + "integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", "license": "MIT", "dependencies": { - "@radix-ui/primitive": "1.1.2", + "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dismissable-layer": "1.1.9", - "@radix-ui/react-focus-guards": "1.1.2", - "@radix-ui/react-focus-scope": "1.1.6", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-portal": "1.1.8", - "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.2", - "@radix-ui/react-slot": "1.2.2", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" @@ -2359,24 +2649,6 @@ } } }, - "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.2.tgz", - "integrity": "sha512-y7TBO4xN4Y94FvcWIOIh18fM4R1A8S4q1jhoz4PNzOoHsFcN8pogcFmZrTYAm4F9VRUrWP/Mw7xSKybIeRI+CQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-direction": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", @@ -2393,14 +2665,14 @@ } }, "node_modules/@radix-ui/react-dismissable-layer": { - 
"version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.9.tgz", - "integrity": "sha512-way197PiTvNp+WBP7svMJasHl+vibhWGQDb6Mgf5mhEWJkgb85z7Lfl9TUdkqpWsf8GRNmoopx9ZxCyDzmgRMQ==", + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", "license": "MIT", "dependencies": { - "@radix-ui/primitive": "1.1.2", + "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.2", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-escape-keydown": "1.1.1" }, @@ -2448,39 +2720,10 @@ } } }, - "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/primitive": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", - "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", - "license": "MIT" - }, - "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-focus-guards": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.2.tgz", - "integrity": "sha512-fyjAACV62oPV925xFCrH8DR5xWhg9KYtJT4s3u54jxp+L/hbpTY2kIeEFFbFe+a/HCE94zGQMZLIpVTPVZDhaA==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", "license": "MIT", "peerDependencies": { "@types/react": "*", @@ -2493,13 +2736,13 @@ } }, "node_modules/@radix-ui/react-focus-scope": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.6.tgz", - "integrity": "sha512-r9zpYNUQY+2jWHWZGyddQLL9YHkM/XvSFHVcWs7bdVuxMAnCwTAuy6Pf47Z4nw7dYcUou1vg/VgjjrrH03VeBw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.2", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { @@ -2575,150 +2818,6 @@ } } }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/primitive": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", - "integrity": 
"sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", - "license": "MIT" - }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", - "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", - "license": "MIT", - "dependencies": { - "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "@radix-ui/react-use-escape-keydown": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-focus-guards": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", - "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-focus-scope": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", - "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-portal": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", - "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-presence": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", - "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", - "license": "MIT", - "dependencies": { - 
"@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-popper": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", @@ -2751,36 +2850,13 @@ } } }, - "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-portal": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.8.tgz", - "integrity": "sha512-hQsTUIn7p7fxCPvao/q6wpbxmCwgLrlz+nOrJgC+RwfZqWY/WN+UMqkXzrtKbPrF82P43eCTl3ekeKuyAQbFeg==", + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.2", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { @@ -2799,9 +2875,9 @@ } }, "node_modules/@radix-ui/react-presence": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.4.tgz", - "integrity": "sha512-ueDqRbdc4/bkaQT3GIpLQssRlFgWaL/U2z/S31qRwwLWoxHLgry3SIfCwhxeQNbirEUXFa+lq3RL3oBYXtcmIA==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", @@ -2823,12 +2899,12 @@ } }, "node_modules/@radix-ui/react-primitive": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.2.tgz", - 
"integrity": "sha512-uHa+l/lKfxuDD2zjN/0peM/RhhSmRjr5YWdk/37EnSv1nJ88uvG85DPexSm8HdFQROd2VdERJ6ynXbkCFi+APw==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-slot": "1.2.2" + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -2845,24 +2921,6 @@ } } }, - "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.2.tgz", - "integrity": "sha512-y7TBO4xN4Y94FvcWIOIh18fM4R1A8S4q1jhoz4PNzOoHsFcN8pogcFmZrTYAm4F9VRUrWP/Mw7xSKybIeRI+CQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-roving-focus": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", @@ -2894,35 +2952,6 @@ } } }, - "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/primitive": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", - "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", - "license": "MIT" - }, - "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-select": { "version": "2.2.6", "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", @@ -2966,126 +2995,6 @@ } } }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/primitive": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", - "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", - "license": "MIT" - }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", - "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", - "license": "MIT", - "dependencies": { - "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "@radix-ui/react-use-escape-keydown": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - 
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-guards": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", - "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-scope": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", - "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-portal": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", - "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-separator": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz", @@ -3109,29 +3018,6 @@ } } }, - "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": 
"sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-slot": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", @@ -3184,110 +3070,6 @@ } } }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/primitive": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", - "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", - "license": "MIT" - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", - "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", - "license": "MIT", - "dependencies": { - "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "@radix-ui/react-use-escape-keydown": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-portal": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", - "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-presence": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", - "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - 
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-use-callback-ref": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", @@ -3447,29 +3229,6 @@ } } }, - "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-slot": "1.2.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/rect": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", @@ -3970,16 +3729,6 @@ } } }, - "node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, "node_modules/@tsconfig/node10": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", @@ -4188,9 +3937,9 @@ "license": "MIT" }, "node_modules/@types/jsdom": { - "version": "20.0.1", - "resolved": "https://registry.npmjs.org/@types/jsdom/-/jsdom-20.0.1.tgz", - "integrity": "sha512-d0r18sZPmMQr1eG35u12FZfhIXNrnsPU/g5wvRKCUf/tOGilKKwYMYGqh33BNR6ba+2gkHw1EUiHoN3mn7E5IQ==", + "version": "21.1.7", + "resolved": "https://registry.npmjs.org/@types/jsdom/-/jsdom-21.1.7.tgz", + "integrity": "sha512-yOriVnggzrnQ3a9OKOCxaVuSug3w3/SbOj5i7VwXWZEyUNl3bLF9V3MfxGbZKuwqJOQyRfqXyROBB1CoZLFWzA==", "dev": true, "license": "MIT", "dependencies": { @@ -4838,14 +4587,6 @@ "win32" ] }, - "node_modules/abab": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", - "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", - "deprecated": "Use your platform's native atob() and btoa() methods instead", - "dev": true, - "license": "BSD-3-Clause" - }, "node_modules/abort-controller": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", @@ -4885,17 +4626,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-globals": { - "version": "7.0.1", - "resolved": 
"https://registry.npmjs.org/acorn-globals/-/acorn-globals-7.0.1.tgz", - "integrity": "sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "acorn": "^8.1.0", - "acorn-walk": "^8.0.2" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -4920,16 +4650,13 @@ } }, "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", "dev": true, "license": "MIT", - "dependencies": { - "debug": "4" - }, "engines": { - "node": ">= 6.0.0" + "node": ">= 14" } }, "node_modules/agentkeepalive": { @@ -5965,33 +5692,20 @@ "dev": true, "license": "MIT" }, - "node_modules/cssom": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz", - "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==", - "dev": true, - "license": "MIT" - }, "node_modules/cssstyle": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz", - "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz", + "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==", "dev": true, "license": "MIT", "dependencies": { - "cssom": "~0.3.6" + "@asamuzakjp/css-color": "^3.2.0", + "rrweb-cssom": "^0.8.0" }, "engines": { - "node": ">=8" + "node": ">=18" } }, - "node_modules/cssstyle/node_modules/cssom": { - "version": "0.3.8", - "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", - "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", - "dev": true, - "license": "MIT" - }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -6006,31 +5720,30 @@ "license": "BSD-2-Clause" }, "node_modules/data-urls": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz", - "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", + "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==", "dev": true, "license": "MIT", "dependencies": { - "abab": "^2.0.6", - "whatwg-mimetype": "^3.0.0", - "whatwg-url": "^11.0.0" + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^14.0.0" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/data-urls/node_modules/tr46": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", - "integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", + "integrity": 
"sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", "dev": true, "license": "MIT", "dependencies": { - "punycode": "^2.1.1" + "punycode": "^2.3.1" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/data-urls/node_modules/webidl-conversions": { @@ -6044,17 +5757,17 @@ } }, "node_modules/data-urls/node_modules/whatwg-url": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz", - "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==", + "version": "14.2.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", + "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", "dev": true, "license": "MIT", "dependencies": { - "tr46": "^3.0.0", + "tr46": "^5.1.0", "webidl-conversions": "^7.0.0" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/data-view-buffer": { @@ -6129,9 +5842,9 @@ } }, "node_modules/decimal.js": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.5.0.tgz", - "integrity": "sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw==", + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", "dev": true, "license": "MIT" }, @@ -6323,30 +6036,6 @@ "dev": true, "license": "MIT" }, - "node_modules/domexception": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", - "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", - "deprecated": "Use your platform's native DOMException instead", - "dev": true, - "license": "MIT", - "dependencies": { - "webidl-conversions": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/domexception/node_modules/webidl-conversions": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", - "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=12" - } - }, "node_modules/dunder-proto": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", @@ -6426,9 +6115,9 @@ } }, "node_modules/entities": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.0.tgz", - "integrity": "sha512-aKstq2TDOndCn4diEyp9Uq/Flu2i1GlLkc6XIDQSDMuaFE3OPW5OphLCyQ5SpSJZTb4reN+kTcYru5yIfXoRPw==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", "dev": true, "license": "BSD-2-Clause", "engines": { @@ -6655,28 +6344,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/escodegen": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", - "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esprima": "^4.0.1", - "estraverse": "^5.2.0", - "esutils": "^2.0.2" 
- }, - "bin": { - "escodegen": "bin/escodegen.js", - "esgenerate": "bin/esgenerate.js" - }, - "engines": { - "node": ">=6.0" - }, - "optionalDependencies": { - "source-map": "~0.6.1" - } - }, "node_modules/eslint": { "version": "9.26.0", "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.26.0.tgz", @@ -8040,16 +7707,16 @@ } }, "node_modules/html-encoding-sniffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", - "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", "dev": true, "license": "MIT", "dependencies": { - "whatwg-encoding": "^2.0.0" + "whatwg-encoding": "^3.1.1" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/html-escaper": { @@ -8097,32 +7764,31 @@ } }, "node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", "dev": true, "license": "MIT", "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" + "agent-base": "^7.1.0", + "debug": "^4.3.4" }, "engines": { - "node": ">= 6" + "node": ">= 14" } }, "node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", "dev": true, "license": "MIT", "dependencies": { - "agent-base": "6", + "agent-base": "^7.1.2", "debug": "4" }, "engines": { - "node": ">= 6" + "node": ">= 14" } }, "node_modules/human-signals": { @@ -9216,26 +8882,23 @@ "license": "MIT" }, "node_modules/jest-environment-jsdom": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz", - "integrity": "sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA==", + "version": "30.1.2", + "resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-30.1.2.tgz", + "integrity": "sha512-LXsfAh5+mDTuXDONGl1ZLYxtJEaS06GOoxJb2arcJTjIfh1adYg8zLD8f6P0df8VmjvCaMrLmc1PgHUI/YUTbg==", "dev": true, "license": "MIT", "dependencies": { - "@jest/environment": "^29.7.0", - "@jest/fake-timers": "^29.7.0", - "@jest/types": "^29.6.3", - "@types/jsdom": "^20.0.0", + "@jest/environment": "30.1.2", + "@jest/environment-jsdom-abstract": "30.1.2", + "@types/jsdom": "^21.1.7", "@types/node": "*", - "jest-mock": "^29.7.0", - "jest-util": "^29.7.0", - "jsdom": "^20.0.0" + "jsdom": "^26.1.0" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" }, 
"peerDependencies": { - "canvas": "^2.5.0" + "canvas": "^3.0.0" }, "peerDependenciesMeta": { "canvas": { @@ -9243,6 +8906,207 @@ } } }, + "node_modules/jest-environment-jsdom/node_modules/@jest/environment": { + "version": "30.1.2", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-30.1.2.tgz", + "integrity": "sha512-N8t1Ytw4/mr9uN28OnVf0SYE2dGhaIxOVYcwsf9IInBKjvofAjbFRvedvBBlyTYk2knbJTiEjEJ2PyyDIBnd9w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "30.1.2", + "@jest/types": "30.0.5", + "@types/node": "*", + "jest-mock": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/@jest/fake-timers": { + "version": "30.1.2", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-30.1.2.tgz", + "integrity": "sha512-Beljfv9AYkr9K+ETX9tvV61rJTY706BhBUtiaepQHeEGfe0DbpvUA5Z3fomwc5Xkhns6NWrcFDZn+72fLieUnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.0.5", + "@sinonjs/fake-timers": "^13.0.0", + "@types/node": "*", + "jest-message-util": "30.1.0", + "jest-mock": "30.0.5", + "jest-util": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/@jest/types": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.5.tgz", + "integrity": "sha512-aREYa3aku9SSnea4aX6bhKn4bgv3AXkgijoQgbYV3yvbiGt6z+MQ85+6mIhx9DsKW2BuB/cLR/A+tcMThx+KLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/pattern": "30.0.1", + "@jest/schemas": "30.0.5", + "@types/istanbul-lib-coverage": "^2.0.6", + "@types/istanbul-reports": "^3.0.4", + "@types/node": "*", + "@types/yargs": "^17.0.33", + "chalk": "^4.1.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-environment-jsdom/node_modules/@sinonjs/fake-timers": { + "version": "13.0.5", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz", + "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.1" + } + }, + "node_modules/jest-environment-jsdom/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, 
+ "node_modules/jest-environment-jsdom/node_modules/ci-info": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.3.0.tgz", + "integrity": "sha512-l+2bNRMiQgcfILUi33labAZYIWlH1kWDp+ecNo5iisRKrbm0xcRyCww71/YU0Fkw0mAFpz9bJayXPjey6vkmaQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-environment-jsdom/node_modules/jest-message-util": { + "version": "30.1.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.1.0.tgz", + "integrity": "sha512-HizKDGG98cYkWmaLUHChq4iN+oCENohQLb7Z5guBPumYs+/etonmNFlg1Ps6yN9LTPyZn+M+b/9BbnHx3WTMDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@jest/types": "30.0.5", + "@types/stack-utils": "^2.0.3", + "chalk": "^4.1.2", + "graceful-fs": "^4.2.11", + "micromatch": "^4.0.8", + "pretty-format": "30.0.5", + "slash": "^3.0.0", + "stack-utils": "^2.0.6" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/jest-mock": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-30.0.5.tgz", + "integrity": "sha512-Od7TyasAAQX/6S+QCbN6vZoWOMwlTtzzGuxJku1GhGanAjz9y+QsQkpScDmETvdc9aSXyJ/Op4rhpMYBWW91wQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.0.5", + "@types/node": "*", + "jest-util": "30.0.5" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/jest-util": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.5.tgz", + "integrity": "sha512-pvyPWssDZR0FlfMxCBoc0tvM8iUEskaRFALUtGQYzVEAqisAztmy+R8LnU14KT4XA0H/a5HMVTXat1jLne010g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.0.5", + "@types/node": "*", + "chalk": "^4.1.2", + "ci-info": "^4.2.0", + "graceful-fs": "^4.2.11", + "picomatch": "^4.0.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/jest-environment-jsdom/node_modules/pretty-format": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.5.tgz", + "integrity": "sha512-D1tKtYvByrBkFLe2wHJl2bwMJIiT8rW+XA+TiataH79/FszLQMrpGEvzUVkzPau7OCO0Qnrhpe87PqtOAIB8Yw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "30.0.5", + "ansi-styles": "^5.2.0", + "react-is": "^18.3.1" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-environment-jsdom/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, "node_modules/jest-environment-node": { "version": "29.7.0", "resolved": 
"https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", @@ -9851,44 +9715,38 @@ } }, "node_modules/jsdom": { - "version": "20.0.3", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-20.0.3.tgz", - "integrity": "sha512-SYhBvTh89tTfCD/CRdSOm13mOBa42iTaTyfyEWBdKcGdPxPtLFBXuHR8XHb33YNYaP+lLbmSvBTsnoesCNJEsQ==", + "version": "26.1.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-26.1.0.tgz", + "integrity": "sha512-Cvc9WUhxSMEo4McES3P7oK3QaXldCfNWp7pl2NNeiIFlCoLr3kfq9kb1fxftiwk1FLV7CvpvDfonxtzUDeSOPg==", "dev": true, "license": "MIT", "dependencies": { - "abab": "^2.0.6", - "acorn": "^8.8.1", - "acorn-globals": "^7.0.0", - "cssom": "^0.5.0", - "cssstyle": "^2.3.0", - "data-urls": "^3.0.2", - "decimal.js": "^10.4.2", - "domexception": "^4.0.0", - "escodegen": "^2.0.0", - "form-data": "^4.0.0", - "html-encoding-sniffer": "^3.0.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.1", + "cssstyle": "^4.2.1", + "data-urls": "^5.0.0", + "decimal.js": "^10.5.0", + "html-encoding-sniffer": "^4.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", - "nwsapi": "^2.2.2", - "parse5": "^7.1.1", + "nwsapi": "^2.2.16", + "parse5": "^7.2.1", + "rrweb-cssom": "^0.8.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", - "tough-cookie": "^4.1.2", - "w3c-xmlserializer": "^4.0.0", + "tough-cookie": "^5.1.1", + "w3c-xmlserializer": "^5.0.0", "webidl-conversions": "^7.0.0", - "whatwg-encoding": "^2.0.0", - "whatwg-mimetype": "^3.0.0", - "whatwg-url": "^11.0.0", - "ws": "^8.11.0", - "xml-name-validator": "^4.0.0" + "whatwg-encoding": "^3.1.1", + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^14.1.1", + "ws": "^8.18.0", + "xml-name-validator": "^5.0.0" }, "engines": { - "node": ">=14" + "node": ">=18" }, "peerDependencies": { - "canvas": "^2.5.0" + "canvas": "^3.0.0" }, "peerDependenciesMeta": { "canvas": { @@ -9897,16 +9755,16 @@ } }, "node_modules/jsdom/node_modules/tr46": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", - "integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", + "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", "dev": true, "license": "MIT", "dependencies": { - "punycode": "^2.1.1" + "punycode": "^2.3.1" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/jsdom/node_modules/webidl-conversions": { @@ -9920,17 +9778,17 @@ } }, "node_modules/jsdom/node_modules/whatwg-url": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz", - "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==", + "version": "14.2.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", + "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", "dev": true, "license": "MIT", "dependencies": { - "tr46": "^3.0.0", + "tr46": "^5.1.0", "webidl-conversions": "^7.0.0" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/jsesc": { @@ -11763,9 +11621,9 @@ } }, "node_modules/nwsapi": { - "version": "2.2.20", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.20.tgz", - "integrity": 
"sha512-/ieB+mDe4MrrKMT8z+mQL8klXydZWGR5Dowt4RAGKbJ3kIGEx3X4ljUo+6V73IXtUPWgfOlU5B9MlGxFO5T+cA==", + "version": "2.2.22", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.22.tgz", + "integrity": "sha512-ujSMe1OWVn55euT1ihwCI1ZcAaAU3nxUiDwfDQldc51ZXaB9m2AyOn6/jh1BLe2t/G8xd6uKG1UBF2aZJeg2SQ==", "dev": true, "license": "MIT" }, @@ -12503,19 +12361,6 @@ "node": ">= 0.10" } }, - "node_modules/psl": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", - "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.3.1" - }, - "funding": { - "url": "https://github.com/sponsors/lupomontero" - } - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -12559,13 +12404,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true, - "license": "MIT" - }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -12887,9 +12725,9 @@ } }, "node_modules/remeda": { - "version": "2.30.0", - "resolved": "https://registry.npmjs.org/remeda/-/remeda-2.30.0.tgz", - "integrity": "sha512-TcRpI1ecqnMer3jHhFtMerGvHFCDlCHljUp0/9A4HxHOh5bSY3kP1l8nQDFMnWYJKl3MSarDNY1tb0Bs/bCmvw==", + "version": "2.32.0", + "resolved": "https://registry.npmjs.org/remeda/-/remeda-2.32.0.tgz", + "integrity": "sha512-BZx9DsT4FAgXDTOdgJIc5eY6ECIXMwtlSPQoPglF20ycSWigttDDe88AozEsPPT4OWk5NujroGSBC1phw5uU+w==", "license": "MIT", "dependencies": { "type-fest": "^4.41.0" @@ -12917,13 +12755,6 @@ "node": ">=0.10.0" } }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true, - "license": "MIT" - }, "node_modules/resolve": { "version": "1.22.10", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", @@ -13026,6 +12857,13 @@ "node": ">= 18" } }, + "node_modules/rrweb-cssom": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", + "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", + "dev": true, + "license": "MIT" + }, "node_modules/run-parallel": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", @@ -13966,6 +13804,26 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/tldts": { + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.86.tgz", + "integrity": "sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^6.1.86" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.86.tgz", + "integrity": "sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==", + "dev": 
true, + "license": "MIT" + }, "node_modules/tmpl": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", @@ -13997,19 +13855,16 @@ } }, "node_modules/tough-cookie": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", - "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.1.2.tgz", + "integrity": "sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==", "dev": true, "license": "BSD-3-Clause", "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" + "tldts": "^6.1.32" }, "engines": { - "node": ">=6" + "node": ">=16" } }, "node_modules/tr46": { @@ -14379,16 +14234,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/universalify": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4.0.0" - } - }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", @@ -14473,17 +14318,6 @@ "punycode": "^2.1.0" } }, - "node_modules/url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, "node_modules/use-callback-ref": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", @@ -14597,16 +14431,16 @@ } }, "node_modules/w3c-xmlserializer": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz", - "integrity": "sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", "dev": true, "license": "MIT", "dependencies": { - "xml-name-validator": "^4.0.0" + "xml-name-validator": "^5.0.0" }, "engines": { - "node": ">=14" + "node": ">=18" } }, "node_modules/walker": { @@ -14635,26 +14469,26 @@ "license": "BSD-2-Clause" }, "node_modules/whatwg-encoding": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", - "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", "dev": true, "license": "MIT", "dependencies": { "iconv-lite": "0.6.3" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/whatwg-mimetype": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz", - "integrity": 
"sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", "dev": true, "license": "MIT", "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/whatwg-url": { @@ -14822,9 +14656,9 @@ } }, "node_modules/ws": { - "version": "8.18.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz", - "integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==", + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", "dev": true, "license": "MIT", "engines": { @@ -14844,13 +14678,13 @@ } }, "node_modules/xml-name-validator": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", - "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", "dev": true, "license": "Apache-2.0", "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/xmlchars": { diff --git a/llama_stack/ui/package.json b/llama_stack/ui/package.json index 2b0322b28..2ba81ea84 100644 --- a/llama_stack/ui/package.json +++ b/llama_stack/ui/package.json @@ -14,7 +14,7 @@ }, "dependencies": { "@radix-ui/react-collapsible": "^1.1.12", - "@radix-ui/react-dialog": "^1.1.13", + "@radix-ui/react-dialog": "^1.1.15", "@radix-ui/react-dropdown-menu": "^2.1.16", "@radix-ui/react-select": "^2.2.6", "@radix-ui/react-separator": "^1.1.7", @@ -32,7 +32,7 @@ "react-dom": "^19.1.1", "react-markdown": "^10.1.0", "remark-gfm": "^4.0.1", - "remeda": "^2.30.0", + "remeda": "^2.32.0", "shiki": "^1.29.2", "sonner": "^2.0.7", "tailwind-merge": "^3.3.1" @@ -52,7 +52,7 @@ "eslint-config-prettier": "^10.1.8", "eslint-plugin-prettier": "^5.5.4", "jest": "^29.7.0", - "jest-environment-jsdom": "^29.7.0", + "jest-environment-jsdom": "^30.1.2", "prettier": "3.6.2", "tailwindcss": "^4", "ts-node": "^10.9.2", diff --git a/tests/external/kaze.yaml b/tests/external/kaze.yaml index c61ac0e31..1b42f2e14 100644 --- a/tests/external/kaze.yaml +++ b/tests/external/kaze.yaml @@ -1,6 +1,5 @@ -adapter: - adapter_type: kaze - pip_packages: ["tests/external/llama-stack-provider-kaze"] - config_class: llama_stack_provider_kaze.config.KazeProviderConfig - module: llama_stack_provider_kaze +adapter_type: kaze +pip_packages: ["tests/external/llama-stack-provider-kaze"] +config_class: llama_stack_provider_kaze.config.KazeProviderConfig +module: llama_stack_provider_kaze optional_api_dependencies: [] diff --git a/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py b/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py index 4b3bfb641..de1427bfd 100644 --- a/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py +++ b/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py @@ -6,7 +6,7 @@ from typing import Protocol -from llama_stack.providers.datatypes import AdapterSpec, Api, 
ProviderSpec, RemoteProviderSpec +from llama_stack.providers.datatypes import Api, ProviderSpec, RemoteProviderSpec from llama_stack.schema_utils import webmethod @@ -16,12 +16,9 @@ def available_providers() -> list[ProviderSpec]: api=Api.weather, provider_type="remote::kaze", config_class="llama_stack_provider_kaze.KazeProviderConfig", - adapter=AdapterSpec( - adapter_type="kaze", - module="llama_stack_provider_kaze", - pip_packages=["llama_stack_provider_kaze"], - config_class="llama_stack_provider_kaze.KazeProviderConfig", - ), + adapter_type="kaze", + module="llama_stack_provider_kaze", + pip_packages=["llama_stack_provider_kaze"], ), ] diff --git a/tests/integration/inference/test_openai_embeddings.py b/tests/integration/inference/test_openai_embeddings.py index 622b97287..ce3d2a8ea 100644 --- a/tests/integration/inference/test_openai_embeddings.py +++ b/tests/integration/inference/test_openai_embeddings.py @@ -33,6 +33,7 @@ def skip_if_model_doesnt_support_user_param(client, model_id): provider = provider_from_model(client, model_id) if provider.provider_type in ( "remote::together", # service returns 400 + "remote::fireworks", # service returns 400 malformed input ): pytest.skip(f"Model {model_id} hosted by {provider.provider_type} does not support user param.") @@ -41,6 +42,7 @@ def skip_if_model_doesnt_support_encoding_format_base64(client, model_id): provider = provider_from_model(client, model_id) if provider.provider_type in ( "remote::together", # param silently ignored, always returns floats + "remote::fireworks", # param silently ignored, always returns list of floats ): pytest.skip(f"Model {model_id} hosted by {provider.provider_type} does not support encoding_format='base64'.") @@ -287,7 +289,6 @@ def test_openai_embeddings_base64_batch_processing(compat_client, client_with_mo input=input_texts, encoding_format="base64", ) - # Validate response structure assert response.object == "list" assert response.model == embedding_model_id diff --git a/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py b/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py index 4002f2e1f..98bef0f2c 100644 --- a/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py +++ b/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py @@ -57,7 +57,7 @@ def authorized_store(backend_config): config = config_func() base_sqlstore = sqlstore_impl(config) - authorized_store = AuthorizedSqlStore(base_sqlstore) + authorized_store = AuthorizedSqlStore(base_sqlstore, default_policy()) yield authorized_store @@ -106,7 +106,7 @@ async def test_authorized_store_attributes(mock_get_authenticated_user, authoriz await authorized_store.insert(table_name, {"id": "1", "data": "public_data"}) # Test fetching with no user - should not error on JSON comparison - result = await authorized_store.fetch_all(table_name, policy=default_policy()) + result = await authorized_store.fetch_all(table_name) assert len(result.data) == 1 assert result.data[0]["id"] == "1" assert result.data[0]["access_attributes"] is None @@ -119,7 +119,7 @@ async def test_authorized_store_attributes(mock_get_authenticated_user, authoriz await authorized_store.insert(table_name, {"id": "2", "data": "admin_data"}) # Fetch all - admin should see both - result = await authorized_store.fetch_all(table_name, policy=default_policy()) + result = await authorized_store.fetch_all(table_name) assert len(result.data) == 2 # Test with non-admin user @@ -127,7 +127,7 @@ async def 
test_authorized_store_attributes(mock_get_authenticated_user, authoriz mock_get_authenticated_user.return_value = regular_user # Should only see public record - result = await authorized_store.fetch_all(table_name, policy=default_policy()) + result = await authorized_store.fetch_all(table_name) assert len(result.data) == 1 assert result.data[0]["id"] == "1" @@ -156,7 +156,7 @@ async def test_authorized_store_attributes(mock_get_authenticated_user, authoriz # Now test with the multi-user who has both roles=admin and teams=dev mock_get_authenticated_user.return_value = multi_user - result = await authorized_store.fetch_all(table_name, policy=default_policy()) + result = await authorized_store.fetch_all(table_name) # Should see: # - public record (1) - no access_attributes @@ -217,21 +217,24 @@ async def test_user_ownership_policy(mock_get_authenticated_user, authorized_sto ), ] + # Create a new authorized store with the owner-only policy + owner_only_store = AuthorizedSqlStore(authorized_store.sql_store, owner_only_policy) + # Test user1 access - should only see their own record mock_get_authenticated_user.return_value = user1 - result = await authorized_store.fetch_all(table_name, policy=owner_only_policy) + result = await owner_only_store.fetch_all(table_name) assert len(result.data) == 1, f"Expected user1 to see 1 record, got {len(result.data)}" assert result.data[0]["id"] == "1", f"Expected user1's record, got {result.data[0]['id']}" # Test user2 access - should only see their own record mock_get_authenticated_user.return_value = user2 - result = await authorized_store.fetch_all(table_name, policy=owner_only_policy) + result = await owner_only_store.fetch_all(table_name) assert len(result.data) == 1, f"Expected user2 to see 1 record, got {len(result.data)}" assert result.data[0]["id"] == "2", f"Expected user2's record, got {result.data[0]['id']}" # Test with anonymous user - should see no records mock_get_authenticated_user.return_value = None - result = await authorized_store.fetch_all(table_name, policy=owner_only_policy) + result = await owner_only_store.fetch_all(table_name) assert len(result.data) == 0, f"Expected anonymous user to see 0 records, got {len(result.data)}" finally: diff --git a/tests/integration/recordings/responses/3a81146f2afa.json b/tests/integration/recordings/responses/3a81146f2afa.json new file mode 100644 index 000000000..e2d2d52d6 --- /dev/null +++ b/tests/integration/recordings/responses/3a81146f2afa.json @@ -0,0 +1,990 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/completions", + "headers": {}, + "body": { + "model": "llama3.2:3b-instruct-fp16", + "prompt": "Respond to this question and explain your answer. 
Complete the sentence using one word: Roses are red, violets are ", + "max_tokens": 50, + "stream": true, + "extra_body": {} + }, + "endpoint": "/v1/completions", + "model": "llama3.2:3b-instruct-fp16" + }, + "response": { + "body": [ + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "Blue" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": ".\n\n" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "The" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " completed" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " sentence" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " is" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " a" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " well" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "-known" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + 
"finish_reason": null, + "index": 0, + "logprobs": null, + "text": " phrase" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " from" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " a" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " traditional" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " English" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " poem" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": ":\n\n" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "\"" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "R" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "oses" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + 
"finish_reason": null, + "index": 0, + "logprobs": null, + "text": " are" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " red" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "," + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " v" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "io" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "lets" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " are" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " blue" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": ",\n" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "Sugar" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": 
null, + "index": 0, + "logprobs": null, + "text": " is" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " sweet" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "," + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " and" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " so" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " are" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " you" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": ".\"" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " However" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "," + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 
0, + "logprobs": null, + "text": " in" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " many" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " variations" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " of" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " this" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " poem" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": "," + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " the" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " line" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + "logprobs": null, + "text": " \"" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": null, + "index": 0, + 
"logprobs": null, + "text": "vio" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-439", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "logprobs": null, + "text": "" + } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": null + } + } + ], + "is_streaming": true + } +} diff --git a/tests/integration/recordings/responses/6412295819a1.json b/tests/integration/recordings/responses/6412295819a1.json new file mode 100644 index 000000000..728380b02 --- /dev/null +++ b/tests/integration/recordings/responses/6412295819a1.json @@ -0,0 +1,43 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/completions", + "headers": {}, + "body": { + "model": "llama3.2:3b-instruct-fp16", + "prompt": "Respond to this question and explain your answer. Complete the sentence using one word: Roses are red, violets are ", + "stream": false, + "extra_body": {} + }, + "endpoint": "/v1/completions", + "model": "llama3.2:3b-instruct-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-104", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "logprobs": null, + "text": "blue.\n\nI completed the sentence with \"blue\" because it is a common completion used to complete the traditional nursery rhyme, which ends with:\n\nRoses are red,\nViolets are blue.\n\nThe complete rhyme is often remembered and recited as follows:\n\nRoses are red,\nViolets are blue,\nSugar is sweet,\nAnd so are you!" 
+ } + ], + "created": 1757857132, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": { + "completion_tokens": 72, + "prompt_tokens": 50, + "total_tokens": 122, + "completion_tokens_details": null, + "prompt_tokens_details": null + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/ecae140151d1.json b/tests/integration/recordings/responses/ecae140151d1.json new file mode 100644 index 000000000..433597080 --- /dev/null +++ b/tests/integration/recordings/responses/ecae140151d1.json @@ -0,0 +1,43 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/completions", + "headers": {}, + "body": { + "model": "llama3.2:3b-instruct-fp16", + "prompt": "Say completions", + "max_tokens": 20, + "extra_body": {} + }, + "endpoint": "/v1/completions", + "model": "llama3.2:3b-instruct-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.completion.Completion", + "__data__": { + "id": "cmpl-406", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "logprobs": null, + "text": "Sure, I'd be happy to provide some definitions and examples of related words or phrases.\n\nTo better" + } + ], + "created": 1757857133, + "model": "llama3.2:3b-instruct-fp16", + "object": "text_completion", + "system_fingerprint": "fp_ollama", + "usage": { + "completion_tokens": 20, + "prompt_tokens": 28, + "total_tokens": 48, + "completion_tokens_details": null, + "prompt_tokens_details": null + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/suites.py b/tests/integration/suites.py index 86fa37ed2..6f6bc02a1 100644 --- a/tests/integration/suites.py +++ b/tests/integration/suites.py @@ -115,6 +115,15 @@ SETUP_DEFINITIONS: dict[str, Setup] = { "text_model": "cerebras/llama-3.3-70b", }, ), + "fireworks": Setup( + name="fireworks", + description="Fireworks provider with a text model", + defaults={ + "text_model": "accounts/fireworks/models/llama-v3p1-8b-instruct", + "vision_model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct", + "embedding_model": "nomic-ai/nomic-embed-text-v1.5", + }, + ), } diff --git a/tests/unit/distribution/test_distribution.py b/tests/unit/distribution/test_distribution.py index c6c2eb2c7..f24de0644 100644 --- a/tests/unit/distribution/test_distribution.py +++ b/tests/unit/distribution/test_distribution.py @@ -66,10 +66,9 @@ def base_config(tmp_path): def provider_spec_yaml(): """Common provider spec YAML for testing.""" return """ -adapter: - adapter_type: test_provider - config_class: test_provider.config.TestProviderConfig - module: test_provider +adapter_type: test_provider +config_class: test_provider.config.TestProviderConfig +module: test_provider api_dependencies: - safety """ @@ -182,9 +181,9 @@ class TestProviderRegistry: assert Api.inference in registry assert "remote::test_provider" in registry[Api.inference] provider = registry[Api.inference]["remote::test_provider"] - assert provider.adapter.adapter_type == "test_provider" - assert provider.adapter.module == "test_provider" - assert provider.adapter.config_class == "test_provider.config.TestProviderConfig" + assert provider.adapter_type == "test_provider" + assert provider.module == "test_provider" + assert provider.config_class == "test_provider.config.TestProviderConfig" assert Api.safety in provider.api_dependencies def test_external_inline_providers(self, api_directories, mock_providers, base_config, inline_provider_spec_yaml): @@ -246,8 +245,7 @@ class 
TestProviderRegistry: """Test handling of malformed remote provider spec (missing required fields).""" remote_dir, _ = api_directories malformed_spec = """ -adapter: - adapter_type: test_provider +adapter_type: test_provider # Missing required fields api_dependencies: - safety @@ -270,7 +268,7 @@ pip_packages: with open(inline_dir / "malformed.yaml", "w") as f: f.write(malformed_spec) - with pytest.raises(KeyError) as exc_info: + with pytest.raises(ValidationError) as exc_info: get_provider_registry(base_config) assert "config_class" in str(exc_info.value) diff --git a/tests/unit/distribution/test_library_client_initialization.py b/tests/unit/distribution/test_library_client_initialization.py index b7e7a1857..b01a5c3e2 100644 --- a/tests/unit/distribution/test_library_client_initialization.py +++ b/tests/unit/distribution/test_library_client_initialization.py @@ -27,13 +27,17 @@ class TestLlamaStackAsLibraryClientAutoInitialization: mock_impls = {} mock_route_impls = RouteImpls({}) - async def mock_construct_stack(config, custom_provider_registry): - return mock_impls + class MockStack: + def __init__(self, config, custom_provider_registry=None): + self.impls = mock_impls + + async def initialize(self): + pass def mock_initialize_route_impls(impls): return mock_route_impls - monkeypatch.setattr("llama_stack.core.library_client.construct_stack", mock_construct_stack) + monkeypatch.setattr("llama_stack.core.library_client.Stack", MockStack) monkeypatch.setattr("llama_stack.core.library_client.initialize_route_impls", mock_initialize_route_impls) client = LlamaStackAsLibraryClient("ci-tests") @@ -46,13 +50,17 @@ class TestLlamaStackAsLibraryClientAutoInitialization: mock_impls = {} mock_route_impls = RouteImpls({}) - async def mock_construct_stack(config, custom_provider_registry): - return mock_impls + class MockStack: + def __init__(self, config, custom_provider_registry=None): + self.impls = mock_impls + + async def initialize(self): + pass def mock_initialize_route_impls(impls): return mock_route_impls - monkeypatch.setattr("llama_stack.core.library_client.construct_stack", mock_construct_stack) + monkeypatch.setattr("llama_stack.core.library_client.Stack", MockStack) monkeypatch.setattr("llama_stack.core.library_client.initialize_route_impls", mock_initialize_route_impls) client = AsyncLlamaStackAsLibraryClient("ci-tests") @@ -68,13 +76,17 @@ class TestLlamaStackAsLibraryClientAutoInitialization: mock_impls = {} mock_route_impls = RouteImpls({}) - async def mock_construct_stack(config, custom_provider_registry): - return mock_impls + class MockStack: + def __init__(self, config, custom_provider_registry=None): + self.impls = mock_impls + + async def initialize(self): + pass def mock_initialize_route_impls(impls): return mock_route_impls - monkeypatch.setattr("llama_stack.core.library_client.construct_stack", mock_construct_stack) + monkeypatch.setattr("llama_stack.core.library_client.Stack", MockStack) monkeypatch.setattr("llama_stack.core.library_client.initialize_route_impls", mock_initialize_route_impls) client = LlamaStackAsLibraryClient("ci-tests") @@ -90,13 +102,17 @@ class TestLlamaStackAsLibraryClientAutoInitialization: mock_impls = {} mock_route_impls = RouteImpls({}) - async def mock_construct_stack(config, custom_provider_registry): - return mock_impls + class MockStack: + def __init__(self, config, custom_provider_registry=None): + self.impls = mock_impls + + async def initialize(self): + pass def mock_initialize_route_impls(impls): return mock_route_impls - 
monkeypatch.setattr("llama_stack.core.library_client.construct_stack", mock_construct_stack) + monkeypatch.setattr("llama_stack.core.library_client.Stack", MockStack) monkeypatch.setattr("llama_stack.core.library_client.initialize_route_impls", mock_initialize_route_impls) client = AsyncLlamaStackAsLibraryClient("ci-tests") @@ -112,13 +128,17 @@ class TestLlamaStackAsLibraryClientAutoInitialization: mock_impls = {} mock_route_impls = RouteImpls({}) - async def mock_construct_stack(config, custom_provider_registry): - return mock_impls + class MockStack: + def __init__(self, config, custom_provider_registry=None): + self.impls = mock_impls + + async def initialize(self): + pass def mock_initialize_route_impls(impls): return mock_route_impls - monkeypatch.setattr("llama_stack.core.library_client.construct_stack", mock_construct_stack) + monkeypatch.setattr("llama_stack.core.library_client.Stack", MockStack) monkeypatch.setattr("llama_stack.core.library_client.initialize_route_impls", mock_initialize_route_impls) sync_client = LlamaStackAsLibraryClient("ci-tests") diff --git a/tests/unit/utils/test_authorized_sqlstore.py b/tests/unit/utils/test_authorized_sqlstore.py index 90eb706e4..d85e784a9 100644 --- a/tests/unit/utils/test_authorized_sqlstore.py +++ b/tests/unit/utils/test_authorized_sqlstore.py @@ -26,7 +26,7 @@ async def test_authorized_fetch_with_where_sql_access_control(mock_get_authentic db_path=tmp_dir + "/" + db_name, ) ) - sqlstore = AuthorizedSqlStore(base_sqlstore) + sqlstore = AuthorizedSqlStore(base_sqlstore, default_policy()) # Create table with access control await sqlstore.create_table( @@ -56,24 +56,24 @@ async def test_authorized_fetch_with_where_sql_access_control(mock_get_authentic mock_get_authenticated_user.return_value = admin_user # Admin should see both documents - result = await sqlstore.fetch_all("documents", policy=default_policy(), where={"id": 1}) + result = await sqlstore.fetch_all("documents", where={"id": 1}) assert len(result.data) == 1 assert result.data[0]["title"] == "Admin Document" # User should only see their document mock_get_authenticated_user.return_value = regular_user - result = await sqlstore.fetch_all("documents", policy=default_policy(), where={"id": 1}) + result = await sqlstore.fetch_all("documents", where={"id": 1}) assert len(result.data) == 0 - result = await sqlstore.fetch_all("documents", policy=default_policy(), where={"id": 2}) + result = await sqlstore.fetch_all("documents", where={"id": 2}) assert len(result.data) == 1 assert result.data[0]["title"] == "User Document" - row = await sqlstore.fetch_one("documents", policy=default_policy(), where={"id": 1}) + row = await sqlstore.fetch_one("documents", where={"id": 1}) assert row is None - row = await sqlstore.fetch_one("documents", policy=default_policy(), where={"id": 2}) + row = await sqlstore.fetch_one("documents", where={"id": 2}) assert row is not None assert row["title"] == "User Document" @@ -88,7 +88,7 @@ async def test_sql_policy_consistency(mock_get_authenticated_user): db_path=tmp_dir + "/" + db_name, ) ) - sqlstore = AuthorizedSqlStore(base_sqlstore) + sqlstore = AuthorizedSqlStore(base_sqlstore, default_policy()) await sqlstore.create_table( table="resources", @@ -144,7 +144,7 @@ async def test_sql_policy_consistency(mock_get_authenticated_user): user = User(principal=user_data["principal"], attributes=user_data["attributes"]) mock_get_authenticated_user.return_value = user - sql_results = await sqlstore.fetch_all("resources", policy=policy) + sql_results = await 
sqlstore.fetch_all("resources") sql_ids = {row["id"] for row in sql_results.data} policy_ids = set() for scenario in test_scenarios: @@ -174,7 +174,7 @@ async def test_authorized_store_user_attribute_capture(mock_get_authenticated_us db_path=tmp_dir + "/" + db_name, ) ) - authorized_store = AuthorizedSqlStore(base_sqlstore) + authorized_store = AuthorizedSqlStore(base_sqlstore, default_policy()) await authorized_store.create_table( table="user_data",