diff --git a/docs/cli_reference.md b/docs/cli_reference.md
index d46cf722a..9a84eb3b9 100644
--- a/docs/cli_reference.md
+++ b/docs/cli_reference.md
@@ -248,8 +248,8 @@ llama stack list-distributions
 ```
-+--------------------------------+---------------------------------------+----------------------------------------------------------------------+
-| Distribution ID                | Providers                             | Description                                                          |
++--------------------------------+---------------------------------------+----------------------------------------------------------------------+
+| Distribution Type              | Providers                             | Description                                                          |
 +--------------------------------+---------------------------------------+----------------------------------------------------------------------+
 | local                          | {                                     | Use code from `llama_toolchain` itself to serve all llama stack APIs |
 |                                |   "inference": "meta-reference",      |                                                                      |
diff --git a/llama_toolchain/agentic_system/providers.py b/llama_toolchain/agentic_system/providers.py
index a722d9400..164df1a30 100644
--- a/llama_toolchain/agentic_system/providers.py
+++ b/llama_toolchain/agentic_system/providers.py
@@ -13,7 +13,7 @@ def available_agentic_system_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.agentic_system,
-            provider_id="meta-reference",
+            provider_type="meta-reference",
             pip_packages=[
                 "codeshield",
                 "matplotlib",
diff --git a/llama_toolchain/cli/stack/build.py b/llama_toolchain/cli/stack/build.py
index c81a6d350..22bd4071f 100644
--- a/llama_toolchain/cli/stack/build.py
+++ b/llama_toolchain/cli/stack/build.py
@@ -52,7 +52,7 @@ class StackBuild(Subcommand):
             BuildType,
         )
 
-        allowed_ids = [d.distribution_id for d in available_distribution_specs()]
+        allowed_ids = [d.distribution_type for d in available_distribution_specs()]
         self.parser.add_argument(
             "distribution",
             type=str,
@@ -101,7 +101,7 @@ class StackBuild(Subcommand):
                 api_inputs.append(
                     ApiInput(
                         api=api,
-                        provider=provider_spec.provider_id,
+                        provider=provider_spec.provider_type,
                     )
                 )
             docker_image = None
@@ -115,11 +115,11 @@ class StackBuild(Subcommand):
                 self.parser.error(f"Could not find distribution {args.distribution}")
                 return
 
-            for api, provider_id in dist.providers.items():
+            for api, provider_type in dist.providers.items():
                 api_inputs.append(
                     ApiInput(
                         api=api,
-                        provider=provider_id,
+                        provider=provider_type,
                     )
                 )
             docker_image = dist.docker_image
@@ -128,6 +128,6 @@
             api_inputs,
             build_type=BuildType(args.type),
             name=args.name,
-            distribution_id=args.distribution,
+            distribution_type=args.distribution,
             docker_image=docker_image,
         )
diff --git a/llama_toolchain/cli/stack/configure.py b/llama_toolchain/cli/stack/configure.py
index 70ff4a7f0..658380f4d 100644
--- a/llama_toolchain/cli/stack/configure.py
+++ b/llama_toolchain/cli/stack/configure.py
@@ -36,7 +36,7 @@ class StackConfigure(Subcommand):
         )
         from llama_toolchain.core.package import BuildType
 
-        allowed_ids = [d.distribution_id for d in available_distribution_specs()]
+        allowed_ids = [d.distribution_type for d in available_distribution_specs()]
         self.parser.add_argument(
             "distribution",
             type=str,
@@ -84,7 +84,7 @@ def configure_llama_distribution(config_file: Path) -> None:
 
     if config.providers:
         cprint(
-            f"Configuration already exists for {config.distribution_id}. Will overwrite...",
+            f"Configuration already exists for {config.distribution_type}. Will overwrite...",
Will overwrite...", "yellow", attrs=["bold"], ) diff --git a/llama_toolchain/cli/stack/list_distributions.py b/llama_toolchain/cli/stack/list_distributions.py index c4d529157..557b8c33c 100644 --- a/llama_toolchain/cli/stack/list_distributions.py +++ b/llama_toolchain/cli/stack/list_distributions.py @@ -33,7 +33,7 @@ class StackListDistributions(Subcommand): # eventually, this should query a registry at llama.meta.com/llamastack/distributions headers = [ - "Distribution ID", + "Distribution Type", "Providers", "Description", ] @@ -43,7 +43,7 @@ class StackListDistributions(Subcommand): providers = {k.value: v for k, v in spec.providers.items()} rows.append( [ - spec.distribution_id, + spec.distribution_type, json.dumps(providers, indent=2), spec.description, ] diff --git a/llama_toolchain/cli/stack/list_providers.py b/llama_toolchain/cli/stack/list_providers.py index 29602d889..fdf4ab054 100644 --- a/llama_toolchain/cli/stack/list_providers.py +++ b/llama_toolchain/cli/stack/list_providers.py @@ -41,7 +41,7 @@ class StackListProviders(Subcommand): # eventually, this should query a registry at llama.meta.com/llamastack/distributions headers = [ - "Provider ID", + "Provider Type", "PIP Package Dependencies", ] @@ -49,7 +49,7 @@ class StackListProviders(Subcommand): for spec in providers_for_api.values(): rows.append( [ - spec.provider_id, + spec.provider_type, ",".join(spec.pip_packages), ] ) diff --git a/llama_toolchain/cli/stack/run.py b/llama_toolchain/cli/stack/run.py index 68853db35..1568ed820 100644 --- a/llama_toolchain/cli/stack/run.py +++ b/llama_toolchain/cli/stack/run.py @@ -80,7 +80,7 @@ class StackRun(Subcommand): with open(config_file, "r") as f: config = PackageConfig(**yaml.safe_load(f)) - if not config.distribution_id: + if not config.distribution_type: raise ValueError("Build config appears to be corrupt.") if config.docker_image: diff --git a/llama_toolchain/core/build_conda_env.sh b/llama_toolchain/core/build_conda_env.sh index 1e8c002f2..e5b1ca539 100755 --- a/llama_toolchain/core/build_conda_env.sh +++ b/llama_toolchain/core/build_conda_env.sh @@ -20,12 +20,12 @@ fi set -euo pipefail if [ "$#" -ne 3 ]; then - echo "Usage: $0" >&2 - echo "Example: $0 mybuild 'numpy pandas scipy'" >&2 + echo "Usage: $0 " >&2 + echo "Example: $0 mybuild 'numpy pandas scipy'" >&2 exit 1 fi -distribution_id="$1" +distribution_type="$1" build_name="$2" env_name="llamastack-$build_name" pip_dependencies="$3" @@ -117,4 +117,4 @@ ensure_conda_env_python310 "$env_name" "$pip_dependencies" printf "${GREEN}Successfully setup conda environment. 
Configuring build...${NC}\n" -$CONDA_PREFIX/bin/python3 -m llama_toolchain.cli.llama stack configure $distribution_id --name "$build_name" --type conda_env +$CONDA_PREFIX/bin/python3 -m llama_toolchain.cli.llama stack configure $distribution_type --name "$build_name" --type conda_env diff --git a/llama_toolchain/core/build_container.sh b/llama_toolchain/core/build_container.sh index ec2ca8a0c..e5349cd08 100755 --- a/llama_toolchain/core/build_container.sh +++ b/llama_toolchain/core/build_container.sh @@ -5,12 +5,12 @@ LLAMA_TOOLCHAIN_DIR=${LLAMA_TOOLCHAIN_DIR:-} TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-} if [ "$#" -ne 4 ]; then - echo "Usage: $0 - echo "Example: $0 distribution_id my-fastapi-app python:3.9-slim 'fastapi uvicorn' + echo "Usage: $0 + echo "Example: $0 distribution_type my-fastapi-app python:3.9-slim 'fastapi uvicorn' exit 1 fi -distribution_id=$1 +distribution_type=$1 build_name="$2" image_name="llamastack-$build_name" docker_base=$3 @@ -110,4 +110,4 @@ set +x printf "${GREEN}Succesfully setup Podman image. Configuring build...${NC}" echo "You can run it with: podman run -p 8000:8000 $image_name" -$CONDA_PREFIX/bin/python3 -m llama_toolchain.cli.llama stack configure $distribution_id --name "$build_name" --type container +$CONDA_PREFIX/bin/python3 -m llama_toolchain.cli.llama stack configure $distribution_type --name "$build_name" --type container diff --git a/llama_toolchain/core/configure.py b/llama_toolchain/core/configure.py index 7f9aa0140..252358a52 100644 --- a/llama_toolchain/core/configure.py +++ b/llama_toolchain/core/configure.py @@ -21,14 +21,14 @@ def configure_api_providers(existing_configs: Dict[str, Any]) -> None: for api_str, stub_config in existing_configs.items(): api = Api(api_str) providers = all_providers[api] - provider_id = stub_config["provider_id"] - if provider_id not in providers: + provider_type = stub_config["provider_type"] + if provider_type not in providers: raise ValueError( - f"Unknown provider `{provider_id}` is not available for API `{api_str}`" + f"Unknown provider `{provider_type}` is not available for API `{api_str}`" ) - provider_spec = providers[provider_id] - cprint(f"Configuring API: {api_str} ({provider_id})", "white", attrs=["bold"]) + provider_spec = providers[provider_type] + cprint(f"Configuring API: {api_str} ({provider_type})", "white", attrs=["bold"]) config_type = instantiate_class_type(provider_spec.config_class) try: @@ -43,7 +43,7 @@ def configure_api_providers(existing_configs: Dict[str, Any]) -> None: print("") provider_configs[api_str] = { - "provider_id": provider_id, + "provider_type": provider_type, **provider_config.dict(), } diff --git a/llama_toolchain/core/datatypes.py b/llama_toolchain/core/datatypes.py index cbdda51d4..138d20941 100644 --- a/llama_toolchain/core/datatypes.py +++ b/llama_toolchain/core/datatypes.py @@ -31,7 +31,7 @@ class ApiEndpoint(BaseModel): @json_schema_type class ProviderSpec(BaseModel): api: Api - provider_id: str + provider_type: str config_class: str = Field( ..., description="Fully-qualified classname of the config for this provider", @@ -100,7 +100,7 @@ class RemoteProviderConfig(BaseModel): return url.rstrip("/") -def remote_provider_id(adapter_id: str) -> str: +def remote_provider_type(adapter_id: str) -> str: return f"remote::{adapter_id}" @@ -141,22 +141,22 @@ def remote_provider_spec( if adapter and adapter.config_class else "llama_toolchain.core.datatypes.RemoteProviderConfig" ) - provider_id = remote_provider_id(adapter.adapter_id) if adapter else "remote" + provider_type = 
 
     return RemoteProviderSpec(
-        api=api, provider_id=provider_id, config_class=config_class, adapter=adapter
+        api=api, provider_type=provider_type, config_class=config_class, adapter=adapter
     )
 
 
 @json_schema_type
 class DistributionSpec(BaseModel):
-    distribution_id: str
+    distribution_type: str
     description: str
     docker_image: Optional[str] = None
     providers: Dict[Api, str] = Field(
         default_factory=dict,
-        description="Provider IDs for each of the APIs provided by this distribution",
+        description="Provider Types for each of the APIs provided by this distribution",
     )
 
 
@@ -171,7 +171,7 @@ Reference to the distribution this package refers to. For unregistered (adhoc) packages,
 this could be just a hash
 """,
     )
-    distribution_id: Optional[str] = None
+    distribution_type: Optional[str] = None
 
     docker_image: Optional[str] = Field(
         default=None,
diff --git a/llama_toolchain/core/distribution.py b/llama_toolchain/core/distribution.py
index 4c50189c0..89e1d7793 100644
--- a/llama_toolchain/core/distribution.py
+++ b/llama_toolchain/core/distribution.py
@@ -83,18 +83,18 @@ def api_endpoints() -> Dict[Api, List[ApiEndpoint]]:
 
 def api_providers() -> Dict[Api, Dict[str, ProviderSpec]]:
     inference_providers_by_id = {
-        a.provider_id: a for a in available_inference_providers()
+        a.provider_type: a for a in available_inference_providers()
     }
-    safety_providers_by_id = {a.provider_id: a for a in available_safety_providers()}
+    safety_providers_by_id = {a.provider_type: a for a in available_safety_providers()}
     agentic_system_providers_by_id = {
-        a.provider_id: a for a in available_agentic_system_providers()
+        a.provider_type: a for a in available_agentic_system_providers()
     }
 
     ret = {
         Api.inference: inference_providers_by_id,
         Api.safety: safety_providers_by_id,
         Api.agentic_system: agentic_system_providers_by_id,
-        Api.memory: {a.provider_id: a for a in available_memory_providers()},
+        Api.memory: {a.provider_type: a for a in available_memory_providers()},
     }
 
     for k, v in ret.items():
         v["remote"] = remote_provider_spec(k)
diff --git a/llama_toolchain/core/distribution_registry.py b/llama_toolchain/core/distribution_registry.py
index e134fdab6..2b15af72b 100644
--- a/llama_toolchain/core/distribution_registry.py
+++ b/llama_toolchain/core/distribution_registry.py
@@ -14,7 +14,7 @@ from .datatypes import *  # noqa: F403
 def available_distribution_specs() -> List[DistributionSpec]:
     return [
         DistributionSpec(
-            distribution_id="local",
+            distribution_type="local",
             description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
             providers={
                 Api.inference: "meta-reference",
@@ -24,35 +24,35 @@ def available_distribution_specs() -> List[DistributionSpec]:
             },
         ),
         DistributionSpec(
-            distribution_id="remote",
+            distribution_type="remote",
             description="Point to remote services for all llama stack APIs",
             providers={x: "remote" for x in Api},
         ),
         DistributionSpec(
-            distribution_id="local-ollama",
+            distribution_type="local-ollama",
             description="Like local, but use ollama for running LLM inference",
             providers={
-                Api.inference: remote_provider_id("ollama"),
+                Api.inference: remote_provider_type("ollama"),
                 Api.safety: "meta-reference",
                 Api.agentic_system: "meta-reference",
                 Api.memory: "meta-reference-faiss",
             },
         ),
         DistributionSpec(
-            distribution_id="local-plus-fireworks-inference",
+            distribution_type="local-plus-fireworks-inference",
             description="Use Fireworks.ai for running LLM inference",
             providers={
-                Api.inference: remote_provider_id("fireworks"),
Api.inference: remote_provider_type("fireworks"), Api.safety: "meta-reference", Api.agentic_system: "meta-reference", Api.memory: "meta-reference-faiss", }, ), DistributionSpec( - distribution_id="local-plus-together-inference", + distribution_type="local-plus-together-inference", description="Use Together.ai for running LLM inference", providers={ - Api.inference: remote_provider_id("together"), + Api.inference: remote_provider_type("together"), Api.safety: "meta-reference", Api.agentic_system: "meta-reference", Api.memory: "meta-reference-faiss", @@ -62,8 +62,8 @@ def available_distribution_specs() -> List[DistributionSpec]: @lru_cache() -def resolve_distribution_spec(distribution_id: str) -> Optional[DistributionSpec]: +def resolve_distribution_spec(distribution_type: str) -> Optional[DistributionSpec]: for spec in available_distribution_specs(): - if spec.distribution_id == distribution_id: + if spec.distribution_type == distribution_type: return spec return None diff --git a/llama_toolchain/core/package.py b/llama_toolchain/core/package.py index 72bd93152..ab4346a71 100644 --- a/llama_toolchain/core/package.py +++ b/llama_toolchain/core/package.py @@ -46,13 +46,13 @@ def build_package( api_inputs: List[ApiInput], build_type: BuildType, name: str, - distribution_id: Optional[str] = None, + distribution_type: Optional[str] = None, docker_image: Optional[str] = None, ): - if not distribution_id: - distribution_id = "adhoc" + if not distribution_type: + distribution_type = "adhoc" - build_dir = BUILDS_BASE_DIR / distribution_id / build_type.descriptor() + build_dir = BUILDS_BASE_DIR / distribution_type / build_type.descriptor() os.makedirs(build_dir, exist_ok=True) package_name = name.replace("::", "-") @@ -79,7 +79,7 @@ def build_package( if provider.docker_image: raise ValueError("A stack's dependencies cannot have a docker image") - stub_config[api.value] = {"provider_id": api_input.provider} + stub_config[api.value] = {"provider_type": api_input.provider} if package_file.exists(): cprint( @@ -92,7 +92,7 @@ def build_package( c.providers[api_str] = new_config else: existing_config = c.providers[api_str] - if existing_config["provider_id"] != new_config["provider_id"]: + if existing_config["provider_type"] != new_config["provider_type"]: cprint( f"Provider `{api_str}` has changed from `{existing_config}` to `{new_config}`", color="yellow", @@ -105,7 +105,7 @@ def build_package( providers=stub_config, ) - c.distribution_id = distribution_id + c.distribution_type = distribution_type c.docker_image = package_name if build_type == BuildType.container else None c.conda_env = package_name if build_type == BuildType.conda_env else None @@ -119,7 +119,7 @@ def build_package( ) args = [ script, - distribution_id, + distribution_type, package_name, package_deps.docker_image, " ".join(package_deps.pip_packages), @@ -130,7 +130,7 @@ def build_package( ) args = [ script, - distribution_id, + distribution_type, package_name, " ".join(package_deps.pip_packages), ] diff --git a/llama_toolchain/core/server.py b/llama_toolchain/core/server.py index 4de84b726..8c7ab10a7 100644 --- a/llama_toolchain/core/server.py +++ b/llama_toolchain/core/server.py @@ -284,13 +284,13 @@ def main(yaml_config: str, port: int = 5000, disable_ipv6: bool = False): for api_str, provider_config in config["providers"].items(): api = Api(api_str) providers = all_providers[api] - provider_id = provider_config["provider_id"] - if provider_id not in providers: + provider_type = provider_config["provider_type"] + if provider_type not 
             raise ValueError(
-                f"Unknown provider `{provider_id}` is not available for API `{api}`"
+                f"Unknown provider `{provider_type}` is not available for API `{api}`"
             )
-        provider_specs[api] = providers[provider_id]
+        provider_specs[api] = providers[provider_type]
 
     impls = resolve_impls(provider_specs, config)
diff --git a/llama_toolchain/inference/providers.py b/llama_toolchain/inference/providers.py
index b469cb29b..5219585c3 100644
--- a/llama_toolchain/inference/providers.py
+++ b/llama_toolchain/inference/providers.py
@@ -13,7 +13,7 @@ def available_inference_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.inference,
-            provider_id="meta-reference",
+            provider_type="meta-reference",
             pip_packages=[
                 "accelerate",
                 "blobfile",
diff --git a/llama_toolchain/memory/providers.py b/llama_toolchain/memory/providers.py
index 4d086d861..cc113d132 100644
--- a/llama_toolchain/memory/providers.py
+++ b/llama_toolchain/memory/providers.py
@@ -18,7 +18,7 @@ def available_memory_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.memory,
-            provider_id="meta-reference-faiss",
+            provider_type="meta-reference-faiss",
             pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
             module="llama_toolchain.memory.meta_reference.faiss",
             config_class="llama_toolchain.memory.meta_reference.faiss.FaissImplConfig",
diff --git a/llama_toolchain/safety/providers.py b/llama_toolchain/safety/providers.py
index dfacf3f67..8471ab139 100644
--- a/llama_toolchain/safety/providers.py
+++ b/llama_toolchain/safety/providers.py
@@ -13,7 +13,7 @@ def available_safety_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.safety,
-            provider_id="meta-reference",
+            provider_type="meta-reference",
             pip_packages=[
                 "accelerate",
                 "codeshield",
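Taken together, the rename is mechanical: every `provider_id` becomes `provider_type` and every `distribution_id` becomes `distribution_type`, in both the Python datatypes and the on-disk build configs. A minimal sketch of the renamed lookup surface, assuming the module layout shown in the diff (the `"local"` spec and `remote_provider_type` come from `distribution_registry.py` and `datatypes.py` above; this snippet is illustrative, not part of the change):

```python
# Minimal sketch of the post-rename API, assuming the modules shown in the diff.
from llama_toolchain.core.distribution_registry import resolve_distribution_spec

# resolve_distribution_spec() is now keyed by distribution_type, not distribution_id.
spec = resolve_distribution_spec("local")
assert spec is not None
print(spec.distribution_type)  # "local"

# DistributionSpec.providers maps each Api to a provider *type* string,
# e.g. "meta-reference" or "remote::ollama" (built by remote_provider_type).
for api, provider_type in spec.providers.items():
    print(f"{api.value}: {provider_type}")
```

Note that build configs written before this rename still carry `provider_id` keys; since `configure.py` and `server.py` now read `provider_config["provider_type"]` directly, such configs will need to be regenerated.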