mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 02:53:30 +00:00
* Add distribution CLI scaffolding
* More progress towards `llama distribution install`
* getting closer to a distro definition, distro install + configure works
* Distribution server now functioning
* read existing configuration, save enums properly
* Remove inference uvicorn server entrypoint and llama inference CLI command
* updated dependency and client model name
* Improved exception handling
* local imports for faster cli
* undo a typo, add a passthrough distribution
* implement full-passthrough in the server
* add safety adapters, configuration handling, server + clients
* cleanup, moving stuff to common, nuke utils
* Add a Path() wrapper at the earliest place
* fixes
* Bring agentic system api to toolchain; add adapter dependencies and resolve adapters using a topological sort
* refactor to reduce size of `agentic_system`
* move straggler files and fix some important existing bugs
* ApiSurface -> Api
* refactor a method out
* Adapter -> Provider
* Make each inference provider into its own subdirectory
* installation fixes
* Rename Distribution -> DistributionSpec, simplify RemoteProviders
* dict key instead of attr
* update inference config to take model and not model_dir
* Fix passthrough streaming, send headers properly not part of body :facepalm
* update safety to use model sku ids and not model dirs
* Update cli_reference.md
* minor fixes
* add DistributionConfig, fix a bug in model download
* Make install + start scripts do proper configuration automatically
* Update CLI_reference
* Nuke fp8_requirements, fold fbgemm into common requirements
* Update README, add newline between API surface configurations
* Refactor download functionality out of the Command so it can be reused
* Add `llama model download` alias for `llama download`
* Show message about checksum file so users can check themselves
* Simpler intro statements
* get ollama working
* Reduce a bunch of dependencies from toolchain; some improvements to the distribution install script
* Avoid using `conda run` since it buffers everything
* update dependencies and rely on LLAMA_TOOLCHAIN_DIR for dev purposes
* add validation for configuration input
* resort imports
* make optional subclasses default to yes for configuration
* Remove additional_pip_packages; move deps to providers
* for inline make 8b model the default
* Add scripts to MANIFEST
* allow installing from test.pypi.org
* Fix #2 to help with testing packages
* Must install llama-models at that same version first
* fix PIP_ARGS

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Hardik Shah <hjshah@meta.com>
61 lines
2 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from functools import lru_cache
from typing import List, Optional

from .datatypes import Api, DistributionSpec, RemoteProviderSpec
from .distribution import api_providers

def client_module(api: Api) -> str:
    # Dotted module path of the client implementation for a given API.
    return f"llama_toolchain.{api.value}.client"


def remote_spec(api: Api) -> RemoteProviderSpec:
    # Provider spec that forwards an API to a remote service via its client module.
    return RemoteProviderSpec(
        api=api,
        provider_id=f"{api.value}-remote",
        module=client_module(api),
    )
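
# For example, remote_spec(Api.inference) would yield a RemoteProviderSpec with
# provider_id "inference-remote" and module "llama_toolchain.inference.client"
# (assuming Api.inference.value == "inference").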

@lru_cache()
def available_distribution_specs() -> List[DistributionSpec]:
    # Enumerate the built-in distributions, mapping each API to a provider spec.
    providers = api_providers()
    return [
        DistributionSpec(
            spec_id="inline",
            description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
            provider_specs={
                Api.inference: providers[Api.inference]["meta-reference"],
                Api.safety: providers[Api.safety]["meta-reference"],
                Api.agentic_system: providers[Api.agentic_system]["meta-reference"],
            },
        ),
        DistributionSpec(
            spec_id="remote",
            description="Point to remote services for all llama stack APIs",
            provider_specs={x: remote_spec(x) for x in providers},
        ),
        DistributionSpec(
            spec_id="ollama-inline",
            description="Like local-source, but use ollama for running LLM inference",
            provider_specs={
                Api.inference: providers[Api.inference]["meta-ollama"],
                Api.safety: providers[Api.safety]["meta-reference"],
                Api.agentic_system: providers[Api.agentic_system]["meta-reference"],
            },
        ),
    ]

@lru_cache()
def resolve_distribution_spec(spec_id: str) -> Optional[DistributionSpec]:
    # Look up a distribution spec by its id; return None if no spec matches.
    for spec in available_distribution_specs():
        if spec.spec_id == spec_id:
            return spec
    return None
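
# Illustrative usage sketch (assumes the spec_ids and DistributionSpec fields
# defined above):
#
#   spec = resolve_distribution_spec("ollama-inline")
#   if spec is not None:
#       print(spec.description)
#       for api, provider in spec.provider_specs.items():
#           print(api.value, "->", provider.provider_id)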