Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
* API Keys passed from Client instead of distro configuration
* delete distribution registry
* Rename the "package" word away
* Introduce a "Router" layer for providers. Some providers need to be factorized and considered as thin routing layers on top of other providers. Consider two examples:
  - The inference API should be a routing layer over inference providers, routed using the "model" key.
  - The memory banks API is another instance where various memory bank types will be provided by independent providers (e.g., a vector store is served by Chroma while a keyvalue memory can be served by Redis or PGVector).
  This commit introduces a generalized routing layer for this purpose (a sketch of the idea follows this list).
* update `apis_to_serve`
* llama_toolchain -> llama_stack
* Codemod from llama_toolchain -> llama_stack:
  - added providers/registry
  - cleaned up api/ subdirectories and moved impls away
  - restructured api/api.py so that `from llama_stack.apis.<api> import foo` works now
  - updated imports to use llama_stack.apis.<api>, along with many other imports
  - added __init__ files and fixed some registry imports
  - create_agentic_system -> create_agent
  - AgenticSystem -> Agent
* Moved some stuff out of common/; re-generated OpenAPI spec
* llama-toolchain -> llama-stack (hyphens)
* add control plane API
* add redis adapter + sqlite provider
* move core -> distribution
* Some more toolchain -> stack changes
* small naming shenanigans
* Removing custom tool and agent utilities and moving them client side
* Move control plane to distribution server for now
* Remove control plane from API list
* no codeshield dependency randomly plzzzzz
* Add "fire" as a dependency
* add back event loggers
* stack configure fixes
* use brave instead of bing in the example client
* add init file so it gets packaged
* add init files so it gets packaged
* Update MANIFEST
* bug fix

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Xi Yan <xiyan@meta.com>
Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
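The routing idea described above is easiest to see in a small sketch. Here, a hypothetical InferenceRouter keeps a table from model descriptor to provider and simply forwards calls; the class and method names are assumptions for exposition, not the actual llama-stack implementation.

    # Illustrative sketch of a "Router" layer; names are assumptions, not
    # the actual llama-stack code.
    from typing import Any, Dict


    class InferenceRouter:
        """Thin routing layer that picks an inference provider by the "model" key."""

        def __init__(self) -> None:
            # Routing table: model descriptor -> provider implementation.
            self.providers: Dict[str, Any] = {}

        def register_provider(self, model: str, provider: Any) -> None:
            self.providers[model] = provider

        def chat_completion(self, model: str, messages: list, **kwargs) -> Any:
            provider = self.providers.get(model)
            if provider is None:
                raise ValueError(f"no provider registered for model '{model}'")
            # The router adds no behavior of its own; it just forwards the call.
            return provider.chat_completion(model=model, messages=messages, **kwargs)

The same pattern generalizes to memory banks: the routing key becomes the bank type (or bank id), and each registered provider (Chroma, Redis, PGVector) serves the banks it owns.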
75 lines · 2.4 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import json

from llama_models.sku_list import resolve_model
from termcolor import colored

from llama_stack.cli.subcommand import Subcommand
from llama_stack.cli.table import print_table
from llama_stack.distribution.utils.serialize import EnumEncoder


class ModelDescribe(Subcommand):
    """Show details about a model"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "describe",
            prog="llama model describe",
            description="Show details about a llama model",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_model_describe_cmd)

    def _add_arguments(self):
        self.parser.add_argument(
            "-m",
            "--model-id",
            type=str,
            required=True,
        )

    def _run_model_describe_cmd(self, args: argparse.Namespace) -> None:
        model = resolve_model(args.model_id)
        if model is None:
            self.parser.error(
                f"Model {args.model_id} not found; try 'llama model list' for a list of available models."
            )
            return

        # Collect (label, value) pairs for the summary table.
        rows = [
            (
                colored("Model", "white", attrs=["bold"]),
                colored(model.descriptor(), "white", attrs=["bold"]),
            ),
            ("HuggingFace ID", model.huggingface_repo or "<Not Available>"),
            ("Description", model.description_markdown),
            ("Context Length", f"{model.max_seq_length // 1024}K tokens"),
            ("Weights format", model.quantization_format.value),
            ("Model params.json", json.dumps(model.model_args, indent=4)),
        ]

        if model.recommended_sampling_params is not None:
            sampling_params = model.recommended_sampling_params.dict()
            # These fields are request-specific, so drop them from the
            # recommended defaults shown to the user.
            for k in ("max_tokens", "repetition_penalty"):
                del sampling_params[k]
            rows.append(
                (
                    "Recommended sampling params",
                    json.dumps(sampling_params, cls=EnumEncoder, indent=4),
                )
            )

        print_table(
            rows,
            separate_rows=True,
        )
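For reference, a minimal harness showing how this Subcommand could be exercised on its own; the standalone parser wiring and the model id below are illustrative assumptions, not the actual llama CLI entry point.

    # Hypothetical standalone harness for ModelDescribe; the parser setup is
    # an assumption for illustration, not the real `llama` CLI bootstrap.
    import argparse

    parser = argparse.ArgumentParser(prog="llama model")
    subparsers = parser.add_subparsers(dest="subcommand")
    ModelDescribe(subparsers)

    # Equivalent to running: llama model describe -m Llama3.1-8B-Instruct
    args = parser.parse_args(["describe", "-m", "Llama3.1-8B-Instruct"])
    args.func(args)  # dispatches to _run_model_describe_cmd

The set_defaults(func=...) call in __init__ is what makes this dispatch work: each subcommand registers its own handler, so the top-level CLI only needs to call args.func(args) after parsing.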