rename toolchain/ --> llama_toolchain/

Hardik Shah 2024-07-21 23:48:38 -07:00
parent d95f5f863d
commit f9111652ef
73 changed files with 36 additions and 37 deletions


llama_toolchain/cli/inference/configure.py
@@ -0,0 +1,82 @@
import argparse
import os
import textwrap
from pathlib import Path

from llama_toolchain.cli.subcommand import Subcommand
from llama_toolchain.utils import DEFAULT_DUMP_DIR

CONFIGS_BASE_DIR = os.path.join(DEFAULT_DUMP_DIR, "configs")


class InferenceConfigure(Subcommand):
    """Llama cli for configuring llama toolchain configs"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "configure",
            prog="llama inference configure",
            description="Configure llama toolchain inference configs",
            epilog=textwrap.dedent(
                """
                Example:
                    llama inference configure
                """
            ),
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_inference_configure_cmd)

    def _add_arguments(self):
        pass

    def read_user_inputs(self):
        checkpoint_dir = input("Enter the checkpoint directory for the model (e.g., ~/.llama/checkpoints/Meta-Llama-3-8B/): ")
        model_parallel_size = input("Enter model parallel size (e.g., 1 for 8B / 8 for 70B and 405B): ")
        return checkpoint_dir, model_parallel_size

    def write_output_yaml(
        self,
        checkpoint_dir,
        model_parallel_size,
        yaml_output_path,
    ):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        default_conf_path = os.path.join(current_dir, "default_configuration.yaml")
        with open(default_conf_path, "r") as f:
            yaml_content = f.read()

        yaml_content = yaml_content.format(
            checkpoint_dir=checkpoint_dir,
            model_parallel_size=model_parallel_size,
        )

        with open(yaml_output_path, "w") as yaml_file:
            yaml_file.write(yaml_content.strip())

        print(f"YAML configuration has been written to {yaml_output_path}")

    def _run_inference_configure_cmd(self, args: argparse.Namespace) -> None:
        checkpoint_dir, model_parallel_size = self.read_user_inputs()

        checkpoint_dir = os.path.expanduser(checkpoint_dir)
        if not (
            checkpoint_dir.endswith("original")
            or checkpoint_dir.endswith("original/")
        ):
            checkpoint_dir = os.path.join(checkpoint_dir, "original")

        os.makedirs(CONFIGS_BASE_DIR, exist_ok=True)
        yaml_output_path = Path(CONFIGS_BASE_DIR) / "inference.yaml"

        self.write_output_yaml(
            checkpoint_dir,
            model_parallel_size,
            yaml_output_path,
        )
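For illustration, a configure session might look like the following, assuming DEFAULT_DUMP_DIR resolves to ~/.llama (the checkpoint path and answers below are hypothetical, not output captured from this commit):

$ llama inference configure
Enter the checkpoint directory for the model (e.g., ~/.llama/checkpoints/Meta-Llama-3-8B/): ~/.llama/checkpoints/Meta-Llama-3-8B/
Enter model parallel size (e.g., 1 for 8B / 8 for 70B and 405B): 1
YAML configuration has been written to ~/.llama/configs/inference.yaml

Note that _run_inference_configure_cmd appends "original" to the entered directory unless it already ends with it, so the generated config points at .../Meta-Llama-3-8B/original.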


llama_toolchain/cli/inference/default_configuration.yaml
@@ -0,0 +1,9 @@
inference_config:
  impl_type: "inline"
  inline_config:
    checkpoint_type: "pytorch"
    checkpoint_dir: {checkpoint_dir}/
    tokenizer_path: {checkpoint_dir}/tokenizer.model
    model_parallel_size: {model_parallel_size}
    max_seq_len: 2048
    max_batch_size: 1
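write_output_yaml fills the {checkpoint_dir} and {model_parallel_size} placeholders with str.format, so a rendered inference.yaml would look roughly like this (the home directory and model are hypothetical examples):

inference_config:
  impl_type: "inline"
  inline_config:
    checkpoint_type: "pytorch"
    checkpoint_dir: /home/user/.llama/checkpoints/Meta-Llama-3-8B/original/
    tokenizer_path: /home/user/.llama/checkpoints/Meta-Llama-3-8B/original/tokenizer.model
    model_parallel_size: 1
    max_seq_len: 2048
    max_batch_size: 1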


@@ -0,0 +1,30 @@
import argparse
import textwrap

from llama_toolchain.cli.inference.configure import InferenceConfigure
from llama_toolchain.cli.inference.start import InferenceStart
from llama_toolchain.cli.subcommand import Subcommand


class InferenceParser(Subcommand):
    """Llama cli for inference apis"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "inference",
            prog="llama inference",
            description="Run inference on a llama model",
            epilog=textwrap.dedent(
                """
                Example:
                    llama inference start <options>
                """
            ),
        )

        subparsers = self.parser.add_subparsers(title="inference_subcommands")

        # Add sub-commands
        InferenceStart.create(subparsers)
        InferenceConfigure.create(subparsers)
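InferenceParser itself is presumably registered on the root llama parser with the same Subcommand.create pattern it uses here for its own sub-commands. A minimal sketch of that wiring, assuming create(subparsers) simply instantiates the class, and assuming the import path below (the root parser is hypothetical, not part of this commit):

import argparse

# Assumed import path; this commit does not show the root CLI module.
from llama_toolchain.cli.inference.inference import InferenceParser


def main():
    # Hypothetical root parser for the `llama` entry point.
    parser = argparse.ArgumentParser(prog="llama", description="Llama CLI")
    subparsers = parser.add_subparsers(title="subcommands")

    # Same registration pattern as InferenceStart.create / InferenceConfigure.create.
    InferenceParser.create(subparsers)

    args = parser.parse_args()
    # Each leaf command registered its handler via parser.set_defaults(func=...).
    args.func(args)


if __name__ == "__main__":
    main()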


llama_toolchain/cli/inference/start.py
@@ -0,0 +1,54 @@
import argparse
import textwrap

from llama_toolchain.cli.subcommand import Subcommand
from llama_toolchain.inference.server import main as inference_server_init


class InferenceStart(Subcommand):
    """Llama Inference cli for starting inference server"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "start",
            prog="llama inference start",
            description="Start an inference server",
            epilog=textwrap.dedent(
                """
                Example:
                    llama inference start <options>
                """
            ),
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_inference_start_cmd)

    def _add_arguments(self):
        self.parser.add_argument(
            "--port",
            type=int,
            help="Port to run the server on. Defaults to 5000",
            default=5000,
        )
        self.parser.add_argument(
            "--disable-ipv6",
            action="store_true",
            help="Disable IPv6 support",
            default=False,
        )
        self.parser.add_argument(
            "--config",
            type=str,
            help="Path to config file",
            default="inference",
        )

    def _run_inference_start_cmd(self, args: argparse.Namespace) -> None:
        inference_server_init(
            config_path=args.config,
            port=args.port,
            disable_ipv6=args.disable_ipv6,
        )
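End-to-end, the expected usage follows the epilog examples above (flag values are illustrative; the default --config value "inference" is presumably resolved by inference_server_init to the inference.yaml written by the configure step):

llama inference configure
llama inference start --port 5000 --disable-ipv6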