Ollama remote adapter works

This commit is contained in:
Ashwin Bharambe 2024-08-28 06:51:07 -07:00
parent 2076d2b6db
commit 2a1552a5eb
14 changed files with 196 additions and 128 deletions

View file

@ -10,6 +10,7 @@ from llama_toolchain.cli.subcommand import Subcommand
from .build import ApiBuild
from .configure import ApiConfigure
from .start import ApiStart
class ApiParser(Subcommand):
@ -26,3 +27,4 @@ class ApiParser(Subcommand):
# Add sub-commands
ApiBuild.create(subparsers)
ApiConfigure.create(subparsers)
ApiStart.create(subparsers)

View file

@ -7,9 +7,6 @@
import argparse
import json
import os
import random
import string
import uuid
from pydantic import BaseModel
from datetime import datetime
from enum import Enum
@ -25,10 +22,6 @@ from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR
from llama_toolchain.distribution.datatypes import * # noqa: F403
def random_string():
    """Return a random 8-character string drawn from ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choices(alphabet, k=8))
class BuildType(Enum):
    # Packaging target for an API build: either a Docker container image
    # or a local conda environment. The string values are what users pass
    # on the CLI via `--type`.
    container = "container"
    conda_env = "conda_env"
@ -42,6 +35,8 @@ class Dependencies(BaseModel):
def get_dependencies(
provider: ProviderSpec, dependencies: Dict[str, ProviderSpec]
) -> Dependencies:
from llama_toolchain.distribution.distribution import SERVER_DEPENDENCIES
def _deps(provider: ProviderSpec) -> Tuple[List[str], Optional[str]]:
if isinstance(provider, InlineProviderSpec):
return provider.pip_packages, provider.docker_image
@ -60,7 +55,9 @@ def get_dependencies(
pip_packages.extend(dep_pip_packages)
return Dependencies(docker_image=docker_image, pip_packages=pip_packages)
return Dependencies(
docker_image=docker_image, pip_packages=pip_packages + SERVER_DEPENDENCIES
)
def parse_dependencies(
@ -88,7 +85,6 @@ def parse_dependencies(
class ApiBuild(Subcommand):
def __init__(self, subparsers: argparse._SubParsersAction):
super().__init__()
self.parser = subparsers.add_parser(
@ -125,8 +121,8 @@ class ApiBuild(Subcommand):
self.parser.add_argument(
"--name",
type=str,
help="Name of the build target (image, conda env). Defaults to a random UUID",
required=False,
help="Name of the build target (image, conda env)",
required=True,
)
self.parser.add_argument(
"--type",
@ -153,11 +149,10 @@ class ApiBuild(Subcommand):
)
return
name = args.name or random_string()
if args.type == BuildType.container.value:
package_name = f"image-{args.provider}-{name}"
package_name = f"image-{args.provider}-{args.name}"
else:
package_name = f"env-{args.provider}-{name}"
package_name = f"env-{args.provider}-{args.name}"
package_name = package_name.replace("::", "-")
build_dir = BUILDS_BASE_DIR / args.api
@ -176,7 +171,7 @@ class ApiBuild(Subcommand):
}
with open(package_file, "w") as f:
c = PackageConfig(
built_at=str(datetime.now()),
built_at=datetime.now(),
package_name=package_name,
docker_image=(
package_name if args.type == BuildType.container.value else None

View file

@ -0,0 +1,83 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
from pathlib import Path
import pkg_resources
import yaml
from llama_toolchain.cli.subcommand import Subcommand
from llama_toolchain.distribution.datatypes import * # noqa: F403
class ApiStart(Subcommand):
    """`llama api start`: launch the server for a provider that has already
    been built and configured (see `llama api build`)."""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "start",
            prog="llama api start",
            description="""start the server for a Llama API provider. You should have already built and configured the provider.""",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_api_start_cmd)

    def _add_arguments(self):
        # Path to the package config emitted by the build step.
        self.parser.add_argument(
            "--yaml-config",
            type=str,
            help="Yaml config containing the API build configuration",
            required=True,
        )
        self.parser.add_argument(
            "--port",
            type=int,
            help="Port to run the server on. Defaults to 5000",
            default=5000,
        )
        self.parser.add_argument(
            "--disable-ipv6",
            action="store_true",
            help="Disable IPv6 support",
            default=False,
        )

    def _run_api_start_cmd(self, args: argparse.Namespace) -> None:
        from llama_toolchain.common.exec import run_with_pty

        yaml_path = Path(args.yaml_config)
        if not yaml_path.exists():
            # parser.error() exits; the return is a belt-and-braces guard.
            self.parser.error(
                f"Could not find {yaml_path}. Please run `llama api build` first"
            )
            return

        with open(yaml_path, "r") as fp:
            config = PackageConfig(**yaml.safe_load(fp))

        # Container builds launch via the docker wrapper script; otherwise
        # the server runs inside the named conda environment.
        if config.docker_image:
            launcher = pkg_resources.resource_filename(
                "llama_toolchain",
                "distribution/start_container.sh",
            )
            command = [launcher, config.docker_image]
        else:
            launcher = pkg_resources.resource_filename(
                "llama_toolchain",
                "distribution/start_conda_env.sh",
            )
            command = [launcher, config.conda_env]

        command += ["--yaml_config", str(yaml_path), "--port", str(args.port)]
        if args.disable_ipv6:
            command.append("--disable-ipv6")

        run_with_pty(command)