mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 20:14:13 +00:00
Remove inference uvicorn server entrypoint and llama inference CLI command
This commit is contained in:
parent
3bc827cd5f
commit
d3e269fcf2
3 changed files with 1 additions and 179 deletions
|
@@ -8,7 +8,6 @@ import argparse
|
|||
import textwrap
|
||||
|
||||
from llama_toolchain.cli.inference.configure import InferenceConfigure
|
||||
from llama_toolchain.cli.inference.start import InferenceStart
|
||||
from llama_toolchain.cli.subcommand import Subcommand
|
||||
|
||||
|
||||
|
@@ -31,6 +30,5 @@ class InferenceParser(Subcommand):
|
|||
|
||||
subparsers = self.parser.add_subparsers(title="inference_subcommands")
|
||||
|
||||
# Add sub-commandsa
|
||||
InferenceStart.create(subparsers)
|
||||
# Add sub-commands
|
||||
InferenceConfigure.create(subparsers)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue