# What does this PR do?

This PR brings back the facility to avoid forcing registration of resources onto the user. Forced registration is not just annoying but sometimes actually infeasible. For example, you may have a Stack which boots up with private providers for inference for models A and B. There is no way for the user to know which model is being served by these providers (and so no way to register it).

How will this avoid users needing to do registration? In a follow-up diff, I will update the sample run.yaml files so they explicitly list the models served by each distribution (a sketch of such a listing follows the test plan below). So when users do `llama stack build --template <...>` and run it, their distributions come up with the right set of models they expect. For self-hosted distributions, it also gives us a place to explicitly list the models that need to be served to make the stack "complete" (including safety, for example).

## Test Plan

Started ollama locally with two lightweight models: Llama3.2-3B-Instruct and Llama-Guard-3-1B.

Updated all the tests, including agents. Here are the tests I ran so far:

```bash
pytest -s -v -m "fireworks and llama_3b" test_text_inference.py::TestInference \
    --env FIREWORKS_API_KEY=...

pytest -s -v -m "ollama and llama_3b" test_text_inference.py::TestInference

pytest -s -v -m ollama test_safety.py

pytest -s -v -m faiss test_memory.py

pytest -s -v -m ollama test_agents.py \
    --inference-model=Llama3.2-3B-Instruct --safety-model=Llama-Guard-3-1B
```

These runs also surfaced and fixed a few pre-existing bugs.
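To make the follow-up concrete, here is a rough sketch of what an explicit model listing in a sample run.yaml might look like. This PR does not define that schema, so the field names below (`models`, `model_id`, `provider_id`) are illustrative assumptions rather than the actual configuration format:

```yaml
# Hypothetical run.yaml fragment -- field names are illustrative,
# not the actual llama-stack configuration schema.
models:
  - model_id: Llama3.2-3B-Instruct  # served by the inference provider
    provider_id: ollama
  - model_id: Llama-Guard-3-1B      # served for the safety API
    provider_id: ollama
```

With the served models declared up front like this, the stack can register them itself at startup, and users are never asked to discover and register resources they cannot see.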
87 lines · 2.8 KiB · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from datetime import datetime
from pathlib import Path

import fire
import yaml

from llama_models import schema_utils

from .pyopenapi.options import Options
from .pyopenapi.specification import Info, Server
from .pyopenapi.utility import Specification

# We do some monkey-patching to ensure our definitions only use the minimal
# (json_schema_type, webmethod) definitions from the llama_models package. For
# generation though, we need the full definitions and implementations from the
# (json-strong-typing) package.

from .strong_typing.schema import json_schema_type

schema_utils.json_schema_type = json_schema_type

from llama_stack.distribution.stack import LlamaStack


# TODO: this should be fixed in the generator itself so it reads appropriate annotations
STREAMING_ENDPOINTS = [
    "/agents/turn/create",
    "/inference/chat_completion",
]


def patch_sse_stream_responses(spec: Specification):
    # Streaming endpoints return server-sent events, so rewrite their 200
    # responses to advertise text/event-stream instead of application/json.
    for path, path_item in spec.document.paths.items():
        if path in STREAMING_ENDPOINTS:
            content = path_item.post.responses["200"].content.pop("application/json")
            path_item.post.responses["200"].content["text/event-stream"] = content


def main(output_dir: str):
    output_dir = Path(output_dir)
    if not output_dir.exists():
        raise ValueError(f"Directory {output_dir} does not exist")

    now = str(datetime.now())
    print(
        "Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at " + now
    )
    print("")
    spec = Specification(
        LlamaStack,
        Options(
            server=Server(url="http://any-hosted-llama-stack.com"),
            info=Info(
                title="[DRAFT] Llama Stack Specification",
                version="0.0.1",
                description="""This is the specification of the llama stack that provides
                a set of endpoints and their corresponding interfaces that are tailored to
                best leverage Llama Models. The specification is still in draft and subject to change.
                Generated at """
                + now,
            ),
        ),
    )

    patch_sse_stream_responses(spec)

    with open(output_dir / "llama-stack-spec.yaml", "w", encoding="utf-8") as fp:
        yaml.dump(spec.get_json(), fp, allow_unicode=True)

    with open(output_dir / "llama-stack-spec.html", "w") as fp:
        spec.write_html(fp, pretty_print=True)


if __name__ == "__main__":
    fire.Fire(main)
```
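For reference: because the script uses relative imports (`from .pyopenapi.options import Options`, `from .strong_typing.schema import json_schema_type`), it has to be run as a module from within its parent package rather than as a standalone file; the exact module path depends on where the file lives in the repo. `fire.Fire(main)` maps the first positional command-line argument to `output_dir`, which must already exist, and the script writes `llama-stack-spec.yaml` and `llama-stack-spec.html` into that directory.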