## What does this PR do?
This is a long-pending change and particularly important to get done
now.
Specifically:
- we cannot "localize" (aka download) any URLs from media attachments
anywhere near our modeling code. It must be done within llama-stack.
- `PIL.Image` is infesting all our APIs via `ImageMedia ->
InterleavedTextMedia`, and that cannot be right at all. Anything in the
API surface must be "naturally serializable". We need a standard `{
type: "image", image_url: "<...>" }` shape, which is more extensible
(see the sketch after this list).
- `UserMessage`, `SystemMessage`, etc. are moved completely to
llama-stack from the llama-models repository.
See https://github.com/meta-llama/llama-models/pull/244 for the
corresponding PR in llama-models.
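
To make the serialization point concrete, here is a minimal sketch of what such a content type could look like (pydantic-style; the names `TextContentItem`, `ImageContentItem`, and `InterleavedContent` are illustrative, not necessarily the exact types this PR lands):
```python
from typing import List, Literal, Union

from pydantic import BaseModel


class TextContentItem(BaseModel):
    type: Literal["text"] = "text"
    text: str


class ImageContentItem(BaseModel):
    # The image is referenced by a URL (or data: URI) instead of carrying a
    # PIL.Image object, so messages stay plain, JSON-serializable data.
    type: Literal["image"] = "image"
    image_url: str


# A message body is either a plain string or an ordered mix of items.
InterleavedContent = Union[str, List[Union[TextContentItem, ImageContentItem]]]


class UserMessage(BaseModel):
    role: Literal["user"] = "user"
    content: InterleavedContent
```
With this shape, any "localization" (downloading) of `image_url` happens inside llama-stack right before inference, never in the modeling code.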
## Test Plan
```bash
cd llama_stack/providers/tests
pytest -s -v -k "fireworks or ollama or together" inference/test_vision_inference.py
pytest -s -v -k "(fireworks or ollama or together) and llama_3b" inference/test_text_inference.py
pytest -s -v -k chroma memory/test_memory.py \
--env EMBEDDING_DIMENSION=384 --env CHROMA_DB_PATH=/tmp/foobar
pytest -s -v -k fireworks agents/test_agents.py \
--safety-shield=meta-llama/Llama-Guard-3-8B \
--inference-model=meta-llama/Llama-3.1-8B-Instruct
```
Updated the client SDK (see PR ...), installed the SDK in the same
environment, and then ran the SDK tests:
```bash
cd tests/client-sdk
LLAMA_STACK_CONFIG=together pytest -s -v agents/test_agents.py
LLAMA_STACK_CONFIG=ollama pytest -s -v memory/test_memory.py
# this one needed a bit of hacking in the run.yaml to ensure I could register the vision model correctly
INFERENCE_MODEL=llama3.2-vision:latest LLAMA_STACK_CONFIG=ollama pytest -s -v inference/test_inference.py
```
Attached file (Python, 71 lines):
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from datetime import datetime
from pathlib import Path

import fire
import yaml

from llama_models import schema_utils

# We do some monkey-patching to ensure our definitions only use the minimal
# (json_schema_type, webmethod) definitions from the llama_models package. For
# generation though, we need the full definitions and implementations from the
# (json-strong-typing) package.

from .strong_typing.schema import json_schema_type, register_schema

schema_utils.json_schema_type = json_schema_type
schema_utils.register_schema = register_schema

# These imports must happen after the monkey-patching above, hence the E402
# ("module level import not at top of file") suppressions.
from llama_stack.apis.version import LLAMA_STACK_API_VERSION  # noqa: E402
from llama_stack.distribution.stack import LlamaStack  # noqa: E402

from .pyopenapi.options import Options  # noqa: E402
from .pyopenapi.specification import Info, Server  # noqa: E402
from .pyopenapi.utility import Specification  # noqa: E402


def main(output_dir: str):
    output_dir = Path(output_dir)
    if not output_dir.exists():
        raise ValueError(f"Directory {output_dir} does not exist")

    now = str(datetime.now())
    print(
        "Converting the spec to YAML (llama-stack-spec.yaml) and HTML"
        " (llama-stack-spec.html) at " + now
    )
    print("")

    spec = Specification(
        LlamaStack,
        Options(
            server=Server(url="http://any-hosted-llama-stack.com"),
            info=Info(
                title="Llama Stack Specification",
                version=LLAMA_STACK_API_VERSION,
                description="""This is the specification of the Llama Stack that provides
                a set of endpoints and their corresponding interfaces that are tailored to
                best leverage Llama Models.""",
            ),
        ),
    )

    with open(output_dir / "llama-stack-spec.yaml", "w", encoding="utf-8") as fp:
        yaml.dump(spec.get_json(), fp, allow_unicode=True)

    with open(output_dir / "llama-stack-spec.html", "w") as fp:
        spec.write_html(fp, pretty_print=True)


if __name__ == "__main__":
    fire.Fire(main)
```
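
Since the script is driven by `fire.Fire(main)` and uses relative imports, it has to be run as a module with the (pre-existing) output directory passed as a positional argument. A sketch of the invocation, assuming the script lives at `docs/openapi_generator/generate.py` in the repo:
```bash
# The output directory must already exist; the script raises ValueError otherwise.
mkdir -p docs/_static
python -m docs.openapi_generator.generate docs/_static
```
This writes `llama-stack-spec.yaml` and `llama-stack-spec.html` into the given directory.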