mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 02:53:30 +00:00)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import importlib
import inspect
from typing import Dict, List

from llama_toolchain.agentic_system.api import AgenticSystem
from llama_toolchain.inference.api import Inference
from llama_toolchain.memory.api import Memory
from llama_toolchain.safety.api import Safety
from llama_toolchain.telemetry.api import Telemetry

from .datatypes import (
    Api,
    ApiEndpoint,
    DistributionSpec,
    InlineProviderSpec,
    ProviderSpec,
    remote_provider_spec,
)

# These are the dependencies needed by the distribution server.
# `llama-toolchain` is automatically installed by the installation script.
SERVER_DEPENDENCIES = [
    "fastapi",
    "uvicorn",
]


def stack_apis() -> List[Api]:
    """Return every API defined in the stack."""
    return [v for v in Api]


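# For orientation, a hypothetical entry in the mapping returned by
# `api_endpoints` below (route and name are illustrative, not taken from the
# real protocol definitions):
#
#     Api.inference: [ApiEndpoint(route="/inference/chat_completion",
#                                 method="post", name="chat_completion")]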
def api_endpoints() -> Dict[Api, List[ApiEndpoint]]:
    """Build the table of HTTP endpoints exposed by each API's protocol class."""
    apis = {}

    protocols = {
        Api.inference: Inference,
        Api.safety: Safety,
        Api.agentic_system: AgenticSystem,
        Api.memory: Memory,
        Api.telemetry: Telemetry,
    }

    for api, protocol in protocols.items():
        endpoints = []
        protocol_methods = inspect.getmembers(protocol, predicate=inspect.isfunction)

        for name, method in protocol_methods:
            # Only methods tagged with a `__webmethod__` attribute are exposed.
            if not hasattr(method, "__webmethod__"):
                continue

            webmethod = method.__webmethod__
            route = webmethod.route

            if webmethod.method == "GET":
                http_method = "get"
            elif webmethod.method == "DELETE":
                http_method = "delete"
            else:
                http_method = "post"
            endpoints.append(ApiEndpoint(route=route, method=http_method, name=name))

        apis[api] = endpoints

    return apis
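
# `api_endpoints` above relies on each exposed protocol method carrying a
# `__webmethod__` attribute with `route` and `method` fields. The decorator
# that attaches it is defined elsewhere in llama_toolchain; a minimal sketch
# of how such a decorator could work (`WebMethod` and `webmethod` are
# illustrative names, not the actual implementation):
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class WebMethod:
#         route: str
#         method: Optional[str] = None
#
#     def webmethod(route: str, method: Optional[str] = None):
#         def decorate(func):
#             func.__webmethod__ = WebMethod(route=route, method=method)
#             return func
#         return decorate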


def api_providers() -> Dict[Api, Dict[str, ProviderSpec]]:
    """Map each API to its available provider specs, keyed by provider type."""
    ret = {}
    for api in stack_apis():
        name = api.name.lower()
        module = importlib.import_module(f"llama_toolchain.{name}.providers")
        ret[api] = {
            # Every API gets a generic remote provider spec...
            "remote": remote_provider_spec(api),
            # ...plus the providers registered by its own module.
            **{a.provider_type: a for a in module.available_providers()},
        }

    return ret
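

# A minimal usage sketch (assumes `llama_toolchain` and the API modules
# imported above are installed in the environment):
if __name__ == "__main__":
    # Every API the stack knows about, e.g. Api.inference, Api.safety, ...
    print(stack_apis())

    # The derived route table: one line per endpoint.
    for api, endpoints in api_endpoints().items():
        for ep in endpoints:
            print(f"{api.name}: {ep.method.upper()} {ep.route}")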