mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-11 03:46:03 +00:00
Make sure we always serve the routing table APIs if the corresponding router APIs are being served
This commit is contained in:
parent
a5d7caf21b
commit
8be385c994
2 changed files with 10 additions and 4 deletions
|
|
@@ -26,6 +26,8 @@ from pydantic import BaseModel, ValidationError
|
|||
from termcolor import cprint
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from llama_stack.distribution.distribution import builtin_automatically_routed_apis
|
||||
|
||||
from llama_stack.providers.utils.telemetry.tracing import (
|
||||
end_trace,
|
||||
setup_logger,
|
||||
|
|
@@ -285,6 +287,10 @@ def main(
|
|||
else:
|
||||
apis_to_serve = set(impls.keys())
|
||||
|
||||
for inf in builtin_automatically_routed_apis():
|
||||
if inf.router_api.value in apis_to_serve:
|
||||
apis_to_serve.add(inf.routing_table_api.value)
|
||||
|
||||
apis_to_serve.add("inspect")
|
||||
for api_str in apis_to_serve:
|
||||
api = Api(api_str)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue