mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-05 04:17:32 +00:00
Introduce a "Router" layer for providers
Some providers need to be factorized and considered as thin routing layers on top of other providers. Consider two examples: (1) the inference API should be a routing layer over inference providers, routed using the "model" key; (2) the memory banks API is another instance where various memory bank types will be provided by independent providers (e.g., a vector store is served by Chroma, while a key-value memory can be served by Redis or PGVector). This commit introduces a generalized routing layer for this purpose.
This commit is contained in:
parent
5c1f2616b5
commit
b6a3ef51da
12 changed files with 384 additions and 118 deletions
|
@@ -105,13 +105,6 @@ class StackConfigure(Subcommand):
|
|||
image_name = build_config.name.replace("::", "-")
|
||||
run_config_file = builds_dir / f"{image_name}-run.yaml"
|
||||
|
||||
api2providers = build_config.distribution_spec.providers
|
||||
|
||||
stub_config = {
|
||||
api_str: {"provider_id": provider}
|
||||
for api_str, provider in api2providers.items()
|
||||
}
|
||||
|
||||
if run_config_file.exists():
|
||||
cprint(
|
||||
f"Configuration already exists for {build_config.name}. Will overwrite...",
|
||||
|
@@ -123,10 +116,12 @@ class StackConfigure(Subcommand):
|
|||
config = StackRunConfig(
|
||||
built_at=datetime.now(),
|
||||
image_name=image_name,
|
||||
providers=stub_config,
|
||||
apis_to_serve=[],
|
||||
provider_map={},
|
||||
)
|
||||
|
||||
config.providers = configure_api_providers(config.providers)
|
||||
config = configure_api_providers(config, build_config.distribution_spec)
|
||||
|
||||
config.docker_image = (
|
||||
image_name if build_config.image_type == "docker" else None
|
||||
)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue