mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 12:07:34 +00:00)
update build cli
parent 768ed09dec
commit d1f0d17644
6 changed files with 109 additions and 117 deletions
@@ -17,62 +17,43 @@ def available_distribution_specs() -> List[DistributionSpec]:
             distribution_type="local",
             description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
             providers={
-                Api.inference: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="remote",
-            description="Point to remote services for all llama stack APIs",
-            providers={
-                **{x: "remote" for x in Api},
-                Api.telemetry: "console",
+                "inference": "meta-reference",
+                "memory": "meta-reference-faiss",
+                "safety": "meta-reference",
+                "agentic_system": "meta-reference",
+                "telemetry": "console",
             },
         ),
         DistributionSpec(
             distribution_type="local-ollama",
             description="Like local, but use ollama for running LLM inference",
             providers={
-                Api.inference: remote_provider_type("ollama"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.telemetry: "console",
+                "inference": remote_provider_type("ollama"),
+                "safety": "meta-reference",
+                "agentic_system": "meta-reference",
+                "memory": "meta-reference-faiss",
+                "telemetry": "console",
             },
         ),
         DistributionSpec(
             distribution_type="local-plus-fireworks-inference",
             description="Use Fireworks.ai for running LLM inference",
             providers={
-                Api.inference: remote_provider_type("fireworks"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="local-plus-together-inference",
-            description="Use Together.ai for running LLM inference",
-            providers={
-                Api.inference: remote_provider_type("together"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.telemetry: "console",
+                "inference": remote_provider_type("fireworks"),
+                "safety": "meta-reference",
+                "agentic_system": "meta-reference",
+                "memory": "meta-reference-faiss",
+                "telemetry": "console",
             },
         ),
         DistributionSpec(
             distribution_type="local-plus-tgi-inference",
             description="Use TGI for running LLM inference",
             providers={
-                Api.inference: remote_provider_type("tgi"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
+                "inference": remote_provider_type("tgi"),
+                "safety": "meta-reference",
+                "agentic_system": "meta-reference",
+                "memory": "meta-reference-faiss",
             },
         ),
     ]
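For context: each distribution spec maps a llama stack API to the provider type that serves it, and after this commit those maps are keyed by plain API-name strings rather than Api enum members. Below is a minimal runnable sketch of how the registry fits together. The DistributionSpec dataclass is a simplified stand-in for the real model defined elsewhere in llama_toolchain, resolve_distribution_spec is a hypothetical lookup helper of the kind a build CLI would use, and the "remote::<adapter>" naming inside remote_provider_type is an assumption inferred from the remote_provider_type(...) calls in the diff.

from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class DistributionSpec:
    # Simplified stand-in for the real model in llama_toolchain.
    distribution_type: str
    description: str
    # After this commit: plain API-name strings ("inference", "safety", ...)
    # mapped to provider types ("meta-reference", "remote::ollama", ...).
    providers: Dict[str, str] = field(default_factory=dict)


def remote_provider_type(adapter_id: str) -> str:
    # Assumed naming convention for remote adapters, e.g. "remote::ollama".
    return f"remote::{adapter_id}"


def resolve_distribution_spec(
    distribution_type: str, specs: List[DistributionSpec]
) -> Optional[DistributionSpec]:
    # Hypothetical helper: find the spec a build command would materialize
    # for a given distribution name.
    for spec in specs:
        if spec.distribution_type == distribution_type:
            return spec
    return None


if __name__ == "__main__":
    specs = [
        DistributionSpec(
            distribution_type="local-ollama",
            description="Like local, but use ollama for running LLM inference",
            providers={
                "inference": remote_provider_type("ollama"),
                "safety": "meta-reference",
                "agentic_system": "meta-reference",
                "memory": "meta-reference-faiss",
                "telemetry": "console",
            },
        ),
    ]
    spec = resolve_distribution_spec("local-ollama", specs)
    assert spec is not None
    print(spec.providers["inference"])  # prints "remote::ollama"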