mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-12-17 16:32:38 +00:00

all distros

parent a097bfa761
commit 7103892f54

16 changed files with 79 additions and 62 deletions
The tgi distribution's run.yaml (the hunk counts, 12 lines down to 6, mark the chromadb and pgvector entries as the deletions):

@@ -25,12 +25,6 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db
-  - provider_id: chromadb
-    provider_type: remote::chromadb
-    config: {}
-  - provider_id: pgvector
-    provider_type: remote::pgvector
-    config: {}
   safety:
   - provider_id: llama-guard
     provider_type: inline::llama-guard
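The db_path above uses the `${env.VAR:default}` placeholder syntax seen throughout these run configs: the value comes from the named environment variable, falling back to the default after the colon. As a rough illustration only, a minimal resolver for that syntax could look like the sketch below (the resolve_env helper is hypothetical, not llama-stack's actual substitution code):

import os
import re

# Hypothetical resolver for the ${env.VAR:default} placeholders seen in the
# run.yaml hunk above; llama-stack's real substitution logic may differ.
_PLACEHOLDER = re.compile(r"\$\{env\.([A-Za-z0-9_]+)(?::([^}]*))?\}")

def resolve_env(value: str) -> str:
    """Replace each ${env.VAR:default} with os.environ[VAR], else its default."""
    def _sub(match: re.Match) -> str:
        var, default = match.group(1), match.group(2)
        return os.environ.get(var, default if default is not None else "")
    return _PLACEHOLDER.sub(_sub, value)

# With SQLITE_STORE_DIR unset, the default after the colon is used:
print(resolve_env("${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db"))
# -> ~/.llama/distributions/tgi/faiss_store.db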
The tgi template module (tgi.py); the one added line per the hunk counts is the FaissImplConfig import, which the new memory provider below needs:

@@ -7,6 +7,7 @@
 from pathlib import Path
 
 from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput
+from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
 from llama_stack.providers.remote.inference.tgi import TGIImplConfig
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
 
@@ -22,7 +23,7 @@ def get_distribution_template() -> DistributionTemplate:
         "datasetio": ["remote::huggingface", "inline::localfs"],
         "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
     }
-
+    name = "tgi"
     inference_provider = Provider(
         provider_id="tgi-inference",
         provider_type="remote::tgi",
@@ -30,6 +31,11 @@ def get_distribution_template() -> DistributionTemplate:
             url="${env.TGI_URL}",
         ),
     )
+    memory_provider = Provider(
+        provider_id="faiss",
+        provider_type="inline::faiss",
+        config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
+    )
 
     inference_model = ModelInput(
         model_id="${env.INFERENCE_MODEL}",
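The new memory_provider calls FaissImplConfig.sample_run_config(f"distributions/{name}"), which presumably expands to the sqlite-backed kvstore block in the run.yaml hunk above, with the distro name threaded into the db_path. The exact shape below is inferred from that hunk, not taken from the library:

# Assumed expansion of FaissImplConfig.sample_run_config("distributions/tgi"),
# reconstructed from the run.yaml hunk rather than from the library itself:
sample_config = {
    "kvstore": {
        "type": "sqlite",
        "namespace": None,
        "db_path": "${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db",
    }
}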
@@ -41,7 +47,7 @@ def get_distribution_template() -> DistributionTemplate:
     )
 
     return DistributionTemplate(
-        name="tgi",
+        name=name,
         distro_type="self_hosted",
         description="Use (an external) TGI server for running LLM inference",
         docker_image=None,
@@ -52,6 +58,7 @@ def get_distribution_template() -> DistributionTemplate:
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider],
+                    "memory": [memory_provider],
                 },
                 default_models=[inference_model],
             ),
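The provider_overrides keys mirror the providers: sections of the generated run.yaml, which is what ties the two hunks together: once the memory section is overridden to just the faiss provider, the chromadb and pgvector entries drop out of the rendered run.yaml, exactly as the first hunk shows. A rough sketch of that behavior, assuming an override replaces the full provider list for its API section (this is not llama-stack's actual merging code):

# Assumption: an entry in provider_overrides replaces the default provider
# list for that API section wholesale (sketch, not llama-stack's code).
default_providers = {
    "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
    "safety": ["inline::llama-guard"],
}
overrides = {"memory": ["inline::faiss"]}  # mirrors "memory": [memory_provider]

rendered = {**default_providers, **overrides}  # later dict wins per key
assert rendered["memory"] == ["inline::faiss"]        # chromadb/pgvector gone
assert rendered["safety"] == ["inline::llama-guard"]  # untouched sections kept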