mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
better
This commit is contained in:
parent
14372ddb0b
commit
46715e7889
4 changed files with 35 additions and 44 deletions
@@ -153,7 +153,7 @@ providers:
       storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/ci-tests/files}
       metadata_store:
         table_name: files_metadata
-        backend: sql_postgres
+        backend: sql_default
   safety:
   - provider_id: llama-guard
     provider_type: inline::llama-guard
@@ -234,10 +234,10 @@ providers:
     config:
       kvstore:
         namespace: batches
-        backend: kv_postgres
+        backend: kv_default
 storage:
   backends:
-    kv_postgres:
+    kv_default:
       type: kv_postgres
       host: ${env.POSTGRES_HOST:=localhost}
       port: ${env.POSTGRES_PORT:=5432}
@@ -245,7 +245,7 @@ storage:
       user: ${env.POSTGRES_USER:=llamastack}
       password: ${env.POSTGRES_PASSWORD:=llamastack}
       table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
-    sql_postgres:
+    sql_default:
       type: sql_postgres
       host: ${env.POSTGRES_HOST:=localhost}
       port: ${env.POSTGRES_PORT:=5432}
@@ -255,18 +255,18 @@ storage:
   stores:
     metadata:
       namespace: registry
-      backend: kv_postgres
+      backend: kv_default
     inference:
       table_name: inference_store
-      backend: sql_postgres
+      backend: sql_default
       max_write_queue_size: 10000
       num_writers: 4
     conversations:
       table_name: openai_conversations
-      backend: sql_postgres
+      backend: sql_default
     prompts:
       namespace: prompts
-      backend: kv_postgres
+      backend: kv_default
 registered_resources:
   models: []
   shields: []
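Taken together, the hunks above leave the ci-tests storage section shaped as in the sketch below, written as a Python dict for consistency with the template code later in this commit (names and env defaults are copied from the diff; fields outside the diff context are omitted). The backend entries keep their Postgres type but are now registered under the generic names kv_default and sql_default, so the individual store references no longer mention Postgres. The starter-gpu and starter run configs below receive the identical rename.

# Sketch of the resulting storage section, rendered as a Python dict
# (derived from the YAML hunks above; not code from the commit).
storage = {
    "backends": {
        "kv_default": {
            "type": "kv_postgres",
            "host": "${env.POSTGRES_HOST:=localhost}",
            "port": "${env.POSTGRES_PORT:=5432}",
            "user": "${env.POSTGRES_USER:=llamastack}",
            "password": "${env.POSTGRES_PASSWORD:=llamastack}",
            "table_name": "${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}",
        },
        "sql_default": {
            "type": "sql_postgres",
            "host": "${env.POSTGRES_HOST:=localhost}",
            "port": "${env.POSTGRES_PORT:=5432}",
            # remaining connection fields fall outside the diff context
        },
    },
    "stores": {
        "metadata": {"namespace": "registry", "backend": "kv_default"},
        "inference": {
            "table_name": "inference_store",
            "backend": "sql_default",
            "max_write_queue_size": 10000,
            "num_writers": 4,
        },
        "conversations": {"table_name": "openai_conversations", "backend": "sql_default"},
        "prompts": {"namespace": "prompts", "backend": "kv_default"},
    },
}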
@@ -153,7 +153,7 @@ providers:
       storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter-gpu/files}
       metadata_store:
         table_name: files_metadata
-        backend: sql_postgres
+        backend: sql_default
   safety:
   - provider_id: llama-guard
     provider_type: inline::llama-guard
@@ -237,10 +237,10 @@ providers:
     config:
       kvstore:
         namespace: batches
-        backend: kv_postgres
+        backend: kv_default
 storage:
   backends:
-    kv_postgres:
+    kv_default:
       type: kv_postgres
       host: ${env.POSTGRES_HOST:=localhost}
       port: ${env.POSTGRES_PORT:=5432}
@@ -248,7 +248,7 @@ storage:
       user: ${env.POSTGRES_USER:=llamastack}
       password: ${env.POSTGRES_PASSWORD:=llamastack}
       table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
-    sql_postgres:
+    sql_default:
       type: sql_postgres
       host: ${env.POSTGRES_HOST:=localhost}
       port: ${env.POSTGRES_PORT:=5432}
@@ -258,18 +258,18 @@ storage:
   stores:
     metadata:
       namespace: registry
-      backend: kv_postgres
+      backend: kv_default
     inference:
       table_name: inference_store
-      backend: sql_postgres
+      backend: sql_default
       max_write_queue_size: 10000
       num_writers: 4
     conversations:
       table_name: openai_conversations
-      backend: sql_postgres
+      backend: sql_default
     prompts:
       namespace: prompts
-      backend: kv_postgres
+      backend: kv_default
 registered_resources:
   models: []
   shields: []
@@ -153,7 +153,7 @@ providers:
       storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
       metadata_store:
         table_name: files_metadata
-        backend: sql_postgres
+        backend: sql_default
   safety:
   - provider_id: llama-guard
     provider_type: inline::llama-guard
@@ -234,10 +234,10 @@ providers:
     config:
      kvstore:
         namespace: batches
-        backend: kv_postgres
+        backend: kv_default
 storage:
   backends:
-    kv_postgres:
+    kv_default:
       type: kv_postgres
       host: ${env.POSTGRES_HOST:=localhost}
       port: ${env.POSTGRES_PORT:=5432}
@@ -245,7 +245,7 @@ storage:
       user: ${env.POSTGRES_USER:=llamastack}
       password: ${env.POSTGRES_PASSWORD:=llamastack}
       table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
-    sql_postgres:
+    sql_default:
      type: sql_postgres
       host: ${env.POSTGRES_HOST:=localhost}
       port: ${env.POSTGRES_PORT:=5432}
@@ -255,18 +255,18 @@ storage:
   stores:
     metadata:
       namespace: registry
-      backend: kv_postgres
+      backend: kv_default
     inference:
       table_name: inference_store
-      backend: sql_postgres
+      backend: sql_default
       max_write_queue_size: 10000
       num_writers: 4
     conversations:
       table_name: openai_conversations
-      backend: sql_postgres
+      backend: sql_default
     prompts:
       namespace: prompts
-      backend: kv_postgres
+      backend: kv_default
 registered_resources:
   models: []
   shields: []
@@ -160,15 +160,6 @@ def get_distribution_template(name: str = "starter") -> DistributionTemplate:
         provider_type="inline::localfs",
         config=files_config,
     )
-    postgres_files_config = dict(files_config)
-    postgres_metadata_store = dict(postgres_files_config.get("metadata_store", {}))
-    postgres_metadata_store["backend"] = "sql_postgres"
-    postgres_files_config["metadata_store"] = postgres_metadata_store
-    postgres_files_provider = Provider(
-        provider_id="meta-reference-files",
-        provider_type="inline::localfs",
-        config=postgres_files_config,
-    )
     embedding_provider = Provider(
         provider_id="sentence-transformers",
         provider_type="inline::sentence-transformers",
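With the generic backend name in place, the template no longer needs a second files provider whose metadata_store was forced onto sql_postgres: the ordinary files provider already points its metadata_store at sql_default, and the Postgres run config simply maps that backend to Postgres. A minimal sketch of the shared files configuration, with the dict shape inferred from the generated run.yaml hunks above (an assumption, not code from the commit):

# Hypothetical rendering of files_config; field names follow the YAML above.
files_config = {
    "storage_dir": "${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}",
    "metadata_store": {
        "table_name": "files_metadata",
        "backend": "sql_default",  # resolved against storage.backends at run time
    },
}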
@@ -197,7 +188,8 @@ def get_distribution_template(name: str = "starter") -> DistributionTemplate:
             provider_shield_id="${env.CODE_SCANNER_MODEL:=}",
         ),
     ]
-    postgres_config = PostgresSqlStoreConfig.sample_run_config()
+    postgres_sql_config = PostgresSqlStoreConfig.sample_run_config()
+    postgres_kv_config = PostgresKVStoreConfig.sample_run_config()
     default_overrides = {
         "inference": remote_inference_providers + [embedding_provider],
         "vector_io": [
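The single postgres_config is split into a SQL and a KV variant because the run config now declares two distinct default backends. Where each sampled config ends up is shown in the hunks further down; the sketch below only restates that wiring (the exact dict returned by sample_run_config() is not spelled out here):

# Both values are plain run-config dicts from the config classes' sample_run_config().
postgres_sql_config = PostgresSqlStoreConfig.sample_run_config()  # storage_backends["sql_default"], agents persistence/responses stores
postgres_kv_config = PostgresKVStoreConfig.sample_run_config()    # storage_backends["kv_default"]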
@@ -282,14 +274,13 @@ def get_distribution_template(name: str = "starter") -> DistributionTemplate:
         "run-with-postgres-store.yaml": RunConfigSettings(
             provider_overrides={
                 **default_overrides,
-                "files": [postgres_files_provider],
                 "agents": [
                     Provider(
                         provider_id="meta-reference",
                         provider_type="inline::meta-reference",
                         config=dict(
-                            persistence_store=postgres_config,
-                            responses_store=postgres_config,
+                            persistence_store=postgres_sql_config,
+                            responses_store=postgres_sql_config,
                         ),
                     )
                 ],
@@ -299,7 +290,7 @@ def get_distribution_template(name: str = "starter") -> DistributionTemplate:
                         provider_type="inline::reference",
                         config=dict(
                             kvstore=KVStoreReference(
-                                backend="kv_postgres",
+                                backend="kv_default",
                                 namespace="batches",
                             ).model_dump(exclude_none=True),
                         ),
@@ -307,24 +298,24 @@ def get_distribution_template(name: str = "starter") -> DistributionTemplate:
                 ],
             },
             storage_backends={
-                "kv_postgres": PostgresKVStoreConfig.sample_run_config(),
-                "sql_postgres": postgres_config,
+                "kv_default": postgres_kv_config,
+                "sql_default": postgres_sql_config,
             },
             storage_stores={
                 "metadata": KVStoreReference(
-                    backend="kv_postgres",
+                    backend="kv_default",
                     namespace="registry",
                 ).model_dump(exclude_none=True),
                 "inference": InferenceStoreReference(
-                    backend="sql_postgres",
+                    backend="sql_default",
                     table_name="inference_store",
                 ).model_dump(exclude_none=True),
                 "conversations": SqlStoreReference(
-                    backend="sql_postgres",
+                    backend="sql_default",
                     table_name="openai_conversations",
                 ).model_dump(exclude_none=True),
                 "prompts": KVStoreReference(
-                    backend="kv_postgres",
+                    backend="kv_default",
                     namespace="prompts",
                 ).model_dump(exclude_none=True),
             },
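The storage_stores entries above only name a backend plus a namespace or table; the mapping from that name to an actual Postgres (or SQLite) configuration happens through storage_backends. A hypothetical helper, assuming the dict shapes produced by model_dump(exclude_none=True) above, illustrates the indirection (this is not the actual Llama Stack resolution code):

# Resolve a store reference such as {"backend": "kv_default", "namespace": "registry"}
# against the storage_backends mapping built in this template.
def resolve_store(storage_backends: dict, store_ref: dict) -> dict:
    backend_cfg = storage_backends[store_ref["backend"]]  # e.g. postgres_kv_config
    extra = {k: v for k, v in store_ref.items() if k != "backend"}
    return {**backend_cfg, **extra}

# Example: the "metadata" store lands on whatever kv_default points to.
metadata_cfg = resolve_store(
    {"kv_default": {"type": "kv_postgres", "host": "localhost", "port": 5432}},
    {"backend": "kv_default", "namespace": "registry"},
)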