mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-16 20:52:37 +00:00
The Self-Hosted Distribution documentation contains steps to start the llama server via a conda environment. If the user generates the conda environment using the llama build command and a template, it generates an environment named after the distribution, not the default name of `local` used in the example run yaml file. The server will therefore fail to start when the user tries to run it. This PR fixes the conda env name in the run yaml file. Signed-off-by: Martin Hickey <martin.hickey@ie.ibm.com>
51 lines
1.1 KiB
YAML
# Llama Stack run configuration for the fireworks distribution.
# conda_env matches the distribution name generated by `llama stack build`
# (not the old default `local`), so `llama stack run` resolves the right env.
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: fireworks
# APIs this stack serves.
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
# Provider implementation backing each API.
providers:
  inference:
  - provider_id: fireworks0
    provider_type: remote::fireworks
    config:
      url: https://api.fireworks.ai/inference
      # api_key: <ENTER_YOUR_API_KEY>
  # Single safety key holding both shield providers (a duplicated
  # `safety:` mapping key is invalid YAML and would be silently dropped
  # by most parsers).
  safety:
  - provider_id: meta0
    provider_type: inline::llama-guard
    config:
      model: Llama-Guard-3-1B
      excluded_categories: []
  - provider_id: meta1
    provider_type: inline::prompt-guard
    config:
      model: Prompt-Guard-86M
  memory:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config: {}
  # Uncomment to use weaviate memory provider
  # - provider_id: weaviate0
  #   provider_type: remote::weaviate
  #   config: {}
  agents:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config:
      # Agent session state persisted to a local sqlite KV store.
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config: {}