version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
providers:
  inference:
  - provider_id: fireworks0
    provider_type: remote::fireworks
    config:
      url: https://api.fireworks.ai/inference
      # api_key:
  safety:
  - provider_id: meta0
    provider_type: inline::llama-guard
    config:
      model: Llama-Guard-3-1B
      excluded_categories: []
  - provider_id: meta1
    provider_type: inline::prompt-guard
    config:
      model: Prompt-Guard-86M
  memory:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  # Uncomment to use weaviate memory provider
  # - provider_id: weaviate0
  #   provider_type: remote::weaviate
  #   config: {}
  agents:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
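
# Illustrative sketch (not part of the original config): to authenticate the
# remote Fireworks inference provider, the commented-out api_key field under
# its config could be filled in, e.g.:
#
#   inference:
#   - provider_id: fireworks0
#     provider_type: remote::fireworks
#     config:
#       url: https://api.fireworks.ai/inference
#       api_key: <your-fireworks-api-key>   # placeholder value, supply your own key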