---
# llama-stack distribution run config.
# Declares which APIs this stack serves and which provider implements each one.
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local

# APIs exposed by this distribution.
apis:
  - shields
  - agents
  - models
  - memory
  - memory_banks
  - inference
  - safety

# One provider list per API; each entry maps a provider_id to its
# implementation (provider_type) and implementation-specific config.
providers:
  inference:
    # Primary chat/completion model.
    - provider_id: inference0
      provider_type: inline::meta-reference
      config:
        model: Llama3.2-3B-Instruct
        quantization: null
        torch_seed: null
        max_seq_len: 4096
        max_batch_size: 1
    # Second inference endpoint hosting the safety (guard) model.
    - provider_id: inference1
      provider_type: inline::meta-reference
      config:
        model: Llama-Guard-3-1B
        quantization: null
        torch_seed: null
        max_seq_len: 2048
        max_batch_size: 1

  safety:
    - provider_id: meta0
      provider_type: inline::llama-guard
      config:
        model: Llama-Guard-3-1B
        excluded_categories: []
    - provider_id: meta1
      provider_type: inline::prompt-guard
      config:
        model: Prompt-Guard-86M
    # Uncomment to use prompt guard
    # prompt_guard_shield:
    #   model: Prompt-Guard-86M

  memory:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config: {}
    # Uncomment to use pgvector
    # - provider_id: pgvector
    #   provider_type: remote::pgvector
    #   config:
    #     host: 127.0.0.1
    #     port: 5432
    #     db: postgres
    #     user: postgres
    #     password: mysecretpassword

  agents:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config:
        # Agent sessions are persisted to a local SQLite database.
        persistence_store:
          namespace: null
          type: sqlite
          db_path: ~/.llama/runtime/agents_store.db

  telemetry:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config: {}