version: '2'
built_at: '2024-10-08T17:42:33.690666'
image_name: local-gpu
docker_image: local-gpu
conda_env: null
apis:
- memory
- inference
- agents
- shields
- safety
- models
- memory_banks
providers:
  inference:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      model: Llama3.1-8B-Instruct
      quantization: null
      torch_seed: null
      max_seq_len: 4096
      max_batch_size: 1
  safety:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
  memory:
  - provider_id: meta-reference
    provider_type: meta-reference
    config: {}
  agents:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta-reference
    provider_type: meta-reference
    config: {}
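# A minimal usage sketch, assuming the llama-stack CLI is installed and that
# `llama stack run` accepts a path to a run configuration such as this file
# (exact flag names and defaults may differ by version):
#
#   llama stack run ./run.yaml --port 5000
#
# The server then serves the APIs listed above (inference, safety, memory,
# agents, ...), each backed by the meta-reference provider configured here.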