# Source: llama-stack-mirror/llama_toolchain/configs/hjshah.yaml
# Snapshot date: 2024-07-21 23:48:38 -07:00
---
# Inference configuration: run the model in-process ("inline") from a local
# PyTorch checkpoint rather than calling a remote inference service.
inference_config:
  impl_type: "inline"
  inline_config:
    checkpoint_type: "pytorch"
    # NOTE(review): user-specific absolute paths — parameterize or override
    # before reusing this config on another machine.
    checkpoint_dir: /home/hjshah/local/checkpoints/Meta-Llama-3.1-8B-Instruct-20240710150000
    tokenizer_path: /home/hjshah/local/checkpoints/Meta-Llama-3.1-8B-Instruct-20240710150000/tokenizer.model
    model_parallel_size: 1  # single GPU / no tensor parallelism
    max_seq_len: 2048
    max_batch_size: 1