# llama-stack-mirror/toolchain/configs/ashwin.yaml
# 2024-07-19 12:30:35 -07:00
# (9 lines, 367 B, YAML — viewer metadata retained as comments)
---
model_inference_config:
  # Implementation backend: "inline" runs inference in-process;
  # its settings come from inline_config below.
  impl_type: "inline"
  inline_config:
    # Serialization format of the checkpoint on disk.
    checkpoint_type: "pytorch"
    # NOTE(review): absolute, user-specific paths — this looks like a personal
    # dev config; parameterize/relocate before sharing. TODO confirm.
    checkpoint_dir: /home/ashwin/local/checkpoints/Meta-Llama-3.1-8B-Instruct-20240710150000
    tokenizer_path: /home/ashwin/local/checkpoints/Meta-Llama-3.1-8B-Instruct-20240710150000/tokenizer.model
    # Degree of model parallelism — presumably number of shards/ranks the
    # checkpoint is split across (1 = single process); verify against loader.
    model_parallel_size: 1
    # Maximum sequence length in tokens accepted by the model.
    max_seq_len: 2048
    # Maximum number of sequences per inference batch.
    max_batch_size: 1