diff --git a/toolchain/configs/ashwin.yaml b/toolchain/configs/ashwin.yaml
index 80e07df96..8eec6d923 100644
--- a/toolchain/configs/ashwin.yaml
+++ b/toolchain/configs/ashwin.yaml
@@ -2,9 +2,9 @@ model_inference_config:
   impl_type: "inline"
   inline_config:
     checkpoint_type: "pytorch"
-    checkpoint_dir: /home/ashwin/local/checkpoints/Meta-Llama-3.1-8B-Instruct-20240710150000
-    tokenizer_path: /home/ashwin/local/checkpoints/Meta-Llama-3.1-8B-Instruct-20240710150000/tokenizer.model
-    model_parallel_size: 1
+    checkpoint_dir: /home/ashwin/local/checkpoints/Meta-Llama-3.1-70B-Instruct-20240710150000
+    tokenizer_path: /home/ashwin/local/checkpoints/Meta-Llama-3.1-70B-Instruct-20240710150000/tokenizer.model
+    model_parallel_size: 8
     max_seq_len: 2048
     max_batch_size: 1
     quantization:
diff --git a/toolchain/safety/api/datatypes.py b/toolchain/safety/api/datatypes.py
index 907cdd21e..51a8f88c3 100644
--- a/toolchain/safety/api/datatypes.py
+++ b/toolchain/safety/api/datatypes.py
@@ -13,8 +13,6 @@ from toolchain.common.deployment_types import RestAPIExecutionConfig
 @json_schema_type
 class BuiltinShield(Enum):
     llama_guard = "llama_guard"
-    injection_shield = "injection_shield"
-    jailbreak_shield = "jailbreak_shield"
     code_scanner_guard = "code_scanner_guard"
     third_party_shield = "third_party_shield"
     injection_shield = "injection_shield"
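
Note on the config change above (commentary, not part of the diff): moving the inline checkpoint from the 8B to the 70B Instruct model is paired with raising model_parallel_size from 1 to 8, since Meta's 70B PyTorch checkpoints are typically sharded into eight model-parallel ranks and each rank loads one shard. Below is a minimal sketch of a sanity check for that pairing; InlineConfig, EXPECTED_MP_SIZE, and check_model_parallel_size are hypothetical names used for illustration, not part of the toolchain API.

from dataclasses import dataclass

# Hypothetical mapping for illustration only: typical shard counts for
# Meta-Llama-3.1 PyTorch checkpoints (8B ships as one shard, 70B as eight).
EXPECTED_MP_SIZE = {"8B": 1, "70B": 8}

@dataclass
class InlineConfig:  # stand-in for the inline_config block in the YAML above
    checkpoint_dir: str
    tokenizer_path: str
    model_parallel_size: int

def check_model_parallel_size(cfg: InlineConfig) -> None:
    # Compare the size tag embedded in the checkpoint directory name
    # against the configured number of model-parallel ranks.
    for size, expected in EXPECTED_MP_SIZE.items():
        if f"-{size}-" in cfg.checkpoint_dir and cfg.model_parallel_size != expected:
            raise ValueError(
                f"{size} checkpoint usually needs model_parallel_size={expected}, "
                f"got {cfg.model_parallel_size}"
            )

# Example: the new values from this diff pass the check.
check_model_parallel_size(
    InlineConfig(
        checkpoint_dir="/home/ashwin/local/checkpoints/Meta-Llama-3.1-70B-Instruct-20240710150000",
        tokenizer_path="/home/ashwin/local/checkpoints/Meta-Llama-3.1-70B-Instruct-20240710150000/tokenizer.model",
        model_parallel_size=8,
    )
)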