From 9c80a576673696ff084fe0b0907edee00870413f Mon Sep 17 00:00:00 2001 From: Botao Chen Date: Wed, 4 Dec 2024 20:26:52 -0800 Subject: [PATCH] remove unnecessary provider apis from experimental post training template --- .../recipes/lora_finetuning_single_device.py | 2 +- .../experimental-post-training/build.yaml | 10 ------ .../experimental-post-training/run.yaml | 33 +------------------ 3 files changed, 2 insertions(+), 43 deletions(-) diff --git a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py index ce8f10503..c97651a34 100644 --- a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +++ b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py @@ -163,7 +163,7 @@ class LoraFinetuningSingleDevice: log.info(f"Model is initialized with precision {self._dtype}.") self._tokenizer = await self._setup_tokenizer() - log.info("Tokenizer is initialized from file.") + log.info("Tokenizer is initialized.") self._optimizer = await self._setup_optimizer( optimizer_config=self.training_config.optimizer_config diff --git a/llama_stack/templates/experimental-post-training/build.yaml b/llama_stack/templates/experimental-post-training/build.yaml index 32afdbc0f..1461d0596 100644 --- a/llama_stack/templates/experimental-post-training/build.yaml +++ b/llama_stack/templates/experimental-post-training/build.yaml @@ -8,16 +8,6 @@ distribution_spec: - inline::torchtune datasetio: - remote::huggingface - inference: - - inline::meta-reference - memory: - - inline::faiss - - remote::chromadb - - remote::pgvector - safety: - - inline::llama-guard - agents: - - inline::meta-reference telemetry: - inline::meta-reference image_type: conda diff --git a/llama_stack/templates/experimental-post-training/run.yaml b/llama_stack/templates/experimental-post-training/run.yaml index 
3cda9c062..65ec858ce 100644 --- a/llama_stack/templates/experimental-post-training/run.yaml +++ b/llama_stack/templates/experimental-post-training/run.yaml @@ -3,45 +3,14 @@ image_name: experimental-post-training docker_image: null conda_env: experimental-post-training apis: -- agents -- inference -- memory -- safety - telemetry - datasetio - post_training providers: - inference: - - provider_id: meta-reference-inference - provider_type: inline::meta-reference - config: - model: ${env.INFERENCE_MODEL} - max_seq_len: 4096 - checkpoint_dir: ${env.INFERENCE_CHECKPOINT_DIR:null} datasetio: - provider_id: huggingface-0 provider_type: remote::huggingface config: {} - memory: - - provider_id: faiss - provider_type: inline::faiss - config: - kvstore: - type: sqlite - namespace: null - db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/faiss_store.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: {} - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - namespace: null - db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/agents_store.db telemetry: - provider_id: meta-reference provider_type: inline::meta-reference @@ -57,7 +26,7 @@ metadata_store: db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db models: - metadata: {} - model_id: ${env.INFERENCE_MODEL} + model_id: ${env.POST_TRAINING_MODEL} provider_id: meta-reference-inference provider_model_id: null shields: []