remove unnecessary provider apis from experimental post training template

This commit is contained in:
Botao Chen 2024-12-04 20:26:52 -08:00
parent 29a1ddce8a
commit 9c80a57667
3 changed files with 2 additions and 43 deletions

View file

@ -163,7 +163,7 @@ class LoraFinetuningSingleDevice:
log.info(f"Model is initialized with precision {self._dtype}.") log.info(f"Model is initialized with precision {self._dtype}.")
self._tokenizer = await self._setup_tokenizer() self._tokenizer = await self._setup_tokenizer()
log.info("Tokenizer is initialized from file.") log.info("Tokenizer is initialized.")
self._optimizer = await self._setup_optimizer( self._optimizer = await self._setup_optimizer(
optimizer_config=self.training_config.optimizer_config optimizer_config=self.training_config.optimizer_config

View file

@ -8,16 +8,6 @@ distribution_spec:
- inline::torchtune - inline::torchtune
datasetio: datasetio:
- remote::huggingface - remote::huggingface
inference:
- inline::meta-reference
memory:
- inline::faiss
- remote::chromadb
- remote::pgvector
safety:
- inline::llama-guard
agents:
- inline::meta-reference
telemetry: telemetry:
- inline::meta-reference - inline::meta-reference
image_type: conda image_type: conda

View file

@ -3,45 +3,14 @@ image_name: experimental-post-training
docker_image: null docker_image: null
conda_env: experimental-post-training conda_env: experimental-post-training
apis: apis:
- agents
- inference
- memory
- safety
- telemetry - telemetry
- datasetio - datasetio
- post_training - post_training
providers: providers:
inference:
- provider_id: meta-reference-inference
provider_type: inline::meta-reference
config:
model: ${env.INFERENCE_MODEL}
max_seq_len: 4096
checkpoint_dir: ${env.INFERENCE_CHECKPOINT_DIR:null}
datasetio: datasetio:
- provider_id: huggingface-0 - provider_id: huggingface-0
provider_type: remote::huggingface provider_type: remote::huggingface
config: {} config: {}
memory:
- provider_id: faiss
provider_type: inline::faiss
config:
kvstore:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/faiss_store.db
safety:
- provider_id: llama-guard
provider_type: inline::llama-guard
config: {}
agents:
- provider_id: meta-reference
provider_type: inline::meta-reference
config:
persistence_store:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/agents_store.db
telemetry: telemetry:
- provider_id: meta-reference - provider_id: meta-reference
provider_type: inline::meta-reference provider_type: inline::meta-reference
@ -57,7 +26,7 @@ metadata_store:
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
models: models:
- metadata: {} - metadata: {}
model_id: ${env.INFERENCE_MODEL} model_id: ${env.POST_TRAINING_MODEL}
provider_id: meta-reference-inference provider_id: meta-reference-inference
provider_model_id: null provider_model_id: null
shields: [] shields: []