forked from phoenix-oss/llama-stack-mirror
## context This is the follow-up to https://github.com/meta-llama/llama-stack/pull/674. Since torchtune is still in an alpha stage and its APIs are not guaranteed to be backward compatible, pin the torchtune and torchao package versions to avoid the latest torchtune release breaking llama stack post training. We will bump the version numbers manually, after testing, with each new package release. ## test pin an old torchtune pkg version (0.4.0) and confirm that 0.4.0 was installed <img width="1016" alt="Screenshot 2025-01-16 at 3 06 47 PM" src="https://github.com/user-attachments/assets/630b05d0-8d0d-4e2f-8b48-22e578a62659" />
25 lines
873 B
Python
25 lines
873 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
from typing import List
|
|
|
|
from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
|
|
|
|
|
|
def available_providers() -> List[ProviderSpec]:
    """Return the inline post-training provider specs for this distribution.

    Currently this registers a single provider, ``inline::torchtune``.
    torchtune and torchao versions are pinned because torchtune is still in
    alpha and its APIs are not guaranteed to be backward compatible; the pins
    are bumped manually after testing each new release.
    """
    torchtune_spec = InlineProviderSpec(
        api=Api.post_training,
        provider_type="inline::torchtune",
        # NOTE: torchtune/torchao are pinned deliberately — do not loosen
        # without re-testing post-training against the new release.
        pip_packages=["torch", "torchtune==0.5.0", "torchao==0.8.0", "numpy"],
        module="llama_stack.providers.inline.post_training.torchtune",
        config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig",
        # Post-training pulls its training data through the datasetio/datasets APIs.
        api_dependencies=[
            Api.datasetio,
            Api.datasets,
        ],
    )
    return [torchtune_spec]
|