mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 15:23:51 +00:00)
Changed from config to model_args

This commit is contained in:
parent 93472042f8
commit db4f18099f

2 changed files with 9 additions and 18 deletions
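This commit moves the int4 quantization metadata from the inference `config` object onto `model_args`. Below is a minimal sketch of the `quantization_args` shape the new code reads; the real definition lives in `llama_models.llama3.api.args`, and the field names here are inferred only from what the diff dereferences (scheme, group_size, spinquant), not copied from that module:

from dataclasses import dataclass
from typing import Optional


# Hypothetical stand-in for the quantization args carried on ModelArgs;
# fields are inferred from the attributes this diff reads.
@dataclass
class QuantizationArgsSketch:
    scheme: Optional[str] = None      # e.g. "int4_weight_int8_dynamic_activation"
    group_size: Optional[int] = None  # per-group quantization granularity
    spinquant: bool = False           # whether to add hadamard-transform wrappers


@dataclass
class ModelArgsSketch:
    quantization_args: Optional[QuantizationArgsSketch] = None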
@@ -151,15 +151,11 @@ class Llama:
         elif isinstance(config.quantization, Int4QuantizationConfig):
             from .quantization.loader import convert_to_int4_quantized_model
 
-            assert (
-                config.quantization.scheme is not None
-            ), "Please specify a quantization scheme."
-
             model = Transformer(model_args)
             model = convert_to_int4_quantized_model(model, model_args, config)
         model.load_state_dict(state_dict, strict=True)
 
-        if config.quantization.spinquant:
+        if model_args.quantization_args.spinquant:
             # Add a wrapper for adding hadamard transform for spinquant.
             # This needs to be done after loading the state dict otherwise an error will be raised while
             # loading the state dict.
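Note that the new guard dereferences `model_args.quantization_args` unconditionally, so this branch assumes `quantization_args` is populated whenever an Int4 config is in play. A defensive variant (ours, not in the commit) would tolerate its absence:

def spinquant_enabled(model_args) -> bool:
    # Defensive variant, not in the commit: avoid AttributeError on
    # .spinquant when quantization_args was never set.
    quant_args = getattr(model_args, "quantization_args", None)
    return quant_args is not None and quant_args.spinquant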
@@ -20,10 +20,6 @@ from llama_models.datatypes import CheckpointQuantizationFormat
 from llama_models.llama3.api.args import ModelArgs
 from llama_models.llama3.reference_impl.model import Transformer, TransformerBlock
 from llama_models.sku_list import resolve_model
-from termcolor import cprint
-from torch import nn, Tensor
-
-from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear
 
 from llama_stack.apis.inference import QuantizationType
 from llama_stack.apis.inference.inference import Int4QuantizationConfig
@@ -31,6 +27,10 @@ from llama_stack.apis.inference.inference import Int4QuantizationConfig
 from llama_stack.providers.impls.meta_reference.inference.config import (
     MetaReferenceQuantizedInferenceConfig,
 )
+from termcolor import cprint
+from torch import nn, Tensor
+
+from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear
 
 
 def swiglu_wrapper(
@@ -309,21 +309,16 @@ def convert_to_int4_quantized_model(
 ) -> Transformer:
     """Convert the model to int4 quantized model."""
 
-    quant_config = config.quantization
-    if not isinstance(quant_config, Int4QuantizationConfig):
-        raise ValueError("Only int4 quantization is supported")
-
-    if quant_config.type != QuantizationType.int4.value:
-        raise ValueError("Only int4 quantization is supported")
+    if model_args.quantization_args is None:
+        raise ValueError("'quantization_args' cannot be None. Please specify it.")
 
-    if quant_config.scheme != "int4_weight_int8_dynamic_activation":
+    quantization_args = model_args.quantization_args
+
+    if quantization_args.scheme != "int4_weight_int8_dynamic_activation":
         raise NotImplementedError(
             "Only int4 quantization with 'int4_weight_int8_dynamic_activation' scheme is supported."
         )
 
-    if model_args.quantization_args is None:
-        raise ValueError("'quantization_args' cannot be None. Please specify it.")
-
     group_size = model_args.quantization_args.group_size
     if group_size is None:
         raise ValueError(
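Taken together, the rewritten preamble of `convert_to_int4_quantized_model` validates `model_args.quantization_args` up front instead of inspecting `config.quantization`. A simplified sketch of the resulting control flow; the final error message is truncated in the hunk, so the placeholder text below is ours:

def validate_int4_args(model_args) -> None:
    # Mirrors the validation order introduced by this commit.
    if model_args.quantization_args is None:
        raise ValueError("'quantization_args' cannot be None. Please specify it.")

    quantization_args = model_args.quantization_args
    if quantization_args.scheme != "int4_weight_int8_dynamic_activation":
        raise NotImplementedError(
            "Only int4 quantization with 'int4_weight_int8_dynamic_activation' scheme is supported."
        )

    if quantization_args.group_size is None:
        # Placeholder message: the original string is cut off in the hunk.
        raise ValueError("group_size must be specified")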