From 37ebd142a2cc5bff9aeee04cb1cbb6e561187ea4 Mon Sep 17 00:00:00 2001
From: Laughing-q <1185102784@qq.com>
Date: Wed, 30 Oct 2024 20:46:48 +0800
Subject: [PATCH] Update exporter.py

---
 ultralytics/engine/exporter.py | 25 ++++++++++---------------
 1 file changed, 10 insertions(+), 15 deletions(-)

diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py
index 4c431756c..a0615680b 100644
--- a/ultralytics/engine/exporter.py
+++ b/ultralytics/engine/exporter.py
@@ -1093,29 +1093,24 @@ class Exporter:
 
         resource_utilization = mct.core.ResourceUtilization(weights_memory=3146176 * 0.76)
 
-        if not self.args.gptq:
-            # Perform post training quantization
-            quant_model, _ = mct.ptq.pytorch_post_training_quantization(
-                in_module=self.model,
+        quant_model = (
+            mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
+                model=self.model,
                 representative_data_gen=representative_dataset_gen,
                 target_resource_utilization=resource_utilization,
+                gptq_config=mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False),
                 core_config=config,
                 target_platform_capabilities=tpc,
-            )
-
-        else:
-            gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False)
-
-            # Perform Gradient-Based Post Training Quantization
-
-            quant_model, _ = mct.gptq.pytorch_gradient_post_training_quantization(
-                model=self.model,
+            )[0]
+            if self.args.gptq
+            else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
+                in_module=self.model,
                 representative_data_gen=representative_dataset_gen,
                 target_resource_utilization=resource_utilization,
-                gptq_config=gptq_config,
                 core_config=config,
                 target_platform_capabilities=tpc,
-            )
+            )[0]
+        )
 
         if self.args.nms:
             check_requirements("sony-custom-layers[torch]")