Skip to content

Commit

Permalink
[quantizer] fix AttributeError for missing `add_observer_` in torch_q (#220)
Browse files — browse the repository at this point in the history
  • Loading branch information
Unbinilium committed Apr 29, 2023
1 parent 1e4ed63 commit 2c7dbbd
Showing 1 changed file with 7 additions and 2 deletions.
9 changes: 7 additions & 2 deletions tinynn/graph/quantization/quantizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -3657,6 +3657,11 @@ def prepare_qat(
if hasattr(n.module, "qconfig"):
delattr(n.module, "qconfig")

if hasattr(torch_q, 'add_observer_'):
add_observer_func = torch_q.add_observer_
else:
add_observer_func = sys.modules['torch.ao.quantization.quantize']._add_observer_

if LooseVersion(torch.__version__) >= LooseVersion("1.8.0"):
if LooseVersion(torch.__version__) >= LooseVersion("1.13.0"):
prepare_custom_config_dict = torch.ao.quantization.get_default_custom_config_dict()
Expand All @@ -3667,13 +3672,13 @@ def prepare_qat(
"float_to_observed_custom_module_class", {}
)

torch_q.add_observer_(
add_observer_func(
graph.module,
qconfig_propagation_list=whitelist,
custom_module_class_mapping=custom_module_class_mapping,
)
else:
torch_q.add_observer_(
add_observer_func(
graph.module,
qconfig_propagation_list=whitelist,
)
Expand Down

0 comments on commit 2c7dbbd

Please sign in to comment.