extra_config = {
    'extra_qconfig_dict': {
        'w_observer': 'MSEObserver',               # custom weight observer
        'a_observer': 'MSEObserver',               # custom activation observer
        'w_fakequantize': 'AdaRoundFakeQuantize',  # custom weight fake quantize function
        'a_fakequantize': 'FixedFakeQuantize',     # custom activation fake quantize function
        'w_qscheme': {
            'bit': 8,             # custom bitwidth for weight
            'symmetry': False,    # custom whether quant is symmetric for weight
            'per_channel': True,  # custom whether quant is per-channel or per-tensor for weight
            'pot_scale': True,    # custom whether scale is power of two for weight
            'p': 2.4
        },
        'a_qscheme': {
            'bit': 8,             # custom bitwidth for activation
            'symmetry': False,    # custom whether quant is symmetric for activation
            'per_channel': True,  # custom whether quant is per-channel or per-tensor for activation
            'pot_scale': False,   # custom whether scale is power of two for activation
            'p': 2.4
        }
    },
    'extra_quantizer_dict': {
        'additional_function_type': [operator.add, ],  # additional function type, a list, use the function object, e.g. operator.add
        'additional_module_type': (MyConv, ),          # additional module type, a tuple, use the class object, e.g. torch.nn.Upsample
        # 'additional_node_name': ['layer1_1_conv1'],  # additional node name, a list, use the full node name, e.g. layer1_1_conv1
    },
}
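Note that the entries in 'additional_function_type' and 'additional_module_type' are the function/class objects themselves (operator.add, MyConv), not strings, so they must be imported wherever extra_config is built. A minimal sketch, assuming MyConv lives in mylayer.py as described below:

    import operator
    from mylayer import MyConv  # the custom layer defined below

    extra_quantizer_dict = {
        'additional_function_type': [operator.add],  # the function object itself
        'additional_module_type': (MyConv,),         # the class object itself
    }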
In the file mylayer.py, MyConv is defined:

    class MyConv(nn.Module):
        def forward(self, x): ...
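For concreteness, a minimal stand-in for the custom layer might look like this (a sketch only; the real MyConv in mylayer.py may differ):

    import torch.nn as nn

    class MyConv(nn.Module):
        # Illustrative stand-in: wraps a plain Conv2d so it can be registered
        # via 'additional_module_type' above.
        def __init__(self, in_channels, out_channels, kernel_size=3):
            super().__init__()
            self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                                  padding=kernel_size // 2)

        def forward(self, x):
            return self.conv(x)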
In the training code:
    extra_config = {
        'extra_qconfig_dict': {
            # ... identical to the extra_qconfig_dict shown above ...
        },
        'extra_quantizer_dict': {
            'additional_function_type': [operator.add, ],  # additional function type, a list, use the function object, e.g. operator.add
        }
    }
...
myModel = prepare_by_platform(myModel, BackendType.Academic, extra_config)
However, after print(myModel), I found that MyConv was not replaced. What should I do? Thanks!
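For reference, a minimal way to inspect the prepared model (a sketch; 'weight_fake_quant' is only used here as a rough indicator that a module was wrapped with weight fake quantization):

    from mylayer import MyConv

    # Print every submodule that is still a plain MyConv and whether it
    # carries a weight fake-quantize attribute.
    for name, module in myModel.named_modules():
        if isinstance(module, MyConv):
            print(name, type(module).__name__,
                  'has weight_fake_quant:', hasattr(module, 'weight_fake_quant'))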