Open Qm-jmz opened 3 months ago
I got same MACs and Params between before and after when purning yolov7-tiny, but yolov7 works well. this is my code:
import torch from models.experimental import attempt_load import torch_pruning as tp def purned(weights, device): # Load model model = attempt_load(weights, map_location=device) # load FP32 model print(model) ################################################################################ # Pruning example_inputs = torch.randn(1, 3, 640, 640).to(device) imp = tp.importance.MagnitudeImportance(p=2) # L2 norm pruning # imp = tp.importance.BNScaleImportance() ignored_layers = [] from models.yolo import Detect,IDetect for m in model.modules(): if isinstance(m, (Detect,IDetect)): ignored_layers.append(m) print(ignored_layers) iterative_steps = 1 # progressive pruning pruner = tp.pruner.MagnitudePruner( model, example_inputs, importance=imp, iterative_steps=iterative_steps, pruning_ratio=1, # remove 50% channels, ResNet18 = {64, 128, 256, 512} => ResNet18_Half = {32, 64, 128, 256} ignored_layers=ignored_layers, ) base_macs, base_nparams = tp.utils.count_ops_and_params(model, example_inputs) pruner.step() pruned_macs, pruned_nparams = tp.utils.count_ops_and_params(model, example_inputs) print(model) print("Before Pruning: MACs=%f G, #Params=%f G"%(base_macs/1e9, base_nparams/1e9)) print("After Pruning: MACs=%f G, #Params=%f G"%(pruned_macs/1e9, pruned_nparams/1e9)) #################################################################################### purned('best.pt', 'cuda')
what I get:
Fusing layers... IDetect.fuse Model( (model): Sequential( (0): Conv( (conv): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (1): Conv( (conv): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (2): Conv( (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (3): Conv( (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (4): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (5): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (6): Concat() (7): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (8): MP( (m): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (9): Conv( (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (10): Conv( (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (11): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (12): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (13): Concat() (14): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (15): MP( (m): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (16): Conv( (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (17): Conv( (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (18): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (19): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (20): Concat() (21): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (22): MP( (m): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (23): Conv( (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (24): Conv( (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (25): Conv( (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (26): Conv( (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (27): Concat() (28): Conv( (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (29): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (30): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (31): SP( (m): MaxPool2d(kernel_size=5, stride=1, padding=2, dilation=1, ceil_mode=False) ) (32): SP( (m): MaxPool2d(kernel_size=9, stride=1, padding=4, dilation=1, ceil_mode=False) ) (33): SP( (m): MaxPool2d(kernel_size=13, stride=1, padding=6, dilation=1, ceil_mode=False) ) (34): Concat() (35): Conv( (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (36): Concat() (37): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (38): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (39): Upsample(scale_factor=2.0, mode=nearest) (40): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (41): Concat() (42): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (43): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (44): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (45): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (46): Concat() (47): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (48): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (49): Upsample(scale_factor=2.0, mode=nearest) (50): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (51): Concat() (52): Conv( (conv): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (53): Conv( (conv): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (54): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (55): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (56): Concat() (57): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (58): Conv( (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (59): Concat() (60): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (61): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (62): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (63): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (64): Concat() (65): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (66): Conv( (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (67): Concat() (68): Conv( (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (69): Conv( (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (70): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (71): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (72): Concat() (73): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (74): Conv( (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (75): Conv( (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (76): Conv( (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (77): IDetect( (m): ModuleList( (0): Conv2d(128, 48, kernel_size=(1, 1), stride=(1, 1)) (1): Conv2d(256, 48, kernel_size=(1, 1), stride=(1, 1)) (2): Conv2d(512, 48, kernel_size=(1, 1), stride=(1, 1)) ) (ia): ModuleList( (0): ImplicitA() (1): ImplicitA() (2): ImplicitA() ) (im): ModuleList( (0): ImplicitM() (1): ImplicitM() (2): ImplicitM() ) ) ) ) [IDetect( (m): ModuleList( (0): Conv2d(128, 48, kernel_size=(1, 1), stride=(1, 1)) (1): Conv2d(256, 48, kernel_size=(1, 1), stride=(1, 1)) (2): Conv2d(512, 48, kernel_size=(1, 1), stride=(1, 1)) ) (ia): ModuleList( (0): ImplicitA() (1): ImplicitA() (2): ImplicitA() ) (im): ModuleList( (0): ImplicitM() (1): ImplicitM() (2): ImplicitM() ) )] C:\Users\AppData\Roaming\Python\Python310\site-packages\torch_pruning\dependency.py:667: UserWarning: Unwrapped parameters detected: ['model.77.ia.0.implicit', 'model.77.ia.2.implicit', 'model.77.im.2.implicit', 'model.77.im.0.implicit', 'model.77.ia.1.implicit', 'model.77.im.1.implicit']. Torch-Pruning will prune the last non-singleton dimension of these parameters. If you wish to change this behavior, please provide an unwrapped_parameters argument. warnings.warn(warning_str) Model( (model): Sequential( (0): Conv( (conv): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (1): Conv( (conv): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (2): Conv( (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (3): Conv( (conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (4): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (5): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (6): Concat() (7): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (8): MP( (m): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (9): Conv( (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (10): Conv( (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (11): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (12): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (13): Concat() (14): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (15): MP( (m): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (16): Conv( (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (17): Conv( (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (18): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (19): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (20): Concat() (21): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (22): MP( (m): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (23): Conv( (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (24): Conv( (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (25): Conv( (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (26): Conv( (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (27): Concat() (28): Conv( (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (29): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (30): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (31): SP( (m): MaxPool2d(kernel_size=5, stride=1, padding=2, dilation=1, ceil_mode=False) ) (32): SP( (m): MaxPool2d(kernel_size=9, stride=1, padding=4, dilation=1, ceil_mode=False) ) (33): SP( (m): MaxPool2d(kernel_size=13, stride=1, padding=6, dilation=1, ceil_mode=False) ) (34): Concat() (35): Conv( (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (36): Concat() (37): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (38): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (39): Upsample(scale_factor=2.0, mode=nearest) (40): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (41): Concat() (42): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (43): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (44): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (45): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (46): Concat() (47): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (48): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (49): Upsample(scale_factor=2.0, mode=nearest) (50): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (51): Concat() (52): Conv( (conv): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (53): Conv( (conv): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (54): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (55): Conv( (conv): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (56): Concat() (57): Conv( (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (58): Conv( (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (59): Concat() (60): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (61): Conv( (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (62): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (63): Conv( (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (64): Concat() (65): Conv( (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (66): Conv( (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (67): Concat() (68): Conv( (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (69): Conv( (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (70): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (71): Conv( (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (72): Concat() (73): Conv( (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (74): Conv( (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (75): Conv( (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (76): Conv( (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (act): LeakyReLU(negative_slope=0.1, inplace=True) ) (77): IDetect( (m): ModuleList( (0): Conv2d(128, 48, kernel_size=(1, 1), stride=(1, 1)) (1): Conv2d(256, 48, kernel_size=(1, 1), stride=(1, 1)) (2): Conv2d(512, 48, kernel_size=(1, 1), stride=(1, 1)) ) (ia): ModuleList( (0): ImplicitA() (1): ImplicitA() (2): ImplicitA() ) (im): ModuleList( (0): ImplicitM() (1): ImplicitM() (2): ImplicitM() ) ) ) ) Before Pruning: MACs=6.597421 G, #Params=0.006035 G After Pruning: MACs=6.597421 G, #Params=0.006035 G
Did you set the "requires_grad" of the modules that you want to prune to True?
你别说,还真是
I got same MACs and Params between before and after when purning yolov7-tiny, but yolov7 works well. this is my code:
what I get: