Closed 587687525 closed 1 year ago
please have a try with:
pruner = AutoCompressPruner(model=model, config_list=config_list, total_iteration=3, admm_params=admm_params,
sa_params=sa_params, log_dir='./log', keep_intermediate_result=True,
evaluator=evaluator, speedup=False)
disable speedup during pruning, and apply the speedup manually after `_, model, masks, _, _ = pruner.get_best_result()`
.
please have a try with:
pruner = AutoCompressPruner(model=model, config_list=config_list, total_iteration=3, admm_params=admm_params, sa_params=sa_params, log_dir='./log', keep_intermediate_result=True, evaluator=evaluator, speedup=False)
disable speedup during pruning, and apply the speedup manually after
_, model, masks, _, _ = pruner.get_best_result()
.
@J-shang I have changed the code, but the error still appears.
pruner = AutoCompressPruner(model=model, config_list=config_list, total_iteration=3, admm_params=admm_params,
sa_params=sa_params, log_dir='./log', keep_intermediate_result=True,
evaluator=evaluator, speedup=False)
# pruner = LevelPruner(model, config_list)
pruner.compress()
_, model, masks, _, _ = pruner.get_best_result()
ModelSpeedup(model, torch.randn(4, 3, 512, 512).to(torch.device('cuda')),
masks).speedup_model()
torch.save(model.state_dict(), osp.join(opt.Train.Checkpoint.checkpoint_dir, 'compressed.pth'))
hello @587687525 , I think you already got the solution in the WeChat group — either putting the model on the CPU or using a small `confidence` value works, 😃
Could you close this issue if you don't have any other problems? @587687525 thanks
I'm having the same issue when running speedup for TensorRT — can someone explain the solution?
Describe the bug: torch.cuda.OutOfMemoryError occurred when running to ModelSpeedup.
Environment:
Reproduce the problem
import nni import numpy as np import torch import tqdm from PIL import Image from nni.algorithms.compression.v2.pytorch import TorchEvaluator from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner from nni.compression.pytorch.speedup import ModelSpeedup from torch.nn import ParameterList from torch.nn import functional as F from torch.optim import Adam from torchvision import transforms
from lib.RhNet import RhNet_SwinB from utils.eval import evaluate_acc from utils.misc import load_config from lib.transforms import dynamic_resize, tonumpy, normalize, totensor # false-positive "unused import" warning
filepath = osp.split(osp.abspath(__file__))[0] repopath = osp.split(filepath)[0] sys.path.append(repopath)
torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False
def criterion(input, target):
    """Loss adapter for the NNI evaluator interface.

    The model's forward pass already computes its own loss and returns it
    inside the output dict under the ``'loss'`` key, so this wrapper simply
    extracts it. ``target`` is accepted only to satisfy the standard
    ``criterion(output, target)`` call signature and is ignored.
    """
    loss = input['loss']
    return loss
def training_func(model, optimizer, criterion, lr_schedulers=None, max_steps=None, max_epochs=None): print(f"[{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}] Starting Training.")
def evaluating_func(model): print(f"[{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}] Starting evaluation.") model.eval()
def main(opt, args): model = RhNet_SwinB(**opt.Model) model.load_state_dict(torch.load(args.weights, map_location=torch.device('cpu')), strict=True)
if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default=r"./weights/RhineSOD.pth", help="weights path") parser.add_argument('--gpu', '-g', action='store_true', default=True) parser.add_argument('--config', '-c', type=str, default='configs/RhineSOD.yaml') parser.add_argument('--imgsize', type=int, default=320, help='input image size') parser.add_argument('--thres', type=int, default=50) parser.add_argument('--original_path', type=str, default=r"G:\ML-Dataset\DUTS-TR\images", help="input image path") parser.add_argument('--label_path', type=str, default=r"G:\ML-Dataset\DUTS-TR\masks", help="input image path") parser.add_argument('--mask_path', type=str, default="./outputs/mask", help="output masked path") args = parser.parse_args()