This repository contains the dataset link and the code for our paper MCCG: A ConvNeXt-based Multiple-Classifier Method for Cross-view Geo-localization.
How to set target_layers after attention mechanism when using heatmaps #6
Hello, I am building follow-up work on top of your code. Thank you very much for sharing it; it has been a great help to me. I have modified your network by adding an attention mechanism after the ConvNeXt backbone, and I now want to visualize it with heatmaps, but I have run into a problem. Generating the heatmap for the last layer of ConvNeXt went smoothly, but I don't know how to obtain the heatmap after the attention mechanism: the attention module has no norm layer, so I am not sure what to set as target_layer. Could you offer any suggestions? They would be very helpful to me.
Here is my code:
```python
import argparse
import os
import math
import numpy as np
import torch
import yaml
from PIL import Image
import matplotlib.pyplot as plt
from torch.backends import cudnn
from torchvision import models
from torchvision import transforms
from utils import heatmap, show_cam_on_image, center_crop_img, load_network
from models.LSKnet.make_model import build_convnext
from datasets.queryDataset import Dataset_query, Query_transforms

parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu_ids', default='0', type=str, help='gpu_ids: e.g. 0  0,1,2  0,2')
parser.add_argument('--which_epoch', default='last', type=str, help='0,1,2,3...or last')
parser.add_argument('--test_dir', default='/root/autodl-tmp/University1652/test', type=str, help='./test_data')
parser.add_argument('--name', default='convnext_emakd', type=str, help='save model path')
parser.add_argument('--part', default='', type=str, help='test drone distance')
parser.add_argument('--mode', default=2, type=int, help='2:drone->satellite  1:satellite->drone')
parser.add_argument('--padmode', default='', type=str, help='bp or fp')
parser.add_argument('--pad', default=0, type=int, help='')
parser.add_argument('--pool', default='avg', type=str, help='avg|max')
parser.add_argument('--batchsize', default=8, type=int, help='batchsize')
parser.add_argument('--h', default=256, type=int, help='height')
parser.add_argument('--w', default=256, type=int, help='width')
parser.add_argument('--multi', action='store_true', help='use multiple query')
parser.add_argument('--fp16', action='store_true', help='use fp16.')
parser.add_argument('--ms', default='1', type=str, help='multiple_scale: e.g. 1 1,1.1  1,1.1,1.2')
parser.add_argument('--model', default='convnext_tiny', type=str, metavar='MODEL', help='Name of model to train')
opt = parser.parse_args()

# load config
# load the training config
yaml.warnings({'YAMLLoadWarning': False})
config_path = os.path.join('./model', opt.name, 'opts.yaml')
with open(config_path, 'r') as stream:
    config = yaml.load(stream, Loader=yaml.FullLoader)

opt.fp16 = config['fp16']
opt.fname = 'test.txt'
opt.views = config['views']
opt.block = config['block']
opt.share = config['share']
if 'resnet' in config:
    opt.resnet = config['resnet']
if 'h' in config:
    opt.h = config['h']
    opt.w = config['w']

if 'nclasses' in config:  # to be compatible with old config files
    opt.nclasses = config['nclasses']
else:
    opt.nclasses = 729

str_ids = opt.gpu_ids.split(',')
test_dir = opt.test_dir

gpu_ids = []
for str_id in str_ids:
    id = int(str_id)
    if id >= 0:
        gpu_ids.append(id)

print('We use the scale: %s' % opt.ms)
with open(os.path.join('model', opt.name, opt.fname), 'w', encoding='utf-8') as f:
    text = str('We use the scale: %s' % opt.ms) + '\n'
    f.write(text)
str_ms = opt.ms.split(',')
ms = []
for s in str_ms:
    s_f = float(s)
    ms.append(math.sqrt(s_f))

# set gpu ids
if len(gpu_ids) > 0:
    torch.cuda.set_device(gpu_ids[0])
    cudnn.benchmark = True


def main():
    model = models.mobilenet_v3_large(pretrained=True)


if __name__ == '__main__':
    main()
```
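To make the question concrete, here is a minimal, self-contained sketch of the kind of hook I am trying to set up (the `ToyAttention` block and the tiny backbone below are hypothetical stand-ins, not the real MCCG modules): capturing the attention block's output with `register_forward_hook` and collapsing it into a rough heatmap, since as far as I can tell a CAM target layer does not strictly have to be a norm layer.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Hypothetical stand-ins for the real backbone + attention block, only to keep
# the example self-contained; any nn.Module behaves the same way here.
class ToyAttention(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.proj = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x):
        return x * torch.sigmoid(self.proj(x))  # simple gating, no norm layer

backbone = nn.Sequential(
    nn.Conv2d(3, 64, 3, stride=2, padding=1), nn.ReLU(),
    nn.Conv2d(64, 64, 3, stride=2, padding=1), nn.ReLU(),
)
attention = ToyAttention(64)
model = nn.Sequential(backbone, attention).eval()

# Capture the attention block's output with a forward hook; the hooked module
# itself plays the role of the "target layer", no norm layer required.
feats = {}
hook = attention.register_forward_hook(lambda m, inp, out: feats.update(attn=out))

x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    model(x)
hook.remove()

# Collapse channels to a single activation map, upsample to the input size and
# normalize to [0, 1] -- the raw ingredient of a CAM-style heatmap overlay.
act = feats['attn'].mean(dim=1, keepdim=True)  # [1, 1, h, w]
act = F.interpolate(act, size=x.shape[-2:], mode='bilinear', align_corners=False)
heat = (act - act.min()) / (act.max() - act.min() + 1e-8)
print(heat.shape)  # torch.Size([1, 1, 256, 256])
```

I assume the same module handle is what would go into `target_layers` for a Grad-CAM-style tool as well, but I am not certain that is the intended usage.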