WeitaiKang / SegVG

[ECCV 2024] SegVG: Transferring Object Bounding Box to Segmentation for Visual Grounding

gref/best_checkpoint.pth get none result #4

Open lovehuanhuan opened 1 week ago

lovehuanhuan commented 1 week ago

With the code below, I get a [nan, nan, nan, nan] box output:

import torch
from PIL import Image
import os
import torch.utils.data as data
from torchvision import transforms
import matplotlib.pyplot as plt
from datasets import SegVGDataset
from datasets import InputExample
from datasets import convert_examples_to_features
from models import build_model
import argparse
from utils.misc import *
from pytorch_pretrained_bert.tokenization import BertTokenizer

from datasets import data_loader

def get_args_parser():
    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_bert', default=0., type=float)
    parser.add_argument('--lr_visu_cnn', default=0., type=float)
    parser.add_argument('--lr_visu_tra', default=1e-5, type=float)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--lr_power', default=0.9, type=float, help='lr poly power')
    parser.add_argument('--clip_max_norm', default=0., type=float,
                        help='gradient clipping max norm')
    parser.add_argument('--eval', dest='eval', default=False, action='store_true',
                        help='if evaluation only')
    parser.add_argument('--optimizer', default='rmsprop', type=str)
    parser.add_argument('--lr_scheduler', default='poly', type=str)
    parser.add_argument('--lr_drop', default=80, type=int)

    # Augmentation options
    parser.add_argument('--aug_blur', action='store_true',
                        help="If true, use gaussian blur augmentation")
    parser.add_argument('--aug_crop', action='store_true',
                        help="If true, use random crop augmentation")
    parser.add_argument('--aug_scale', action='store_true',
                        help="If true, use multi-scale augmentation")
    parser.add_argument('--aug_translate', action='store_true',
                        help="If true, use random translate augmentation")

    # Model parameters
    parser.add_argument('--model_name', type=str, default='SegVG',
                        help="Name of model to be exploited.")

    # Transformers in two branches
    parser.add_argument('--bert_enc_num', default=12, type=int)
    parser.add_argument('--detr_enc_num', default=6, type=int)

    # DETR parameters
    # * Backbone
    parser.add_argument('--backbone', default='resnet101', type=str,
                        help="Name of the convolutional backbone to use")
    parser.add_argument('--dilation', action='store_true',
                        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
                        help="Type of positional embedding to use on top of the image features")
    # * Transformer
    parser.add_argument('--enc_layers', default=6, type=int,
                        help="Number of encoding layers in the transformer")
    parser.add_argument('--dec_layers', default=0, type=int,
                        help="Number of decoding layers in the transformer")
    parser.add_argument('--dim_feedforward', default=2048, type=int,
                        help="Intermediate size of the feedforward layers in the transformer blocks")
    parser.add_argument('--hidden_dim', default=256, type=int,
                        help="Size of the embeddings (dimension of the transformer)")
    parser.add_argument('--dropout', default=0.1, type=float,
                        help="Dropout applied in the transformer")
    parser.add_argument('--nheads', default=8, type=int,
                        help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=100, type=int,
                        help="Number of query slots")
    parser.add_argument('--pre_norm', action='store_true')

    parser.add_argument('--imsize', default=640, type=int, help='image size')
    parser.add_argument('--emb_size', default=512, type=int,
                        help='fusion module embedding dimensions')
    # Vision-Language Transformer
    parser.add_argument('--use_vl_type_embed', action='store_true',
                        help="If true, use vl_type embedding")
    parser.add_argument('--vl_dropout', default=0.1, type=float,
                        help="Dropout applied in the vision-language transformer")
    parser.add_argument('--vl_nheads', default=8, type=int,
                        help="Number of attention heads inside the vision-language transformer's attentions")
    parser.add_argument('--vl_hidden_dim', default=256, type=int,
                        help='Size of the embeddings (dimension of the vision-language transformer)')
    parser.add_argument('--vl_dim_feedforward', default=2048, type=int,
                        help="Intermediate size of the feedforward layers in the vision-language transformer blocks")
    parser.add_argument('--vl_enc_layers', default=6, type=int,
                        help='Number of encoders in the vision-language transformer')

    # Dataset parameters
    parser.add_argument('--data_root', type=str, default='/data/kangweitai/VG/',
                        help='path to ReferIt splits data folder')
    parser.add_argument('--split_root', type=str, default='/data/kangweitai/VG/split/',
                        help='location of pre-parsed dataset info')
    parser.add_argument('--dataset', default='referit', type=str,
                        help='referit/flickr/unc/unc+/gref')
    parser.add_argument('--max_query_len', default=40, type=int,
                        help='maximum time steps (lang length) per batch')

    # dataset parameters
    parser.add_argument('--output_dir', default='./outputs',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=13, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--detr_model', default='./checkpoints/detr-r101-gref.pth', type=str, help='detr model')
    parser.add_argument('--bert_model', default='bert-base-uncased', type=str, help='bert model')
    parser.add_argument('--light', dest='light', default=False, action='store_true', help='if use smaller model')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--num_workers', default=2, type=int)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')

    # evaluation options
    parser.add_argument('--eval_set', default='text', type=str)
    parser.add_argument('--eval_model', default='', type=str)

    return parser

def load_image(image_path, imsize=640):
    """Load and preprocess a single image."""
    image = Image.open(image_path).convert("RGB")

    transform = transforms.Compose([
        transforms.Resize((imsize, imsize)),  # resize to a fixed size
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # normalize
    ])
    image = transform(image)

    return image
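As a quick standalone check (the path below is just a placeholder), load_image should return a 3x640x640 float tensor after the Resize and ToTensor steps:

# Standalone check, not part of the original script; './frame_120.png' is a placeholder path.
img = load_image('./frame_120.png', imsize=640)
print(img.shape)  # expected: torch.Size([3, 640, 640])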

def load_text_data(text_input, tokenizer, max_query_len=64):
    """Convert text into the model input format."""
    tokens = tokenizer.tokenize(text_input)
    tokens = ["[CLS]"] + tokens[:max_query_len - 2] + ["[SEP]"]  # add special tokens

    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    attention_mask = [1] * len(token_ids)

    # Pad to a fixed length
    padding_length = max_query_len - len(token_ids)
    token_ids += [0] * padding_length
    attention_mask += [0] * padding_length

    # Convert to tensors
    input_ids = torch.tensor([token_ids], dtype=torch.long)
    attention_mask = torch.tensor([attention_mask], dtype=torch.long)

    return input_ids, attention_mask
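Similarly, load_text_data can be exercised on its own; the phrase here is a placeholder and max_query_len=40 matches the parser default above:

# Standalone check, not part of the original script.
tok = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
ids, mask = load_text_data('the teacher near the blackboard', tok, max_query_len=40)
print(ids.shape, mask.shape)  # both torch.Size([1, 40])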

def prepare_nested_tensor(image, text_input, tokenizer, max_query_len=64, device='cuda'):
    """Jointly wrap the image and text data into NestedTensors and move them to the target device."""
    # Process the text data
    examples = read_examples(text_input, unique_id=0)
    features = convert_examples_to_features(examples, max_query_len, tokenizer)

    # Extract the text features
    input_ids = torch.tensor(features[0].input_ids, dtype=torch.long).to(device)
    attention_mask = torch.tensor(features[0].input_mask, dtype=torch.long).to(device)

    print(features[0].input_mask)
    print(features[0].input_ids)

    # Convert the image data to a tensor and move it to the device
    image = image.to(device)

    print('shape')
    print(image[0])

    # Generate the image mask
    # Assuming the padding value is 0, build a mask of the non-zero region
    # image_mask = (image.sum(dim=0) != 0).to(torch.bool).unsqueeze(0)  # add a batch dimension
    image_mask = torch.ones_like(image[0], dtype=torch.bool).unsqueeze(0)

    # Build the NestedTensor objects
    nested_tensor_text = NestedTensor(input_ids.unsqueeze(0), attention_mask.unsqueeze(0))
    nested_tensor_img = NestedTensor(image.unsqueeze(0), image_mask)

    return nested_tensor_img, nested_tensor_text
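One detail worth double-checking against the repo's utils/misc.py: in DETR-style code the NestedTensor image mask is usually boolean with True marking padded pixels and False marking valid ones. Under that (assumed) convention, an all-True mask such as the torch.ones_like above would flag every pixel as padding; a sketch of a "no padding" mask for a single 640x640 image would be:

# Sketch only, assuming the DETR "True = padding" convention (verify in utils/misc.py):
# a fully valid, unpadded 640x640 image gets an all-False mask of shape [1, H, W].
no_pad_mask = torch.zeros((1, 640, 640), dtype=torch.bool)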

def read_examples(text_input, unique_id=None):
    """
    Process custom text input and create InputExample objects.
    If no unique_id is provided, use the hash of the text as the unique identifier.
    """
    examples = []
    text_a = text_input

    # If no unique_id is provided, use the hash of the text
    if unique_id is None:
        unique_id = hash(text_input)
    examples.append(InputExample(unique_id=unique_id, text_a=text_a, text_b=None))
    return examples

# Example usage: pass in the text and generate the unique_id automatically
text_input = 'the teacher near the blackboard'
examples = read_examples(text_input)

def main(model_path, image_path, text_input, args):
    """Load the model and run inference."""
    # Load the model
    model = build_model(args)  # build the model via the SegVG model builder
    model.to(args.device)

    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    checkpoint = torch.load(model_path, map_location='cpu')  # load the pretrained weights
    missing_keys, unexpected_keys = model.load_state_dict(checkpoint['model'], strict=False)
    print('Missing keys')
    print(missing_keys)
    print('Unexpected keys')
    print(unexpected_keys)

    # model.eval()  # set to evaluation mode

    # Load the image
    image = load_image(image_path)
    image = image.to(args.device)

    # Load the text data
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
    nested_tensor_img, nested_tensor_text = prepare_nested_tensor(image, text_input, tokenizer, args.max_query_len, device=args.device)

    # Run inference
    with torch.no_grad():
        pred_box, _ = model(nested_tensor_img, nested_tensor_text)

    # Visualize the predictions (box and segmentation)
    pred_box = pred_box.cpu().numpy()
    # seg_output = seg_output.cpu().numpy()

    print("Predicted bounding box:", pred_box)

    # Visualize the segmentation mask (assuming a 2D output)
    # plt.imshow(seg_output.squeeze(), cmap='gray')
    # plt.title('Predicted Segmentation Mask')
    # plt.show()

if __name__ == '__main__':
    model_path = '/mnt/ckpt/gref/best_checkpoint.pth'  # replace with the actual model path
    image_path = './frame_120.png'  # replace with the path of the image to run inference on
    text_input = 'a woman with long hair'  # text describing the image

    parser = argparse.ArgumentParser('SegVG evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    # Call the main function to run inference
    main(model_path, image_path, text_input, args)
lovehuanhuan commented 4 days ago

The result is as below:

Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.
/usr/local/lib/python3.10/dist-packages/timm/models/layers/__init__.py:48: FutureWarning: Importing from timm.models.layers is deprecated, please import via timm.layers
  warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", FutureWarning)
/usr/local/lib/python3.10/dist-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
  warnings.warn(
/usr/local/lib/python3.10/dist-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or None for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing weights=None.
  warnings.warn(msg)
number of params: 143936517
/mnt/SegVG/run_example.py:230: FutureWarning: You are using torch.load with weights_only=False (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for weights_only will be flipped to True. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via torch.serialization.add_safe_globals. We recommend you start setting weights_only=True for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
  checkpoint = torch.load(model_path, map_location='cpu')  # load the pretrained weights
Missing keys
[]
Unexpected keys
['textmodel.embeddings.position_ids']
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[101, 1037, 2450, 2007, 2146, 2606, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
shape
tensor([[ 1.4612,  1.4954,  1.5297,  ...,  1.3755,  1.3927,  1.4783],
        [ 1.4612,  1.4954,  1.5297,  ...,  1.3755,  1.3927,  1.4783],
        [ 1.4612,  1.5125,  1.5297,  ...,  1.3755,  1.3927,  1.4783],
        ...,
        [-1.6384, -1.6727, -1.6555,  ...,  0.2967,  0.3481,  0.5193],
        [-1.6384, -1.6384, -1.6042,  ...,  0.2796,  0.3309,  0.4851],
        [-1.6042, -1.6042, -1.5528,  ...,  0.2796,  0.3309,  0.4851]], device='cuda:0')
/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py:1086: FutureWarning: The device argument is deprecated and will be removed in v5 of Transformers.
  warnings.warn(
Predicted bounding box: [[[nan nan nan nan]]

 [[nan nan nan nan]]

 [[nan nan nan nan]]

 [[nan nan nan nan]]

 [[nan nan nan nan]]

 [[nan nan nan nan]]]