When I try to reproduce the results using command below
test_eva_vitb16_macc_boxes_masks.sh
I ran into the error `AttributeError: 'NoneType' object has no attribute 'memory_efficient_attention'`, and it refers to this line
in forward: `x = xops.memory_efficient_attention(` at `CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py`, line 211.
Is there any bug in this code, or what should I do to avoid it?
Thanks.
————————————————————————————————————————
loading annotations into memory...
Done (t=0.31s)
creating index...
index created!
2024-04-10,08:09:54 | INFO | Evaluate before training
2024-04-10,08:09:54 | INFO | Region classifier
0%| | 0/5000 [00:00<?, ?it/s]2024-04-10,08:10:40 | INFO | Add a new rope freq of shape: torch.Size([4096, 64])
Add a new rope freq of shape: torch.Size([4096, 64])
0%| | 0/5000 [00:46<?, ?it/s]
Traceback (most recent call last):
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/main.py", line 346, in <module>
main(sys.argv[1:])
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/main.py", line 267, in main
evaluate(model, data, start_epoch, args)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/train.py", line 172, in evaluate
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/zero_shot.py", line 188, in zero_shot_eval
all_box_sizes, all_is_thing, all_cls_labels = run(model, data['val'].dataloader, args)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/zero_shot.py", line 72, in run
roi_features = roi_extractor(images, rois, normalize=True,
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/model.py", line 337, in encode_pseudo_boxes
features = self.visual.extract_roi_features(image, normed_boxes, extract_type=extract_type)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 626, in extract_roi_features
x = self.encode_dense(x, keep_shape=True)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 614, in encode_dense
x = blk(x, rel_pos_bias=rel_pos_bias)
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 306, in forward
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 211, in forward
x = xops.memory_efficient_attention(
AttributeError: 'NoneType' object has no attribute 'memory_efficient_attention'
Hi, thanks for your great work.
When I try to reproduce the results using command below
test_eva_vitb16_macc_boxes_masks.sh
I ran into the error `AttributeError: 'NoneType' object has no attribute 'memory_efficient_attention'`, and it refers to this line in forward: `x = xops.memory_efficient_attention(`
at `CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py`, line 211. Is there any bug in this code, or what should I do to avoid it? Thanks.
———————————————————————————————————————— loading annotations into memory... Done (t=0.31s) creating index... index created! 2024-04-10,08:09:54 | INFO | Evaluate before training 2024-04-10,08:09:54 | INFO | Region classifier 0%| | 0/5000 [00:00<?, ?it/s]2024-04-10,08:10:40 | INFO | Add a new rope freq of shape: torch.Size([4096, 64]) Add a new rope freq of shape: torch.Size([4096, 64]) 0%| | 0/5000 [00:46<?, ?it/s] Traceback (most recent call last): File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/runpy.py", line 197, in _run_module_as_main return _run_code(code, main_globals, None, File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/main.py", line 346, in <module>
main(sys.argv[1:])
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/main.py", line 267, in main
evaluate(model, data, start_epoch, args)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/train.py", line 172, in evaluate
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/zero_shot.py", line 188, in zero_shot_eval
all_box_sizes, all_is_thing, all_cls_labels = run(model, data['val'].dataloader, args)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/training/zero_shot.py", line 72, in run
roi_features = roi_extractor(images, rois, normalize=True,
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/model.py", line 337, in encode_pseudo_boxes
features = self.visual.extract_roi_features(image, normed_boxes, extract_type=extract_type)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 626, in extract_roi_features
x = self.encode_dense(x, keep_shape=True)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 614, in encode_dense
x = blk(x, rel_pos_bias=rel_pos_bias)
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 306, in forward
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/nkd-434/anaconda3/envs/clipself/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/home/nkd-434/Documents/jjy/MLC/CLIPSelf/src/open_clip/eva_clip/eva_vit_model.py", line 211, in forward
x = xops.memory_efficient_attention(
AttributeError: 'NoneType' object has no attribute 'memory_efficient_attention'