Open gilbertk422 opened 3 years ago
Can you try with CUDA 10.1 or CUDA 11?
@YuvalNirkin, I tried with both CUDA 10.1 and CUDA 11 but I am getting the following error
(init_fcgan) C:\data\dev\projects\fsgan\inference>python swap.py ../docs/examples/shinzo_abe.mp4 -t ../docs/examples/conan_obrien.mp4 -o . -b 1 -db 1 -pb 1 -lb 1 -sb 1 --finetune --finetune_save
=> using GPU devices: 0
=> Loading face pose model: "hopenet_robust_alpha1.pth"...
=> Loading face landmarks model: "hr18_wflw_landmarks.pth"...
=> Loading face segmentation model: "celeba_unet_256_1_2_segmentation_v2.pth"...
=> Loading face reenactment model: "nfv_msrunet_256_1_2_reenactment_v2.1.pth"...
=> Loading face completion model: "ijbc_msrunet_256_1_2_inpainting_v2.pth"...
=> Loading face blending model: "ijbc_msrunet_256_1_2_blending_v2.pth"...
OpenH264 Video Codec provided by Cisco Systems, Inc.
=> Finetuning the reenactment generator on: "shinzo_abe_seq00.mp4"...
0%| | 0/200 [00:05<?, ?batches/s]
Traceback (most recent call last):
File "swap.py", line 504, in <module>
main(**vars(parser.parse_args()))
File "swap.py", line 498, in main
face_swapping(source[0], target[0], output, select_source, select_target)
File "swap.py", line 280, in __call__
self.finetune(src_vid_seq_path, self.finetune_save)
File "swap.py", line 208, in finetune
img_pred = self.Gr(input)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\data\dev\projects\fsgan\models\res_unet.py", line 343, in forward
x = self.base.inner(x)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\data\dev\projects\fsgan\models\res_unet.py", line 192, in forward
return self.flat_block(x)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\data\dev\projects\fsgan\models\res_unet.py", line 154, in forward
return self.model(x)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\container.py", line 117, in forward
input = module(input)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\data\dev\projects\fsgan\models\res_unet.py", line 123, in forward
out = x + self.model(x)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\container.py", line 117, in forward
input = module(input)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\conv.py", line 423, in forward
return self._conv_forward(input, self.weight)
File "C:\ProgramData\Anaconda3\envs\init_fcgan\lib\site-packages\torch\nn\modules\conv.py", line 419, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: CUDA out of memory. Tried to allocate 64.00 MiB (GPU 0; 6.00 GiB total capacity; 4.03 GiB already allocated; 25.91 MiB free; 4.22 GiB reserved in total by PyTorch)
OS: Windows Python: 3.8 CUDA: 10.2 Torch: 1.7.0 TorchVision: 0.8.1
Reproduce: python inference/swap.py 1.mp4 -t 2.mp4 --gpus 0 -b 1 -pb 1 -db 1 -lb 1 -sb 1
Error: 0%| | 0/300 [00:01<?, ?frames/s] => using GPU devices: 0 => Loading face pose model: "hopenet_robust_alpha1.pth"... => Loading face landmarks model: "hr18_wflw_landmarks.pth"... => Loading face segmentation model: "celeba_unet_256_1_2_segmentation_v2.pth"... => Loading face reenactment model: "nfv_msrunet_256_1_2_reenactment_v2.1.pth"... => Loading face completion model: "ijbc_msrunet_256_1_2_inpainting_v2.pth"... => Loading face blending model: "ijbc_msrunet_256_1_2_blending_v2.pth"... => Detecting faces in video: "1.mp4..." Traceback (most recent call last): File "inference/swap.py", line 504, in <module>
main(**vars(parser.parse_args()))
File "inference/swap.py", line 498, in main
face_swapping(source[0], target[0], output, select_source, select_target)
File "inference/swap.py", line 239, in __call__
source_cache_dir, source_seq_filepath, = self.cache(source_path)
File "D:\Work\fsgan\fsgan\preprocess\preprocess_video.py", line 446, in cache
self.face_detector(input_path, det_file_path)
File "D:\Work\fsgan\face_detection_dsfd\face_detector.py", line 92, in __call__
detections_batch = self.net(frame_tensor_batch)
File "D:\Work\fsgan\fsgan\venv\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\Work\fsgan\face_detection_dsfd\face_ssd_infer.py", line 398, in forward
conv3_3_x = self.layer1(x)
File "D:\Work\fsgan\fsgan\venv\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\Work\fsgan\fsgan\venv\lib\site-packages\torch\nn\modules\container.py", line 117, in forward
input = module(input)
File "D:\Work\fsgan\fsgan\venv\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\Work\fsgan\fsgan\venv\lib\site-packages\torch\nn\modules\conv.py", line 423, in forward
return self._conv_forward(input, self.weight)
File "D:\Work\fsgan\fsgan\venv\lib\site-packages\torch\nn\modules\conv.py", line 419, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: cuDNN error: CUDNN_STATUS_ALLOC_FAILED