Traceback (most recent call last):
File "diffusion/ImageFusion-main/FusionDiff/inference.py", line 82, in
valid(config_path, model_path, timestr)
File "/diffusion/ImageFusion-main/FusionDiff/inference.py", line 73, in valid
diffusion.sample(model, valid_sourceImg1, valid_sourceImg2, add_noise, concat_type, model_name, model_path,
File "/opt/conda/envs/torch/lib/python3.9/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/share1/workspaceCQ/hongwai/diffusion/ImageFusion-main/FusionDiff/Diffusion.py", line 186, in sample
imgs = self.p_sample_loop(model, sourceImg1, sourceImg2, concat_type, add_noise, log_info)
File "/opt/conda/envs/torch/lib/python3.9/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/diffusion/ImageFusion-main/FusionDiff/Diffusion.py", line 176, in p_sample_loop
imgs = self.p_sample(model, sourceImg1, sourceImg2, imgs, t, concat_type, add_noise)
File "/opt/conda/envs/torch/lib/python3.9/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "//diffusion/ImageFusion-main/FusionDiff/Diffusion.py", line 144, in p_sample
model_mean, _, model_log_variance = self.p_mean_variance(
File "/diffusion/ImageFusion-main/FusionDiff/Diffusion.py", line 125, in p_mean_variance
input = torch.cat([sourceImg1, sourceImg2, x_t], dim=1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 624 but got size 200 for tensor number 1 in the list.
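The failure is in the torch.cat call inside p_mean_variance: concatenating along dim=1 (channels) requires every other dimension to agree, and here tensor number 1 in the list (sourceImg2) has spatial size 200 where the others have 624. The sketch below is not FusionDiff code; the shapes and the interpolate-based workaround are assumptions used only to reproduce the error and show one way to make the inputs compatible.

    import torch
    import torch.nn.functional as F

    # Hypothetical shapes matching the error message: source image 2 is smaller
    # than source image 1 and the noisy sample x_t.
    sourceImg1 = torch.randn(1, 1, 624, 624)
    sourceImg2 = torch.randn(1, 1, 200, 200)
    x_t = torch.randn(1, 1, 624, 624)

    # torch.cat([sourceImg1, sourceImg2, x_t], dim=1)  # raises the same RuntimeError

    # One possible workaround: resize the mismatched source to the common spatial
    # size before concatenation (whether interpolation or cropping is appropriate
    # depends on how the dataset pairs the two source images).
    sourceImg2 = F.interpolate(sourceImg2, size=sourceImg1.shape[-2:],
                               mode="bilinear", align_corners=False)
    inp = torch.cat([sourceImg1, sourceImg2, x_t], dim=1)
    print(inp.shape)  # torch.Size([1, 3, 624, 624])

In practice the mismatch usually comes from the data loading side (the two source images are not the same resolution), so aligning them in the dataset/transform pipeline is likely cleaner than patching the sampling code.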