File "/DifFace/sampler.py", line 485, in inference
sample = _process_batch_aligned(y0, cond_kwargs, model_kwargs_ir)
File "/DifFace/sampler.py", line 374, in _process_batch_aligned
sample, = self.sample_func_ir_aligned(
File "/DifFace/sampler.py", line 223, in sample_func_ir_aligned
sample = self.diffusion.ddim_sample_loop(
File "/DifFace/models/gaussian_diffusion.py", line 870, in ddim_sample_loop
for sample in self.ddim_sample_loop_progressive(
File "/DifFace/models/gaussian_diffusion.py", line 935, in ddim_sample_loop_progressive
out = self.ddim_sample(
File "/DifFace/models/gaussian_diffusion.py", line 808, in ddim_sample
out = self.p_mean_variance(
File "/DifFace/models/respace.py", line 88, in p_mean_variance
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
File "/DifFace/models/gaussian_diffusion.py", line 350, in p_mean_variance
pred_xstart = self._refine_xstart(
File "/DifFace/models/gaussian_diffusion.py", line 394, in _refine_xstart
grad = th.autograd.grad(
File "/usr/local/lib/python3.10/dist-packages/torch/autograd/__init__.py", line 399, in grad
result = Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
Environment
Issue
When trying to inpaint, the traceback above is raised.
Workaround
https://github.com/zsyOAOA/DifFace/blob/124bd2ce595d1c94afa0a662f9b9c0f2556689f7/models/gaussian_diffusion.py#L395 Checking `torch.Tensor.requires_grad` shows that `requires_grad` of the loss tensor is `False`, so `torch.autograd.grad` has nothing to differentiate. Thus, the regularizer/refinement step should be run with autograd enabled (e.g. inside a `torch.enable_grad()` context, with the input marked `requires_grad_(True)`).