Closed choROPeNt closed 1 year ago
Okay, I solved it with:
image = []
for step, i in enumerate(indices):
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if (step+1) % 5 == 0:
print('step',step)
samples = out["sample"] # [1 3 256 256] double
samples = ((samples + 1) * 127.5).clamp(0, 255).to(th.uint8) # [1 3 256 256] unit8
samples = samples.permute(0, 2, 3, 1) # # [1 256 256 3] unit8
samples = samples.contiguous() # [1 256 256 3] unit8
image.extend([sample.cpu().numpy() for sample in [samples]]) #
arr = np.concatenate(image, axis=0)
img = Image.fromarray(arr[-1])
out_path = os.path.join(logger.get_dir(), f"samples_{label}_{str(step).zfill(4)}.npz")
out_image = os.path.join(logger.get_dir(), f"samples_{label}_{str(step).zfill(4)}.tif")
img.save(out_image, compression='raw')
np.savez(out_path, arr[-1])
yield out
img = out["sample"]
looks cool
How can I save the output every n timesteps of the diffusion sampling loop? So far I have edited lines of code in
p_sample_loop_progressive()
with something like the above, but I always get the fully noisy image.
Many thanks in advance