openai / guided-diffusion


Sample during training problem #128

Open YI-HAO-SU opened 8 months ago

YI-HAO-SU commented 8 months ago

Recently, I tried using the great code the author provided for my own task. However, the sampling output was not as expected: the shapes of objects are recognizable in the generated images, but the colors are very different from the source images. To check whether the model is trained well and has converged, I would like to sample during training. However, the images generated this way are pure noise. Can anyone help me solve this problem?

Below is the code I modified for this purpose:

```python

# Imports needed by this snippet (the methods below live on the TrainLoop class).
import functools
import os

import torch as th
from torchvision.utils import save_image

from guided_diffusion import dist_util, logger
from guided_diffusion.resample import LossAwareSampler
from guided_diffusion.train_util import log_loss_dict


def run_loop(self):
    i = 0
    data_iter = iter(self.dataloader)
    while (
        not self.lr_anneal_steps
        or self.step + self.resume_step < self.lr_anneal_steps
    ):
        try:
            batch, cond = next(data_iter)
        except StopIteration:
            # StopIteration is thrown when the dataset ends;
            # reinitialize the data loader.
            data_iter = iter(self.dataloader)
            batch, cond = next(data_iter)

        sample = self.run_step(batch, cond)
        # Save the sample returned by the training step.
        # Note: this overwrites the same file on every iteration.
        save_image(sample[0], '/work/kevin20307/Difseg/NCKU/model/model_step_resume_1000/img1.png')

        i += 1

        if self.step % self.log_interval == 0:
            logger.dumpkvs()
        if self.step % self.save_interval == 0:
            self.save()
            # Run for a finite amount of time in integration tests.
            if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                return
        self.step += 1
    # Save the last checkpoint if it wasn't already saved.
    if (self.step - 1) % self.save_interval != 0:
        self.save()

def run_step(self, batch, cond):
    # Concatenate the image and the conditioning map along the channel
    # dimension, then clear cond so no extra model kwargs are passed on.
    batch = th.cat((batch, cond), dim=1)
    cond = {}
    sample = self.forward_backward(batch, cond)
    took_step = self.mp_trainer.optimize(self.opt)
    if took_step:
        self._update_ema()
    self._anneal_lr()
    self.log_step()
    return sample

def forward_backward(self, batch, cond):
    self.mp_trainer.zero_grad()
    for i in range(0, batch.shape[0], self.microbatch):
        micro = batch[i : i + self.microbatch].to(dist_util.dev())
        micro_cond = {
            k: v[i : i + self.microbatch].to(dist_util.dev())
            for k, v in cond.items()
        }
        last_batch = (i + self.microbatch) >= batch.shape[0]
        t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())

        compute_losses = functools.partial(
            self.diffusion.training_losses_segmentation,
            self.ddp_model,
            self.classifier,
            micro,
            t,
            model_kwargs=micro_cond,
        )

        if last_batch or not self.use_ddp:
            losses1 = compute_losses()
        else:
            with self.ddp_model.no_sync():
                losses1 = compute_losses()

        # Unpack before the sampler update: the original code referenced
        # `losses` before it was assigned, raising a NameError here.
        losses = losses1[0]
        sample = losses1[1]

        if isinstance(self.schedule_sampler, LossAwareSampler):
            self.schedule_sampler.update_with_local_losses(
                t, losses["loss"].detach()
            )

        loss = (losses["loss"] * weights).mean()

        log_loss_dict(
            self.diffusion, t, {k: v * weights for k, v in losses.items()}
        )
        self.mp_trainer.backward(loss)
    # Return after the loop so every microbatch contributes a gradient;
    # returning inside the loop would process only the first microbatch.
    return sample
```

And the generated img1 looks like this: [attached image: a noise-like sample]
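A likely cause, assuming `training_losses_segmentation` behaves like the upstream `training_losses`: the `sample` it returns is the model's prediction at a single randomly drawn timestep `t`, not the result of running the full reverse diffusion process, so saving it directly will usually look like noise. Below is a minimal sketch of how one might instead generate a proper sample during training using the repo's `p_sample_loop`; the helper name `sample_during_training` and the `image_size`/`channels` defaults are assumptions to adapt to your setup.

```python
import torch as th

from guided_diffusion import dist_util


def sample_during_training(self, image_size=256, channels=3):
    """Hypothetical helper on the TrainLoop class: run the full reverse
    process to obtain a clean sample, instead of saving the
    single-timestep prediction returned by the loss function."""
    model = self.ddp_model
    model.eval()
    with th.no_grad():
        # p_sample_loop starts from pure noise and runs all T
        # denoising steps to produce a final image.
        sample = self.diffusion.p_sample_loop(
            model,
            (1, channels, image_size, image_size),
            clip_denoised=True,
            model_kwargs={},
            device=dist_util.dev(),
        )
    model.train()
    # Map from [-1, 1] back to [0, 1] for save_image.
    return (sample + 1) / 2
```

Calling something like this every `save_interval` steps and writing the result with `save_image` should show what the model has actually learned; if those samples are still noisy, the problem is convergence rather than the saving code.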