heykeetae / Self-Attention-GAN

Pytorch implementation of Self-Attention Generative Adversarial Networks (SAGAN)

Added tensorboard logger #13

Open JohnnyRisk opened 6 years ago

JohnnyRisk commented 6 years ago

You can add TensorBoard logging to train.py by inserting the code below at line 180. Note that it still needs to be modified: l4 does not exist if imsize is less than 64, and the network fails if imsize is greater than 64. When the larger imsize is fixed I will issue a pull request. build_tensorboard should also be changed, as follows. Hope this helps!

def build_tensorboard(self):
    import os
    import shutil
    from logger import Logger
    # Remove any stale event files so each run starts with a clean log directory.
    if os.path.exists(self.log_path):
        shutil.rmtree(self.log_path)
    os.makedirs(self.log_path)
    self.logger = Logger(self.log_path)
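
For this to work, build_tensorboard needs a Logger class in logger.py that exposes scalar_summary, histo_summary and image_summary (the calls used further down). If your checkout does not have one, a minimal sketch on top of torch.utils.tensorboard could look like the following; only SummaryWriter and its add_* methods are standard PyTorch, the rest just mirrors the calls in this snippet:

from torch.utils.tensorboard import SummaryWriter

class Logger(object):
    """Thin wrapper matching the scalar/histo/image_summary calls used here."""

    def __init__(self, log_dir):
        # Event files are written under log_dir (self.log_path above).
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # Log a single scalar (e.g. a loss value) at a given step.
        self.writer.add_scalar(tag, value, step)

    def histo_summary(self, tag, values, step):
        # Log a histogram of a parameter array.
        self.writer.add_histogram(tag, values, step)

    def image_summary(self, tag, images, step):
        # Log a batch of images given as an (N, C, H, W) array.
        self.writer.add_images(tag, images, step)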

Insert the following at line 180 in train.py, in the "Print out log info" section:

        if (step + 1) % self.log_step == 0:
            elapsed = time.time() - start_time
            elapsed = str(datetime.timedelta(seconds=elapsed))
            print("Elapsed [{}], G_step [{}/{}], D_step[{}/{}], d_out_real: {:.4f}, "
                  " ave_gamma_l3: {:.4f}, ave_gamma_l4: {:.4f}".
                  format(elapsed, step + 1, self.total_step, (step + 1),
                         self.total_step, d_loss_real.item(),
                         self.G.attn1.gamma.mean().item(), self.G.attn2.gamma.mean().item()))

            # (1) Log values of the losses (scalars)
            info = {
                'd_loss_real': d_loss_real.item(),
                'd_loss_fake': d_loss_fake.item(),
                'd_loss': d_loss.item(),
                'g_loss_fake': g_loss_fake.item(),
                'ave_gamma_l3': self.G.attn1.gamma.mean().item(),
                'ave_gamma_l4': self.G.attn2.gamma.mean().item(),
            }

            for tag, value in info.items():
                self.logger.scalar_summary(tag, value, step + 1)

        # Sample images / Save and log
        if (step + 1) % self.sample_step == 0:

            # (2) Log values and gradients of the parameters (histogram)
            for net, name in zip([self.G, self.D], ['G_', 'D_']):
                for tag, value in net.named_parameters():
                    tag = name + tag.replace('.', '/')
                    self.logger.histo_summary(tag, self.to_np(value), step + 1)

            # (3) Log the images
            info = {

                'fake_images': self.to_np(fake_images.view(*display_vars)[:10, :, :, :]),
                'real_images': self.to_np(real_images.view(*display_vars)[:10, :, :, :]),
            }

            fake_images, _, _ = self.G(fixed_z)
            save_image(denorm(fake_images.data),
                       os.path.join(self.sample_path, '{}_fake.png'.format(step + 1)))

            info['fixed_fake_images'] = self.to_np(denorm(real_images.data).view(*display_vars)[:10, :, :, :])

            for tag, image in info.items():
                self.logger.image_summary(tag, image, step + 1)
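
One caveat from the note above: when imsize is below 64, the generator does not have the l4 attention layer, so self.G.attn2 is unavailable and the gamma logging breaks. A possible workaround, sketched under the assumption that the smaller build simply lacks an attn2 attribute, is to guard the scalar block (1):

            # (1) Log values of the losses (scalars), guarding the optional
            # second attention layer (assumption: small-imsize builds have no attn2)
            info = {
                'd_loss_real': d_loss_real.item(),
                'd_loss_fake': d_loss_fake.item(),
                'd_loss': d_loss.item(),
                'g_loss_fake': g_loss_fake.item(),
                'ave_gamma_l3': self.G.attn1.gamma.mean().item(),
            }
            if hasattr(self.G, 'attn2'):
                info['ave_gamma_l4'] = self.G.attn2.gamma.mean().item()

            for tag, value in info.items():
                self.logger.scalar_summary(tag, value, step + 1)
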
JohnnyRisk commented 6 years ago

Apologies, please replace info['fixed_fake_images'] with the following line: info['fixed_fake_images'] = self.to_np(denorm(fake_images.data).view(*display_vars)[:5, :, :, :])

dunkle commented 5 years ago

Could you share the full code?

dunkle commented 5 years ago

Could you share the complete code? Thanks.