Issue opened by SandroChen, 3 years ago.
Since G.img_resolution
is not something you can change, as it's saved when the model is instantiated and trained, I find it easier to simply resize the generated image to the size you desire using, e.g., PIL.Image
:
from PIL import Image
import torch
import dnnlib, legacy
def load_model(device, url):
    """Load a pickled StyleGAN generator and move its EMA copy to *device*.

    *url* may be a local path or a remote URL (``dnnlib.util.open_url``
    handles both). Returns the ``G_ema`` network from the pickle.
    """
    with dnnlib.util.open_url(url) as fp:
        networks = legacy.load_network_pkl(fp)
    return networks['G_ema'].to(device)
if __name__ == '__main__':  # was "if name == 'main'" — underscores lost in markdown
    device = 'cuda'
    G = load_model(device, './model_zoo/ffhq_stylegan3.pkl')
    # Sample a latent and map it to W space; truncation trades diversity for quality.
    z = torch.randn(1, 512).to(device)
    ws = G.mapping(z, 0, truncation_psi=0.6)
    imgs = G.synthesis(ws)
    # NCHW -> NHWC (fixed: original comment said "NCWH => NWHC") and [-1, 1] -> [0, 255]
    imgs = ((imgs.permute(0, 2, 3, 1) + 1) * 127.5).clamp(0, 255).to(torch.uint8).cpu().numpy()
    print(imgs.shape)  # Should be (1, 1024, 1024, 3) for the FFHQ model
    # Downscale after generation since G.img_resolution is fixed at training time.
    img = Image.fromarray(imgs[0], 'RGB').resize(size=(512, 512), resample=Image.LANCZOS)  # or center-crop
Then you can save img
or do what you want with it.
I see that the parameter block_resolutions was removed in StyleGAN3, and I cannot find a way to set the output size. Setting img_resolution doesn't work, as shown below:
# Reformatted: this snippet was collapsed onto single lines and the dunder
# underscores (__name__ / __main__) were stripped by markdown rendering.
import torch
import dnnlib, legacy


def load_model(device, url):
    """Load a pickled StyleGAN generator's EMA copy onto *device*."""
    with dnnlib.util.open_url(url) as f:
        generator = legacy.load_network_pkl(f)['G_ema'].to(device)
    return generator


if __name__ == '__main__':
    device = 'cuda'
    G = load_model(device, './model_zoo/ffhq_stylegan3.pkl')
    # NOTE(review): reassigning img_resolution after loading has no effect on
    # the synthesis network — this is the non-working approach the issue reports.
    G.img_resolution = 512
    z = torch.randn(1, 512).to(device)
    ws = G.mapping(z, 0, truncation_psi=0.6)
    imgs = G.synthesis(ws)
    # NCHW -> NHWC and [-1, 1] -> [0, 255]
    imgs = ((imgs.permute(0, 2, 3, 1) + 1) * 127.5).clamp(0, 255).to(torch.uint8).cpu().numpy()
    print(imgs.shape)