NVlabs / nvdiffrast

Nvdiffrast - Modular Primitives for High-Performance Differentiable Rendering

Sample scripts to reproduce the results in the documentation #195

Closed wuzirui closed 3 months ago

wuzirui commented 4 months ago

Hi! Thanks for open-sourcing this work. Could you share the scripts used to produce the results with the "Spot" mesh in the documentation? I'm running into some problems with texture mapping, so a working reference would be very helpful. Thanks!

[image: the two textured Spot output images from the documentation]
s-laine commented 4 months ago

Here's the script used for generating these two output images:

import numpy as np
import torch
import imageio
import util
import nvdiffrast.torch as dr

#----------------------------------------------------------------------------

def rotate_z(a):
    s, c = np.sin(a), np.cos(a)
    return np.array([[ c, s, 0, 0],
                     [-s, c, 0, 0],
                     [ 0, 0, 1, 0],
                     [ 0, 0, 0, 1]]).astype(np.float32)

#----------------------------------------------------------------------------

def load_obj(filename, normalization=False):
    """
    Load Wavefront .obj file.
    This function only supports vertices (v x x x) and faces (f x x x).
    """

    # load vertices
    vertices  = []
    texcoords = []

    with open(filename) as f:
        lines = f.readlines()

    for line in lines:
        tokens = line.split()
        if not tokens:
            continue
        if tokens[0] == 'v':
            vertices.append([float(v) for v in tokens[1:4]])
        elif tokens[0] == 'vt':
            texcoords.append([float(v) for v in tokens[1:3]])

    vertices  = np.array(vertices)
    texcoords = np.array(texcoords)

    # load faces
    faces = []
    tfaces = []
    for line in lines:
        tokens = line.split()
        if not tokens:
            continue
        if tokens[0] == 'f':
            vs = tokens[1:]
            nv = len(vs)
            vv = vs[0].split('/')
            v0 = int(vv[0])
            t0 = int(vv[1]) if len(vv) > 1 and vv[1] else -1  # tolerate 'v' and 'v//vn' forms
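            # Triangulate the face as a fan anchored at the first vertex
            # (handles quads and larger n-gons as well as triangles).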
            for i in range(nv - 2):
                vv = vs[i + 1].split('/')
                v1 = int(vv[0])
                t1 = int(vv[1]) if len(vv) > 1 and vv[1] else -1
                vv = vs[i + 2].split('/')
                v2 = int(vv[0])
                t2 = int(vv[1]) if len(vv) > 1 and vv[1] else -1
                faces.append((v0, v1, v2))
                tfaces.append((t0, t1, t2))
    faces = np.array(faces, dtype=np.uint32)-1  # 1-based index -> 0-based
    tfaces = np.array(tfaces, dtype=np.uint32)-1
    return vertices, texcoords, faces, tfaces

#----------------------------------------------------------------------------

def img_spot():
    resolution = [256, 256]
    crop_slice = np.s_[5:5+220, 25:25+220, :3]

    pos, uv, tri_pos, tri_uv = load_obj("spot_triangulated.obj")
    pos = np.concatenate([pos, np.ones_like(pos[:, :1])], axis=1)
    print("pos:    ", pos.shape)
    print("uv:     ", uv.shape)
    print("tri_pos:", tri_pos.shape)
    print("tri_uv: ", tri_uv.shape)
    tri_pos = tri_pos.astype(np.int32)
    tri_uv = tri_uv.astype(np.int32)

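    # Load the texture and flip it vertically: .obj texture coordinates use a
    # bottom-left origin, while imageio reads rows top-to-bottom.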
    tex = imageio.v2.imread("spot_texture.png")[::-1].astype(np.float32) / 255.0
    tex = torch.as_tensor(tex, dtype=torch.float32, device='cuda')

    pos     = torch.as_tensor(pos,     dtype=torch.float32, device='cuda')
    uv      = torch.as_tensor(uv,      dtype=torch.float32, device='cuda')
    tri_pos = torch.as_tensor(tri_pos, dtype=torch.int32,   device='cuda')
    tri_uv  = torch.as_tensor(tri_uv,  dtype=torch.int32,   device='cuda')

    glctx = dr.RasterizeCudaContext()

    # Modelview and modelview + projection matrices.
    proj  = util.projection(x=0.25, n=1.0, f=20.0)
    r_mv  = util.translate(0, 0, -5.0)
    r_mv  = np.matmul(r_mv, rotate_z(0.1))
    r_mv  = np.matmul(r_mv, util.rotate_y(2.5))
    r_mv  = np.matmul(r_mv, util.rotate_x(0.1))
    r_mvp = np.matmul(proj, r_mv).astype(np.float32)
    r_mvp = torch.as_tensor(r_mvp, dtype=torch.float32, device='cuda')

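    # Transform vertex positions to clip space; dr.rasterize expects positions
    # with shape [minibatch_size, num_vertices, 4].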
    cpos = torch.matmul(r_mvp, pos.t()).t()[None, ...] # Add the minibatch axis.
    cpos = cpos.contiguous()
    rast_out, rast_out_db = dr.rasterize(glctx, cpos, tri_pos, resolution=resolution)
    attr_out, attr_out_da = dr.interpolate(uv[None, ...], rast_out, tri_uv, rast_db=rast_out_db, diff_attrs='all')
    tex_out = dr.texture(tex[None, ...], attr_out, attr_out_da, filter_mode='linear-mipmap-linear')
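    # Composite the textured result over a white background: channel 3 of the
    # rasterizer output holds triangle_id + 1, so values > 0 mark covered pixels.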
    texw_out = torch.where(rast_out[..., 3:] > 0, tex_out, torch.tensor(1.0).cuda())

    def save(fn, x):
        print(f"Saving '{fn}'")
        x = x[0].detach().cpu().numpy()[::-1]
        x = x[crop_slice]
        util.save_image(fn, x)

    save('spot_tex.png', tex_out)
    save('spot_texw.png', texw_out)

#----------------------------------------------------------------------------

if __name__ == "__main__":
    img_spot()

#----------------------------------------------------------------------------

You'll have to take util.py from the samples/torch directory, and the Spot model files spot_triangulated.obj and spot_texture.png from Keenan Crane's spot.zip, which can be downloaded from his 3D model repository page. Then you can run the script to produce spot_tex.png and spot_texw.png, which correspond to the textured output images in the documentation.
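
If pulling in util.py is inconvenient, stand-ins along these lines cover the calls the script makes. This is a minimal sketch matched to the call sites above, not a copy of samples/torch/util.py; the matrix conventions (e.g. axis signs) are assumptions, so a mismatch may show up as a mirrored or flipped image:

import numpy as np
import imageio

def projection(x=0.1, n=1.0, f=50.0):
    # Symmetric perspective projection; x sets the frustum half-extent at the
    # near plane. (Sign conventions assumed, not copied from util.py.)
    return np.array([[n/x,   0,            0,              0],
                     [  0, n/x,            0,              0],
                     [  0,   0, -(f+n)/(f-n), -2*f*n/(f-n)],
                     [  0,   0,           -1,              0]]).astype(np.float32)

def translate(x, y, z):
    return np.array([[1, 0, 0, x],
                     [0, 1, 0, y],
                     [0, 0, 1, z],
                     [0, 0, 0, 1]]).astype(np.float32)

def rotate_x(a):
    s, c = np.sin(a), np.cos(a)
    return np.array([[1, 0,  0, 0],
                     [0, c, -s, 0],
                     [0, s,  c, 0],
                     [0, 0,  0, 1]]).astype(np.float32)

def rotate_y(a):
    s, c = np.sin(a), np.cos(a)
    return np.array([[ c, 0, s, 0],
                     [ 0, 1, 0, 0],
                     [-s, 0, c, 0],
                     [ 0, 0, 0, 1]]).astype(np.float32)

def save_image(fn, x):
    # Clamp to [0, 1] and write an 8-bit image file.
    x = np.rint(np.clip(x, 0.0, 1.0) * 255.0).astype(np.uint8)
    imageio.imwrite(fn, x)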

Thanos-DB commented 1 week ago

May I suggest adding this script to the README? It would be very helpful as a quick-start guide for new users, or even as a getting_started.ipynb notebook. I could create a pull request if you want me to.

s-laine commented 1 week ago

Good idea, I'll add this to the README one way or another in the next update.