Open abrar-khan-368 opened 1 month ago
First of all, the dataset I used is in glb format — is yours as well?
Yes, it consists of glb files.
can i know which blender version are you using, i used 3.2.0
I installed blender directly with pip, blender=1.4
`def save_images(object_file: str) -> None: os.makedirs(args.output_dir, exist_ok=True) reset_scene() load_object(object_file) object_uid = os.path.basename(object_file).split(".")[0] normalize_scene(box_scale=2) add_lighting(option='random') camera, cam_constraint = setup_camera()
empty = bpy.data.objects.new("Empty", None)
scene.collection.objects.link(empty)
cam_constraint.target = empty
img_dir = os.path.join(args.output_dir, object_uid)
os.makedirs(img_dir, exist_ok=True)
# Prepare to save camera parameters
cam_params = {
"intrinsics": get_calibration_matrix_K_from_blender(camera.data, return_principles=True),
"poses": []
}
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for n in tree.nodes:
tree.nodes.remove(n)
bpy.context.scene.view_layers["ViewLayer"].use_pass_normal = True
bpy.context.scene.view_layers["ViewLayer"].use_pass_z = True
rl = tree.nodes.new(type='CompositorNodeRLayers')
composite = tree.nodes.new(type='CompositorNodeComposite')
links.new(rl.outputs['Image'], composite.inputs['Image'])
image_save = tree.nodes.new(type='CompositorNodeOutputFile')
links.new(rl.outputs['Image'], image_save.inputs['Image'])
# 设置深度节点
depth = tree.nodes.new(type="CompositorNodeMapValue")
depth.offset = [-0.7]
depth.size = [0.7]
depth.use_min = True
depth.min = [0]
depth.use_max = True
depth.max = [255]
links.new(rl.outputs['Depth'], depth.inputs[0])
depth_out = tree.nodes.new(type="CompositorNodeComposite")
links.new(depth.outputs[0], depth_out.inputs[0])
depth_save = tree.nodes.new(type="CompositorNodeOutputFile")
links.new(rl.outputs['Depth'], depth_save.inputs['Image'])
# 设置法线节点
normal = tree.nodes.new(type="CompositorNodeNormalize")
links.new(rl.outputs['Normal'], normal.inputs[0])
normal_out = tree.nodes.new(type="CompositorNodeComposite")
links.new(normal.outputs[0], normal_out.inputs[0])
normal_save = tree.nodes.new(type="CompositorNodeOutputFile")
links.new(rl.outputs['Normal'], normal_save.inputs['Image'])
for i in range(args.num_images):
render_path = os.path.join(img_dir, f"{i:03d}.png")
image_save.base_path = img_dir
image_save.file_slots[0].path= f"{i:03d}_image.png"
image_save.format.file_format='PNG'
depth_save.base_path = img_dir
depth_save.file_slots[0].path= f"{i:03d}_depth.png"
depth_save.format.file_format='PNG'
normal_save.base_path = img_dir
normal_save.file_slots[0].path= f"{i:03d}_normal.png"
normal_save.format.file_format='PNG'
# Set the camera position
camera_option = 'random' if i > 0 else 'front'
camera = set_camera_location(camera, option=camera_option)
bpy.ops.render.render(write_still=True)
# # Render the color image
# render_path = os.path.join(img_dir, f"{i:03d}.png")
# scene.render.filepath = render_path
# bpy.ops.render.render(write_still=True)
# # Save camera RT matrix (C2W)
# location, rotation = camera.matrix_world.decompose()[0:2]
# RT = compose_RT(rotation.to_matrix(), np.array(location))
# cam_params["poses"].append(RT)
# # Render the normal image
# bpy.context.scene.view_layers["ViewLayer"].use_pass_normal = True
# #bpy.context.view_layer.use_pass_normal = True
# normal_path = os.path.join(img_dir, f"{i:03d}_normal.png")
# scene.render.filepath = normal_path
# bpy.ops.render.render(write_still=True)
# bpy.context.scene.view_layers["ViewLayer"].use_pass_normal = False
# #bpy.context.view_layer.use_pass_normal = False
# # Render the depth image
# bpy.context.scene.view_layers["ViewLayer"].use_pass_z = True
# #bpy.context.view_layer.use_pass_z = True
# depth_path = os.path.join(img_dir, f"{i:03d}_depth.png")
# scene.render.filepath = depth_path
# bpy.ops.render.render(write_still=True)
# bpy.context.scene.view_layers["ViewLayer"].use_pass_z = False
# #bpy.context.view_layer.use_pass_z = False
# Save camera intrinsics and poses
np.savez(os.path.join(img_dir, 'camera.npz'), **cam_params)`
**Solved the issue by modifying the code in save_images**
Also, i need help in training the instant mesh with my own data. I prepared the data with your script but there's a requirement for filtered_obj_name.json. I checked the paper, it said following criteria: (i) objects without texture maps, (ii) objects with rendered images occupying less than 10% of the view from any angle, (iii) including multiple separate objects, (iv) objects with no caption information provided by the Cap3D dataset, and (v) low-quality objects
I'm not sure about the JSON format of this file — can you share any information about it?
Hello, I use your google drive glb data to test your render script, but i got black RGB and black depth image. I use following installation:
pip install blender==1.4 pip install bpy==2.91a0 && bpy_post_install
I am encountering the same issue: the RGB and depth images are black. I am using Blender 2.9.0 on Linux — please help, thank you! @Mrguanglei
While rendering the objects, I am getting black RGB images — is there anything I am doing wrong? I followed the instructions in the readme file.