Arthur151 / ROMP

Monocular, One-stage, Regression of Multiple 3D People and their 3D positions & trajectories in camera & global coordinates. ROMP[ICCV21], BEV[CVPR22], TRACE[CVPR2023]
https://www.yusun.work/
Apache License 2.0

convert2fbx.py multiple person #500

Open ypilseong opened 10 months ago

ypilseong commented 10 months ago

The fbx_output_mmhuman3d script only handles a single person, so I modified it to process 3 people. However, a total of 4 armatures get created, and all 4 objects move with the same pose. Can you advise me? My code is below.


import os
import sys
import bpy
import time
# import joblib
import argparse
import numpy as np
# import addon_utils
from math import radians
from mathutils import Matrix, Vector, Quaternion, Euler
import pickle
import os.path
import pandas as pd

# Globals
male_model_path = 'data/body_models/smpl/SMPL_m_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx'
female_model_path = 'data/body_models/smpl/SMPL_f_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx'

bone_name_from_index = {
    0: 'Pelvis',
    1: 'L_Hip',
    2: 'R_Hip',
    3: 'Spine1',
    4: 'L_Knee',
    5: 'R_Knee',
    6: 'Spine2',
    7: 'L_Ankle',
    8: 'R_Ankle',
    9: 'Spine3',
    10: 'L_Foot',
    11: 'R_Foot',
    12: 'Neck',
    13: 'L_Collar',
    14: 'R_Collar',
    15: 'Head',
    16: 'L_Shoulder',
    17: 'R_Shoulder',
    18: 'L_Elbow',
    19: 'R_Elbow',
    20: 'L_Wrist',
    21: 'R_Wrist',
    22: 'L_Hand',
    23: 'R_Hand'
}
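# The indices above follow the 24-joint SMPL kinematic tree; process_poses()
# later prepends the gender prefix ('f_avg_' or 'm_avg_') so the names match
# the bones inside the imported SMPL .fbx template.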

# Helper functions

# Computes rotation matrix through Rodrigues formula as in cv2.Rodrigues
# Source: smpl/plugins/blender/corrective_bpy_sh.py
def Rodrigues(rotvec):
    theta = np.linalg.norm(rotvec)
    r = (rotvec / theta).reshape(3, 1) if theta > 0. else rotvec
    cost = np.cos(theta)
    mat = np.asarray([[0, -r[2], r[1]],
                      [r[2], 0, -r[0]],
                      [-r[1], r[0], 0]])
    return (cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * mat)

# Setup scene
def setup_scene(model_path, fps_target, person_ids):
    scene = bpy.data.scenes['Scene']

    # Set up a dictionary to keep track of armature objects for each person_id
    armatures = {}

    ###########################
    # Engine independent setup
    ###########################

    scene.render.fps = fps_target

    # Remove default cube
    if 'Cube' in bpy.data.objects:
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()

    # Import gender specific .fbx template file
    for person_id in range(person_ids):
        # Append a unique identifier to the armature name based on person_id
        armature_name = f'Armature_{person_id}'
        bpy.ops.import_scene.fbx(filepath=model_path)

        # Rename the imported armature object
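        # Each import arrives under the default name 'Armature'; renaming it
        # right away keeps that name free for the next import in this loop.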
        armature = bpy.data.objects['Armature']
        armature.name = armature_name
        armatures[person_id] = armature
        armature.location = Vector((0, 0, person_id * 2))
    # Now, armatures is a dictionary where keys are person_ids and values are corresponding armature objects
    return armatures
# Process single pose into keyframed bone orientations
def process_pose(current_frame, pose, trans, person_id_, armature):
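    # Keyframes one SMPL pose (24 axis-angle joint rotations, global orientation
    # first) plus a root translation onto the given armature. The three
    # person_id branches below are identical apart from the log message.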

    # rod_rots = pose.reshape(24, 3)

    mat_rots = [Rodrigues(rod_rot) for rod_rot in pose]

    if person_id_ == 1:
        print(f"Processing pose for person_id 1 at frame {current_frame}")

        bones1 = armature.pose.bones
        bones1[bone_name_from_index[0]].location = Vector((100 * trans[1], 100 * trans[2], 100 * trans[0])) #- pelvis_position

        bones1['f_avg_root'].location = Vector(trans)
        bones1[bone_name_from_index[0]].keyframe_insert('location', frame=current_frame)

        for index, mat_rot in enumerate(mat_rots, 0):
            if index >= 24:
                continue

            bone1 = bones1[bone_name_from_index[index]]

            bone_rotation = Matrix(mat_rot).to_quaternion()
            quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians(-90))
            quat_x_n135_cw = Quaternion((1.0, 0.0, 0.0), radians(-135))
            quat_x_p45_cw = Quaternion((1.0, 0.0, 0.0), radians(45))
            quat_y_90_cw = Quaternion((0.0, 1.0, 0.0), radians(-90))
            quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians(-90))
            quat_z_180_cw = Quaternion((0.0, 0.0, 1.0), radians(-180))

            if index == 0:
                # Rotate pelvis so that avatar stands upright and looks along negative Y axis
                # bone.rotation_quaternion = bone_rotation
                bone1.rotation_quaternion = (quat_x_90_cw @ quat_z_180_cw) @ bone_rotation
            else:
                bone1.rotation_quaternion = bone_rotation

            bone1.keyframe_insert('rotation_quaternion', frame=current_frame)

        return
    elif person_id_ == 2:
        print(f"Processing pose for person_id 2 at frame {current_frame}")

        bones2 = armature.pose.bones
        bones2[bone_name_from_index[0]].location = Vector((100 * trans[1], 100 * trans[2], 100 * trans[0])) #- pelvis_position

        bones2['f_avg_root'].location = Vector(trans)
        bones2[bone_name_from_index[0]].keyframe_insert('location', frame=current_frame)

        for index, mat_rot in enumerate(mat_rots, 0):
            if index >= 24:
                continue

            bone2 = bones2[bone_name_from_index[index]]

            bone_rotation = Matrix(mat_rot).to_quaternion()
            quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians(-90))
            quat_x_n135_cw = Quaternion((1.0, 0.0, 0.0), radians(-135))
            quat_x_p45_cw = Quaternion((1.0, 0.0, 0.0), radians(45))
            quat_y_90_cw = Quaternion((0.0, 1.0, 0.0), radians(-90))
            quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians(-90))
            quat_z_180_cw = Quaternion((0.0, 0.0, 1.0), radians(-180))

            if index == 0:
                # Rotate pelvis so that avatar stands upright and looks along negative Y axis
                # bone.rotation_quaternion = bone_rotation
                bone2.rotation_quaternion = (quat_x_90_cw @ quat_z_180_cw) @ bone_rotation
            else:
                bone2.rotation_quaternion = bone_rotation

            bone2.keyframe_insert('rotation_quaternion', frame=current_frame)

        return
    else:
        print(f"Processing pose for person_id 0 at frame {current_frame}")
        bones = armature.pose.bones
        bones[bone_name_from_index[0]].location = Vector((100 * trans[1], 100 * trans[2], 100 * trans[0])) #- pelvis_position

        bones['f_avg_root'].location = Vector(trans)
        bones[bone_name_from_index[0]].keyframe_insert('location', frame=current_frame)

        for index, mat_rot in enumerate(mat_rots, 0):
            if index >= 24:
                continue

            bone = bones[bone_name_from_index[index]]

            bone_rotation = Matrix(mat_rot).to_quaternion()
            quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians(-90))
            quat_x_n135_cw = Quaternion((1.0, 0.0, 0.0), radians(-135))
            quat_x_p45_cw = Quaternion((1.0, 0.0, 0.0), radians(45))
            quat_y_90_cw = Quaternion((0.0, 1.0, 0.0), radians(-90))
            quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians(-90))
            quat_z_180_cw = Quaternion((0.0, 0.0, 1.0), radians(-180))

            if index == 0:
                # Rotate pelvis so that avatar stands upright and looks along negative Y axis
                # bone.rotation_quaternion = bone_rotation
                bone.rotation_quaternion = (quat_x_90_cw @ quat_z_180_cw) @ bone_rotation
            else:
                bone.rotation_quaternion = bone_rotation

            bone.keyframe_insert('rotation_quaternion', frame=current_frame)

        return

# Process all the poses from the pose file
def process_poses(
        input_path,
        gender,
        fps_source,
        fps_target,
        start_origin,
        person_id=1,
):
    print('Processing: ' + input_path)

    file_data = np.load(input_path, allow_pickle=True)
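    # The .npz is expected to contain a 'smpl' dict with 'body_pose' and
    # 'global_orient', a 'pred_cams' array, and a per-detection 'person_id' array.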

    body_pose = file_data['smpl'][()]['body_pose']
    global_orient = file_data['smpl'][()]['global_orient'].reshape(-1, 1, 3)
    pred_cams = file_data['pred_cams'].reshape(-1, 3)
    person_ids = file_data['person_id']
    # keypoints_3d = file_data['keypoints_3d'][()]

    poses = []
    poses1 = []
    poses2 = []
    trans = []
    for i in range(body_pose.shape[0]):
        if person_ids[i] == 1:
            poses1.append(np.concatenate([global_orient[i], body_pose[i]], axis=0))
        elif person_ids[i] == 2:
            poses2.append(np.concatenate([global_orient[i], body_pose[i]], axis=0))
        else:
            poses.append(np.concatenate([global_orient[i], body_pose[i]], axis=0))
        trans.append(pred_cams[i])
    # for i in range(body_pose.shape[0]):
    #     poses.append(np.concatenate([global_orient[i], body_pose[i]], axis=0))
    #     #trans.append((keypoints_3d[i][8] + keypoints_3d[i][11])/2 * 5)

    poses = np.array(poses)
    poses1 = np.array(poses1)
    poses2 = np.array(poses2)
    trans = np.array(trans)

    # trans = np.zeros((poses.shape[0], 3))

    if gender == 'female':
        model_path = female_model_path
        for k, v in bone_name_from_index.items():
            bone_name_from_index[k] = 'f_avg_' + v
    elif gender == 'male':
        model_path = male_model_path
        for k, v in bone_name_from_index.items():
            bone_name_from_index[k] = 'm_avg_' + v
    else:
        print('ERROR: Unsupported gender: ' + gender)
        sys.exit(1)

    # Limit target fps to source fps
    if fps_target > fps_source:
        fps_target = fps_source

    print(f'Gender: {gender}')
    print(f'Number of source poses: {str(poses.shape[0])}')
    print(f'Source frames-per-second: {str(fps_source)}')
    print(f'Target frames-per-second: {str(fps_target)}')
    print('--------------------------------------------------')

    armatures = setup_scene(model_path, fps_target, len(set(person_ids)))

    scene = bpy.data.scenes['Scene']
    sample_rate = int(fps_source / fps_target)
    scene.frame_end = int(poses.shape[0] / sample_rate)

    # Retrieve pelvis world position.
    # Unit is [cm] due to Armature scaling.
    # Need to make copy since reference will change when bone location is modified.
    bpy.ops.object.mode_set(mode='EDIT')
    #pelvis_position = Vector(bpy.data.armatures[0].edit_bones[bone_name_from_index[0]].head)
    bpy.ops.object.mode_set(mode='OBJECT')

    source_index = 0
    frame = 1
    source_index_0 = 0
    source_index_1 = 0
    source_index_2 = 0
    offset = np.array([0.0, 0.0, 0.0])
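    # Main keyframing loop: each detection advances the scene by one frame and
    # keys the next pose for that person onto its armature; detections whose
    # person_id is 6 are skipped.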

    while source_index < poses.shape[0]:
        print('Adding pose: ' + str(source_index))

        if start_origin:
            if source_index == 0:
                offset = np.array([trans[source_index][0], trans[source_index][1], 0])

        # Go to new frame
        scene.frame_set(frame)
        if person_ids[source_index] == 6:
            source_index += sample_rate
            frame += 1
            continue

        armature = armatures[person_ids[source_index]]
        if person_ids[source_index] == 1:

            process_pose(frame, poses1[source_index_1], (trans[source_index] - offset),  person_ids[source_index], armature)
            source_index += sample_rate
            source_index_1 += sample_rate
            frame += 1

        elif person_ids[source_index] == 2:

            process_pose(frame, poses2[source_index_2], (trans[source_index] - offset),  person_ids[source_index], armature)
            source_index += sample_rate
            source_index_2 += sample_rate
            frame += 1

        else:

            process_pose(frame, poses[source_index_0], (trans[source_index] - offset),  person_ids[source_index], armature)
            source_index_0 += sample_rate
            source_index += sample_rate
            frame += 1

def export_animated_mesh(output_path):
    # Create output directory if needed
    output_dir = os.path.dirname(output_path)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir, exist_ok=True)

    # Select only skinned mesh and rig
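    # Note: this hard-codes exactly four armatures (Armature_0 .. Armature_3),
    # selecting each armature together with its first mesh child.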
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['Armature_0'].select_set(True)
    bpy.data.objects['Armature_0'].children[0].select_set(True)
    bpy.data.objects['Armature_1'].select_set(True)
    bpy.data.objects['Armature_1'].children[0].select_set(True)
    bpy.data.objects['Armature_2'].select_set(True)
    bpy.data.objects['Armature_2'].children[0].select_set(True)
    bpy.data.objects['Armature_3'].select_set(True)
    bpy.data.objects['Armature_3'].children[0].select_set(True)

    if output_path.endswith('.glb'):
        print('Exporting to glTF binary (.glb)')
        # Currently exporting without shape/pose shapes for smaller file sizes
        bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
    elif output_path.endswith('.fbx'):
        print('Exporting to FBX binary (.fbx)')
        bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
    else:
        print('ERROR: Unsupported export format: ' + output_path)
        sys.exit(1)

    return

if __name__ == '__main__':
    try:

        input_path = 'demo_result/inference_result_multi9.npz'
        output_path = 'test_trans6.fbx'
        fps_source = 30
        fps_target = 30
        gender = 'female'
        start_origin = 1
        person_id = 0

        startTime = time.perf_counter()

        # Process data
        cwd = os.getcwd()

        # Turn relative input/output paths into absolute paths
        if not input_path.startswith(os.path.sep):
            input_path = os.path.join(cwd, input_path)

        if not output_path.startswith(os.path.sep):
            output_path = os.path.join(cwd, output_path)

        print('Input path: ' + input_path)
        print('Output path: ' + output_path)

        if not (output_path.endswith('.fbx') or output_path.endswith('.glb')):
            print('ERROR: Invalid output format (must be .fbx or .glb)')
            sys.exit(1)

        # Process pose file
        if input_path.endswith('.npz'):
            if not os.path.isfile(input_path):
                print('ERROR: Invalid input file')
                sys.exit(1)

            poses_processed = process_poses(
                input_path=input_path,
                gender=gender,
                fps_source=fps_source,
                fps_target=fps_target,
                start_origin=start_origin,
                person_id=person_id
            )
            export_animated_mesh(output_path)

        print('--------------------------------------------------')
        print('Animation export finished.')
        print(f'Poses processed: {str(poses_processed)}')
        print(f'Processing time : {time.perf_counter() - startTime:.2f} s')
        print('--------------------------------------------------')
        sys.exit(0)

    except SystemExit as ex:
        if ex.code is None:
            exit_status = 0
        else:
            exit_status = ex.code

        print('Exiting. Exit status: ' + str(exit_status))

        # Only exit to OS when we are not running in Blender GUI
        if bpy.app.background:
            sys.exit(exit_status)
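
For reference, a minimal debugging sketch (not part of the script above) to check whether each imported armature ends up with its own action, assuming the standard bpy animation_data API:

import bpy

# Print every armature object together with the action that animates it.
# If all four armatures report the same action (or none), the keyframes are
# not being written per armature as intended.
for obj in bpy.data.objects:
    if obj.type == 'ARMATURE':
        action = obj.animation_data.action if obj.animation_data else None
        print(obj.name, '->', action.name if action else 'no action')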