data/230629_remocon1/transforms_charuco3.json
unity_generate_transform_cv2.py
unity_rob_q, unity_rob_pos = read_data()
# convert from the Unity coordinate system to OpenCV's
right_q, right_pos = left_to_right(unity_rob_q, unity_rob_pos)
# build a rotation matrix from the quaternion
right_rot = q_to_mat(right_q)
opencv_rt = mat4x4(right_rot, right_pos)
w_rt = np.asarray(opencv_rt)
frame_dict["transform_matrix"] = w_rt
# OpenCV -> OpenGL coordinate conversion (flip y and z)
flip_mat = np.array([
    [1, 0, 0, 0],
    [0, -1, 0, 0],
    [0, 0, -1, 0],
    [0, 0, 0, 1]
])
for f in transform_dict["frames"]:
    f["transform_matrix"] = np.matmul(f["transform_matrix"], flip_mat)  # flip cameras (it just works)
Normalization to center the subject in the data is not applied here.
data/230629_remocon1/transforms_cv2.json
unity_generate_transform_cv3.py
unity_rob_q, unity_rob_pos = read_data()
# convert from the Unity coordinate system to OpenCV's
right_q, right_pos = left_to_right(unity_rob_q, unity_rob_pos)
# build a rotation matrix from the quaternion
right_rot = q_to_mat(right_q)
# invert the extrinsics: camera -> world transform
w_rotmat = right_rot.T
w_tvec = -(right_rot.T).dot(right_pos)
opencv_rt = mat4x4(w_rotmat, w_tvec)
# convert from <class 'numpy.matrix'> to <class 'numpy.ndarray'>
w_rt = np.asarray(opencv_rt)
frame_dict["transform_matrix"] = w_rt
# OpenCV -> OpenGL coordinate conversion (flip y and z)
flip_mat = np.array([
    [1, 0, 0, 0],
    [0, -1, 0, 0],
    [0, 0, -1, 0],
    [0, 0, 0, 1]
])
for f in transform_dict["frames"]:
    f["transform_matrix"] = np.matmul(f["transform_matrix"], flip_mat)  # flip cameras (it just works)
Normalization to center the subject in the data is not applied here.
data/230629_remocon1/transforms_cv3.json
unity_generate_transform_cv4.py
unity_rob_q, unity_rob_pos = read_data()
# convert from the Unity coordinate system to OpenCV's
right_q, right_pos = left_to_right(unity_rob_q, unity_rob_pos)
# build a rotation matrix from the quaternion
right_rot = q_to_mat(right_q)
# invert the extrinsics: camera -> world transform
w_rotmat = right_rot.T
w_tvec = -(right_rot.T).dot(right_pos)
opencv_rt = mat4x4(w_rotmat, w_tvec)
# convert from <class 'numpy.matrix'> to <class 'numpy.ndarray'>
w_rt = np.asarray(opencv_rt)
frame_dict["transform_matrix"] = w_rt
print("flip coordinates ...")
for f in transform_dict["frames"]:
c2w = f["transform_matrix"]
c2w[0:3, 2] *= -1 # flip the y and z axis
c2w[0:3, 1] *= -1
c2w = c2w[[1, 0, 2, 3], :] # swap y and z
c2w[2, :] *= -1 # flip whole world upside down
print("computing center of attention...")
# 正規化コード以下略
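Given the "computing center of attention..." print, the omitted code is presumably the centering/rescaling step from instant-ngp's scripts/colmap2nerf.py. A sketch adapted from that script (assuming transform_dict["frames"] holds NumPy 4x4 camera-to-world matrices as above; the 4.0 scale is colmap2nerf's "nerf sized" constant, not something defined in this repo):

```python
import numpy as np

def closest_point_2_lines(oa, da, ob, db):
    # point closest to both rays o + t*d, plus a weight that goes to 0 when the rays are parallel
    da = da / np.linalg.norm(da)
    db = db / np.linalg.norm(db)
    c = np.cross(da, db)
    denom = np.linalg.norm(c) ** 2
    t = ob - oa
    ta = np.linalg.det([t, db, c]) / (denom + 1e-10)
    tb = np.linalg.det([t, da, c]) / (denom + 1e-10)
    ta = min(ta, 0)
    tb = min(tb, 0)
    return (oa + ta * da + ob + tb * db) * 0.5, denom

frames = transform_dict["frames"]
totw = 0.0
totp = np.zeros(3)
for f in frames:
    mf = f["transform_matrix"][0:3, :]
    for g in frames:
        mg = g["transform_matrix"][0:3, :]
        # intersect the two cameras' viewing rays (origin = column 3, direction = column 2)
        p, w = closest_point_2_lines(mf[:, 3], mf[:, 2], mg[:, 3], mg[:, 2])
        if w > 1e-5:
            totp += p * w
            totw += w
totp /= totw  # weighted "center of attention" the cameras look at

for f in frames:
    f["transform_matrix"][0:3, 3] -= totp  # recenter the scene on that point

avglen = np.mean([np.linalg.norm(f["transform_matrix"][0:3, 3]) for f in frames])
for f in frames:
    f["transform_matrix"][0:3, 3] *= 4.0 / avglen  # rescale so cameras sit ~4 units out
```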
# Unity (left-handed) -> OpenCV (right-handed)
def left_to_right(quaternion, position):
    # flip the y axis
    quaternion[1] = -quaternion[1]
    position[1] = -position[1]
    return quaternion, position
# quaternion (right-handed) -> rotation matrix (right-handed)
def q_to_mat(quaternion):
    # quat = np.array([0. , 0.67385289, 0.44923526, 0.58660887])
    rot = Rotation.from_quat(quaternion)
    # convert to a rotation matrix
    rot = rot.as_matrix()
    return rot
# build a 4x4 transform from rot (3x3) and position (x, y, z)
def mat4x4(rot, position):
    mat = np.zeros((4, 4))
    mat[0:3, 0:3] = rot
    mat[0:3, 3] = position
    mat[3, 3] = 1
    return mat
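For reference, the three helpers compose like this (a minimal sketch with a made-up pose; the quaternion is in scipy's (x, y, z, w) order, which Rotation.from_quat expects):

```python
import numpy as np
from scipy.spatial.transform import Rotation

unity_q = np.array([0.0, 0.0, 0.0, 1.0])  # hypothetical Unity rotation (identity)
unity_pos = np.array([0.5, 1.0, 2.0])     # hypothetical Unity position

right_q, right_pos = left_to_right(unity_q, unity_pos)  # Unity (left-handed) -> OpenCV (right-handed)
right_rot = q_to_mat(right_q)                           # quaternion -> 3x3 rotation matrix

# invert the extrinsics to get the camera-to-world matrix stored in the json
w_rotmat = right_rot.T
w_tvec = -(right_rot.T).dot(right_pos)
c2w = mat4x4(w_rotmat, w_tvec)
print(c2w)  # 4x4: rows 0-2 hold [R^T | -R^T t], row 3 is [0 0 0 1]
```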
Normalization to center the subject in the data is applied here.
data/230629_remocon1/transforms_cv4.json
What camera extrinsics are: the camera-to-world transform is the inverse of the extrinsic parameters.
# invert the extrinsics: camera -> world transform
w_rotmat = right_rot.T
w_tvec = -(right_rot.T).dot(right_pos)
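A quick numeric check of that identity, using the mat4x4 helper from above (Rotation.random draws a random rotation; the translation is made up):

```python
import numpy as np
from scipy.spatial.transform import Rotation

R = Rotation.random().as_matrix()  # world -> camera rotation
t = np.array([0.3, -1.2, 2.5])     # world -> camera translation

w2c = mat4x4(R, t)                 # extrinsics: world -> camera
c2w = mat4x4(R.T, -R.T.dot(t))     # its inverse: camera -> world

assert np.allclose(c2w @ w2c, np.eye(4))  # the two transforms cancel
```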
Try using charuco_estimate.py.