From 583656322fbb720f2ae570f5acca9845f266fbd7 Mon Sep 17 00:00:00 2001 From: liangyuxuan <16126883+liangyuxuan123@user.noreply.gitee.com> Date: Tue, 18 Nov 2025 17:51:52 +0800 Subject: [PATCH] 1 --- .../VisionDetect/utils/calibration_tools.py | 19 ++-- .../VisionDetect/utils/data_tools.py | 34 +++++-- .../utils/error_calculation_tool.py | 89 +++++++++++++++++++ 3 files changed, 131 insertions(+), 11 deletions(-) create mode 100644 vision_detect/vision_detect/VisionDetect/utils/error_calculation_tool.py diff --git a/vision_detect/vision_detect/VisionDetect/utils/calibration_tools.py b/vision_detect/vision_detect/VisionDetect/utils/calibration_tools.py index 150c5d2..2aa72fe 100644 --- a/vision_detect/vision_detect/VisionDetect/utils/calibration_tools.py +++ b/vision_detect/vision_detect/VisionDetect/utils/calibration_tools.py @@ -1,6 +1,7 @@ import math import json import logging +import argparse import numpy as np import transforms3d as tfs @@ -252,10 +253,18 @@ def calculate_hand_eye_matrix( a = _Calibration(mode, input_format, hand_pose_path, camera_pose_path, save_path) if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--mode", type=str, required=True) + parser.add_argument("--hand_path", type=str, required=True) + parser.add_argument("--camera_path", type=str, required=True) + parser.add_argument("--save_path", type=str, required=True) + parser.add_argument("--input_format", type=str, default="quat") + args = parser.parse_args() + calculate_hand_eye_matrix( - mode= "eye_in_hand", - input_format= "quat", - hand_pose_path="", - camera_pose_path="", - save_path="" + mode= args.mode, + input_format= args.input_format, + hand_pose_path=args.hand_path, + camera_pose_path=args.camera_path, + save_path=args.save_path ) diff --git a/vision_detect/vision_detect/VisionDetect/utils/data_tools.py b/vision_detect/vision_detect/VisionDetect/utils/data_tools.py index a0d85d4..1d91af9 100644 --- 
a/vision_detect/vision_detect/VisionDetect/utils/data_tools.py +++ b/vision_detect/vision_detect/VisionDetect/utils/data_tools.py @@ -1,19 +1,28 @@ +import argparse import json import transforms3d as tfs -def prepare_data(_data:list[float], save_path: str = "camera_pose_data.json", format: str = "quat", mm2m: bool = False): +def prepare_data(_data:list[float], save_path: str = "camera_pose_data.json", format: str = "quat_xyzw", mm2m: bool = False): new_list = [] - if format == "quat": + if format == "quat_wxyz": for i in range(0, len(_data), 7): - if mm2m: + if not mm2m: new = [_data[i], _data[i + 1], _data[i + 2], _data[i + 3], _data[i + 4], _data[i + 5], _data[i + 6]] else: new = [_data[i]/1e3, _data[i + 1]/1e3, _data[i + 2]/1e3, _data[i + 6], _data[i + 3], _data[i + 4], _data[i + 5], _data[i + 6]] new_list.extend(new) + elif format == "quat_xyzw": + for i in range(0, len(_data), 7): + if not mm2m: + new = [_data[i], _data[i + 1], _data[i + 2], _data[i + 6], _data[i + 3], _data[i + 4], _data[i + 5]] + else: + new = [_data[i] / 1e3, _data[i + 1] / 1e3, _data[i + 2] / 1e3, _data[i + 6], _data[i + 3], _data[i + 4], _data[i + 5]] + new_list.extend(new) + elif format == "euler": for i in range(0, len(_data), 6): rw, rx, ry, rz = tfs.euler.euler2quat(_data[i + 3], _data[i + 4], _data[i + 5], axes='sxyz') - if mm2m: + if not mm2m: new = [_data[i], _data[i + 1], _data[i + 2], rw, rx, ry, rz] else: new = [_data[i]/1e3, _data[i + 1]/1e3, _data[i + 2]/1e3, rw, rx, ry, rz] @@ -21,8 +30,8 @@ def prepare_data(_data:list[float], save_path: str = "camera_pose_data.json", fo elif format == "rotvertor": for i in range(0, len(_data), 6): - tfs.quaternions.rotvec2quat - if mm2m: + # tfs.quaternions.rotvec2quat + if not mm2m: new = [_data[i], _data[i + 1], _data[i + 2], _data[i + 3], _data[i + 4], _data[i + 5]] else: new = [_data[i]/1e3, _data[i + 1]/1e3, _data[i + 2]/1e3, _data[i + 6], _data[i + 3], _data[i + 4], _data[i + 5]] @@ -30,3 +39,16 @@ def
prepare_data(_data:list[float], save_path: str = "camera_pose_data.json", fo else: pass with open(save_path, 'w') as f: json.dump(new_list, f, indent=4) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--data_path", type=str, required=True) + parser.add_argument("--save_path", type=str, required=True) + parser.add_argument("--format", type=str, default="quat_xyzw") + parser.add_argument("--mm2m", action="store_true") + args = parser.parse_args() + + with open(args.data_path, "r") as f: + _data = json.load(f) + + prepare_data(_data, args.save_path, args.format, args.mm2m) diff --git a/vision_detect/vision_detect/VisionDetect/utils/error_calculation_tool.py b/vision_detect/vision_detect/VisionDetect/utils/error_calculation_tool.py new file mode 100644 index 0000000..56a53c9 --- /dev/null +++ b/vision_detect/vision_detect/VisionDetect/utils/error_calculation_tool.py @@ -0,0 +1,89 @@ +import argparse + +import numpy as np +import transforms3d as tfs +import json + + +def get_matrix_euler_radu(x, y, z, rx, ry, rz): + """从欧拉角构建齐次变换矩阵""" + '''构造旋转矩阵''' + rmat = tfs.euler.euler2mat( + # math.radians(rx), math.radians(ry), math.radians(rz) + rx, ry, rz + ) + + """构造齐次变换矩阵""" + rmat = tfs.affines.compose( + np.squeeze(np.asarray((x, y, z))), + rmat, + [1, 1, 1] + ) + return rmat + + +def get_matrix_quat(x, y, z, rw, rx, ry, rz): + """从单位四元数构建齐次变换矩阵""" + '''构造旋转矩阵''' + q = [rw, rx, ry, rz] + rmat = tfs.quaternions.quat2mat(q) + + """构造齐次变换矩阵""" + rmat = tfs.affines.compose( + np.squeeze(np.asarray((x, y, z))), + rmat, + [1, 1, 1] + ) + return rmat + +def calculate(mode, hand_path, camera_path, hand_eye_path): + with open(camera_path, 'r', encoding='utf-8') as f: + camera_all = json.load(f) + + with open(hand_path, 'r', encoding='utf-8') as f: + hand_all = json.load(f) + + with open(hand_eye_path, 'r', encoding='utf-8') as f: + data = json.load(f) + T_cam_in_hand = data["T"] + + sum_ = int(len(hand_all) / 7) + T_list = [] + for i
in range(sum_): + hand = [hand_all[i*7], hand_all[i*7+1], hand_all[i*7+2], hand_all[i*7+3], hand_all[i*7+4], hand_all[i*7+5], hand_all[i*7+6]] + camera = [camera_all[i*7], camera_all[i*7+1], camera_all[i*7+2], camera_all[i*7+3], camera_all[i*7+4], camera_all[i*7+5], camera_all[i*7+6]] + T_hand_in_base = get_matrix_quat(hand[0], hand[1], hand[2], hand[3], hand[4], hand[5], hand[6]) + T_cal_in_camera = get_matrix_quat(camera[0], camera[1], camera[2], camera[3], camera[4], camera[5], camera[6]) + + if mode == "eye_to_hand": + T = np.linalg.inv(T_hand_in_base) @ T_cam_in_hand @ T_cal_in_camera + else: + T = T_hand_in_base @ T_cam_in_hand @ T_cal_in_camera + T_list.append(T) + print(T) + + sum_sub = np.zeros_like(T_list[0]) + for i in range(len(T_list)): + for j in range(i + 1, len(T_list)): + d = np.subtract(T_list[i], T_list[j]) + sum_sub = np.add(sum_sub, d) + + len_sub = len(T_list) * (len(T_list) - 1) / 2 + + p = np.divide(sum_sub, len_sub) + p[0:3 ,3:4] = p[0:3 ,3:4] * 1000 + np.set_printoptions(suppress=True, floatmode='fixed', precision=8) + print(p) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("mode", type=str) + parser.add_argument("hand_path", type=str) + parser.add_argument("camera_path", type=str) + parser.add_argument("hand_eye_path", type=str) + args = parser.parse_args() + + calculate(args.mode, args.hand_path, args.camera_path, args.hand_eye_path) + +