diff --git a/vision_detect/vision_detect/VisionDetect/utils/calculate_tools.py b/vision_detect/vision_detect/VisionDetect/utils/calculate_tools.py index 712faff..dacc50a 100644 --- a/vision_detect/vision_detect/VisionDetect/utils/calculate_tools.py +++ b/vision_detect/vision_detect/VisionDetect/utils/calculate_tools.py @@ -9,254 +9,6 @@ import transforms3d as tfs from .object_icp import object_icp -def _get_matrix_quat(x, y, z, rw, rx, ry, rz): - """从单位四元数构建齐次变换矩阵""" - '''构造旋转矩阵''' - q = [rw, rx, ry, rz] - rmat = tfs.quaternions.quat2mat(q) - - """构造齐次变换矩阵""" - rmat = tfs.affines.compose( - np.squeeze(np.asarray((x, y, z))), - rmat, - [1, 1, 1] - ) - return rmat - - -def _get_matrix_eular_radu(x, y, z, rx, ry, rz): - """从欧拉角构建齐次变换矩阵""" - '''构造旋转矩阵''' - rmat = tfs.euler.euler2mat( - # math.radians(rx), math.radians(ry), math.radians(rz) - rx, ry, rz - ) - - """构造齐次变换矩阵""" - rmat = tfs.affines.compose( - np.squeeze(np.asarray((x, y, z))), - rmat, - [1, 1, 1] - ) - return rmat - - -def _get_matrix_rotvector(x, y, z, rx, ry, rz): - """从旋转向量构建齐次变换矩阵""" - '''构造旋转矩阵''' - rvec = np.array([rx, ry, rz]) - theta = np.linalg.norm(rvec) - if theta < 1e-8: - rmat = np.eye(3) # 小角度直接返回单位矩阵 - else: - axis = rvec / theta - rmat = tfs.axangles.axangle2mat(axis, theta) - - """构造齐次变换矩阵""" - rmat = tfs.affines.compose( - np.squeeze(np.asarray((x, y, z))), - rmat, - [1, 1, 1] - ) - return rmat - - -def _skew(v): - return np.array([[0, -v[2], v[1]], - [v[2], 0, -v[0]], - [-v[1], v[0], 0]]) - - -def _R2P(T): - """旋转矩阵 --> 修正罗德里格斯向量""" - axis, angle= tfs.axangles.mat2axangle(T[0:3, 0:3]) - P = 2 * math.sin(angle / 2) * axis - return P - - -def _P2R(P): - """修正罗德里格斯向量 --> 旋转矩阵""" - P2 = np.dot(P.T, P) - part_1 = (1 - P2 / 2) * np.eye(3) - part_2 = np.add(np.dot(P, P.T), np.sqrt(4- P2) * _skew(P.flatten().T)) - R = np.add(part_1, np.divide(part_2, 2)) - return R - - -def _calculate_(Hgs, Hcs): - """计算标定矩阵""" - # H代表矩阵、h代表标量 - Hgijs = [] - Hcijs = [] - A = [] - B = [] - size = 0 - for i in 
range(len(Hgs)): - for j in range(i + 1, len(Hgs)): - size += 1 - Hgij = np.dot(np.linalg.inv(Hgs[j]), Hgs[i]) - Hgijs.append(Hgij) - Pgij = np.dot(2, _R2P(Hgij)) - - Hcij = np.dot(Hcs[j], np.linalg.inv(Hcs[i])) - Hcijs.append(Hcij) - Pcij = np.dot(2, _R2P(Hcij)) - - A.append(_skew(np.add(Pgij, Pcij))) - B.append(np.subtract(Pcij, Pgij).reshape(3, 1)) - - MA = np.vstack(A) - MB = np.vstack(B) - Pcg_ = np.dot(np.linalg.pinv(MA), MB) - pcg = np.sqrt(np.add(1, np.dot(Pcg_.T, Pcg_))) - Pcg = np.dot(np.dot(2, Pcg_), np.linalg.inv(pcg)) - Rcg = _P2R(Pcg).reshape(3, 3) - - A = [] - B = [] - id = 0 - for i in range(len(Hgs)): - for j in range(i + 1, len(Hgs)): - Hgij = Hgijs[id] - Hcij = Hcijs[id] - A.append(np.subtract(Hgij[0:3, 0:3], np.eye(3, 3))) - B.append(np.subtract(np.dot(Rcg, Hcij[0:3, 3:4]), Hgij[0:3, 3:4])) - id += 1 - - MA = np.asarray(A).reshape(size * 3, 3) - MB = np.asarray(B).reshape(size * 3, 1) - Tcg = np.dot(np.linalg.pinv(MA), MB).reshape(3, ) - - return tfs.affines.compose(Tcg, np.squeeze(Rcg), [1, 1, 1]) - - -class _Calibration: - def __init__(self, mode, input_format, hand_pose_path, camera_pose_path, save_path): - """ - matrix_name: str - mode: "eye_in_hand" or "eye_to_hand" - input_format: str, "eular" or "rotvertor" or "quat" - hand_pose_path: str - camera_pose_path: str - save_path: str - """ - self._Hgs, self._Hcs = [], [] - self._hand, self._camera = [], [] - self._calibration_matrix = None - self._done = False - - self._mode = mode - if self._mode not in ['eye_in_hand', 'eye_to_hand']: - raise ValueError("mode must be 'eye_in_hand' or 'eye_to_hand'") - - self._input = input_format - if self._input == 'eular': - self._function = _get_matrix_eular_radu - elif self._input == 'rotvertor': - self._function = _get_matrix_rotvector - elif self._input == 'quat': - self._function = _get_matrix_quat - else: - raise ValueError("INPUT ERROR") - - self._camera_pose_path = camera_pose_path - self._hand_pose_path = hand_pose_path - self._save_path = 
save_path - - self._get_pose() - - - - def _get_pose(self): - with open(f'{self._camera_pose_path}', 'r', encoding='utf-8') as f: - self._camera = json.load(f) - - with open(f'{self._hand_pose_path}', 'r', encoding='utf-8') as f: - self._hand = json.load(f) - - self._calculate_calibration() - - print(self._hand) - print(self._camera) - - logging.info(f"{self._calibration_matrix}") - hand_eye_result = { - "T": self._calibration_matrix.tolist() - } - with open(f"{self._save_path}", "w") as f: - json.dump(hand_eye_result, f, indent=4) - logging.info(f"Save as {self._save_path}") - - self._done = True - - def _calculate_data(self): - if self._input == 'quat': - for i in range(0, len(self._hand), 7): - self._Hgs.append( - np.linalg.inv( - self._function( - self._hand[i], self._hand[i + 1], self._hand[i + 2], - self._hand[i + 3], self._hand[i + 4], self._hand[i + 5], self._hand[i + 6] - ) - ) - if self._mode == 'eye_to_hand' else - self._function( - self._hand[i], self._hand[i + 1], self._hand[i + 2], - self._hand[i + 3], self._hand[i + 4], self._hand[i + 5], self._hand[i + 6] - ) - ) - self._Hcs.append( - self._function( - self._camera[i], self._camera[i + 1], self._camera[i + 2], - self._camera[i + 3], self._camera[i + 4], self._camera[i + 5], self._camera[i + 6] - ) - ) - - else: - for i in range(0, len(self._hand), 6): - self._Hgs.append( - np.linalg.inv( - self._function( - self._hand[i], self._hand[i + 1], self._hand[i + 2], - self._hand[i + 3], self._hand[i + 4], self._hand[i + 5] - ) - ) - if self._mode == 'eye_to_hand' else - self._function( - self._hand[i], self._hand[i + 1], self._hand[i + 2], - self._hand[i + 3], self._hand[i + 4], self._hand[i + 5] - ) - ) - self._Hcs.append( - self._function( - self._camera[i], self._camera[i + 1], self._camera[i + 2], - self._camera[i + 3], self._camera[i + 4], self._camera[i + 5] - ) - ) - - def _calculate_calibration(self): - self._calculate_data() - self._calibration_matrix = _calculate_(self._Hgs, self._Hcs) - - -def 
def _get_matrix_rotvector(x, y, z, rx, ry, rz):
    """Build a 4x4 homogeneous transform from a translation (x, y, z) and a
    rotation vector (rx, ry, rz) whose Euclidean norm is the angle in radians."""
    rvec = np.asarray([rx, ry, rz], dtype=float)
    angle = float(np.linalg.norm(rvec))
    if angle < 1e-8:
        # Degenerate rotation: the axis is undefined, fall back to identity.
        rot = np.eye(3)
    else:
        rot = tfs.axangles.axangle2mat(rvec / angle, angle)
    # Compose rotation + translation into a homogeneous matrix (unit scale).
    return tfs.affines.compose(np.squeeze(np.asarray((x, y, z))), rot, [1, 1, 1])
def _calculate_(Hgs, Hcs):
    """Solve the AX = XB hand-eye equation (Tsai-Lenz) from pose pairs.

    Hgs: list of 4x4 gripper/hand poses.
    Hcs: list of 4x4 camera/target poses (same length and ordering as Hgs).
    Returns the 4x4 hand-eye calibration matrix.
    """
    # Convention kept from the original author: H is a matrix, h a scalar.
    Hgijs = []
    Hcijs = []
    A = []
    B = []
    size = 0
    for i in range(len(Hgs)):
        for j in range(i + 1, len(Hgs)):
            size += 1
            # Relative motion between pose i and pose j on each side.
            Hgij = np.dot(np.linalg.inv(Hgs[j]), Hgs[i])
            Hgijs.append(Hgij)
            Pgij = np.dot(2, _R2P(Hgij))

            Hcij = np.dot(Hcs[j], np.linalg.inv(Hcs[i]))
            Hcijs.append(Hcij)
            Pcij = np.dot(2, _R2P(Hcij))

            # Linear system for the rotation in modified-Rodrigues form.
            A.append(_skew(np.add(Pgij, Pcij)))
            B.append(np.subtract(Pcij, Pgij).reshape(3, 1))

    MA = np.vstack(A)
    MB = np.vstack(B)
    # Least-squares rotation estimate, then renormalize it.
    Pcg_ = np.dot(np.linalg.pinv(MA), MB)
    pcg = np.sqrt(np.add(1, np.dot(Pcg_.T, Pcg_)))
    Pcg = np.dot(np.dot(2, Pcg_), np.linalg.inv(pcg))
    Rcg = _P2R(Pcg).reshape(3, 3)

    # Second pass: with the rotation fixed, solve for the translation.
    A = []
    B = []
    # The pairs were appended in (i, j) order above, so a plain zip walks
    # them back in the same order (original used an index loop whose
    # counter `id` shadowed the builtin).
    for Hgij, Hcij in zip(Hgijs, Hcijs):
        A.append(np.subtract(Hgij[0:3, 0:3], np.eye(3, 3)))
        B.append(np.subtract(np.dot(Rcg, Hcij[0:3, 3:4]), Hgij[0:3, 3:4]))

    MA = np.asarray(A).reshape(size * 3, 3)
    MB = np.asarray(B).reshape(size * 3, 1)
    Tcg = np.dot(np.linalg.pinv(MA), MB).reshape(3, )

    return tfs.affines.compose(Tcg, np.squeeze(Rcg), [1, 1, 1])
ValueError("mode must be 'eye_in_hand' or 'eye_to_hand'") + + self._input = input_format + if self._input == 'euler': + self._function = _get_matrix_eular_radu + elif self._input == 'rotvertor': + self._function = _get_matrix_rotvector + elif self._input == 'quat': + self._function = _get_matrix_quat + else: + raise ValueError("INPUT ERROR") + + self._camera_pose_path = camera_pose_path + self._hand_pose_path = hand_pose_path + self._save_path = save_path + + self._get_pose() + + def _get_pose(self): + with open(f'{self._camera_pose_path}', 'r', encoding='utf-8') as f: + self._camera = json.load(f) + + with open(f'{self._hand_pose_path}', 'r', encoding='utf-8') as f: + self._hand = json.load(f) + + self._calculate_calibration() + + print(self._hand) + print(self._camera) + + logging.info(f"{self._calibration_matrix}") + hand_eye_result = { + "T": self._calibration_matrix.tolist() + } + with open(f"{self._save_path}", "w") as f: + json.dump(hand_eye_result, f, indent=4) + logging.info(f"Save as {self._save_path}") + + self._done = True + + def _calculate_data(self): + if self._input == 'quat': + for i in range(0, len(self._hand), 7): + self._Hgs.append( + np.linalg.inv( + self._function( + self._hand[i], self._hand[i + 1], self._hand[i + 2], + self._hand[i + 3], self._hand[i + 4], self._hand[i + 5], self._hand[i + 6] + ) + ) + if self._mode == 'eye_to_hand' else + self._function( + self._hand[i], self._hand[i + 1], self._hand[i + 2], + self._hand[i + 3], self._hand[i + 4], self._hand[i + 5], self._hand[i + 6] + ) + ) + self._Hcs.append( + self._function( + self._camera[i], self._camera[i + 1], self._camera[i + 2], + self._camera[i + 3], self._camera[i + 4], self._camera[i + 5], self._camera[i + 6] + ) + ) + + else: + for i in range(0, len(self._hand), 6): + self._Hgs.append( + np.linalg.inv( + self._function( + self._hand[i], self._hand[i + 1], self._hand[i + 2], + self._hand[i + 3], self._hand[i + 4], self._hand[i + 5] + ) + ) + if self._mode == 'eye_to_hand' else 
def prepare_data(_data: list[float], save_path: str = "camera_pose_data.json", format: str = "quat", mm2m: bool = False):
    """Normalize raw poses to flat [x, y, z, qw, qx, qy, qz] blocks and dump them as JSON.

    _data: flat pose list; stride 7 for "quat" (x, y, z, qw, qx, qy, qz),
        stride 6 for "euler" / "rotvertor" (x, y, z, rx, ry, rz).
    save_path: output JSON file path.
    format: one of "quat", "euler", "rotvertor".  (Name shadows the builtin
        but is kept for interface compatibility.)
    mm2m: when True, translations are converted from millimetres to metres.
        (The original code applied the conversion when the flag was False,
        inverting its name.)

    Raises:
        ValueError: on an unknown ``format`` (the original silently wrote
        an empty list).
    """
    scale = 1e3 if mm2m else 1.0
    new_list = []

    if format == "quat":
        # NOTE(review): assumes the quaternion already arrives w-first; the
        # original else-branch hinted at an xyzw -> wxyz reorder but was
        # broken (it emitted 8 elements per 7-stride pose) — confirm the
        # ordering against the actual data source.
        for i in range(0, len(_data), 7):
            new_list.extend([
                _data[i] / scale, _data[i + 1] / scale, _data[i + 2] / scale,
                _data[i + 3], _data[i + 4], _data[i + 5], _data[i + 6],
            ])
    elif format == "euler":
        for i in range(0, len(_data), 6):
            # Euler angles (sxyz convention) -> quaternion (w, x, y, z).
            rw, rx, ry, rz = tfs.euler.euler2quat(
                _data[i + 3], _data[i + 4], _data[i + 5], axes='sxyz'
            )
            new_list.extend([
                _data[i] / scale, _data[i + 1] / scale, _data[i + 2] / scale,
                rw, rx, ry, rz,
            ])
    elif format == "rotvertor":
        for i in range(0, len(_data), 6):
            # Rotation vector -> quaternion: the angle is the vector norm.
            # (The original left a no-op `tfs.quaternions.rotvec2quat`
            # expression and never converted, and it also read _data[i + 6],
            # which belongs to the next pose in a 6-stride layout.)
            theta = (_data[i + 3] ** 2 + _data[i + 4] ** 2 + _data[i + 5] ** 2) ** 0.5
            if theta < 1e-12:
                rw, rx, ry, rz = 1.0, 0.0, 0.0, 0.0
            else:
                rw, rx, ry, rz = tfs.quaternions.axangle2quat(
                    [_data[i + 3], _data[i + 4], _data[i + 5]], theta, is_normalized=False
                )
            new_list.extend([
                _data[i] / scale, _data[i + 1] / scale, _data[i + 2] / scale,
                rw, rx, ry, rz,
            ])
    else:
        raise ValueError(f"unknown format: {format!r}")

    with open(save_path, 'w', encoding='utf-8') as f:
        json.dump(new_list, f, indent=4)