Merge branch 'lyx_dev' into master
This commit is contained in:
@@ -28,7 +28,7 @@
|
||||
sudo apt install ros-humble-librealsense2*
|
||||
```
|
||||
|
||||
3. YOLOv11 Dependency:
|
||||
3. Dependency:
|
||||
|
||||
```
|
||||
"idge=3.2.1"
|
||||
@@ -46,6 +46,7 @@
|
||||
"psutil",
|
||||
"py-cpuinfo",
|
||||
"pandas>=1.1.4",
|
||||
"open3d",
|
||||
```
|
||||
|
||||
#### 使用说明
|
||||
|
||||
@@ -0,0 +1,392 @@
|
||||
import os
|
||||
from collections import defaultdict
|
||||
import time
|
||||
|
||||
import cv2
|
||||
import open3d as o3d
|
||||
import numpy as np
|
||||
import transforms3d as tfs
|
||||
from cv_bridge import CvBridge
|
||||
|
||||
import torch
|
||||
from detect_part.ultralytics import YOLO
|
||||
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from message_filters import ApproximateTimeSynchronizer, Subscriber
|
||||
|
||||
from sensor_msgs.msg import Image, CameraInfo
|
||||
from geometry_msgs.msg import Pose, Point, Quaternion
|
||||
from interfaces.msg import PoseWithID, PoseWithIDArray
|
||||
|
||||
import detect_part
|
||||
|
||||
|
||||
def get_map(K, D, camera_size):
    """Build undistortion remap tables and the optimal new intrinsics.

    Args:
        K: camera intrinsic matrix, flat (9,) or 3x3.
        D: distortion coefficients.
        camera_size: (width, height) of the image.

    Returns:
        (map1, map2, new_K_flat): the two remap tables for ``cv2.remap``
        plus the flattened optimal new camera matrix.
    """
    img_w, img_h = camera_size[0], camera_size[1]
    size = (img_w, img_h)

    k_mat = np.array(K).reshape(3, 3)
    d_vec = np.array(D)

    # alpha=1 keeps all source pixels in the undistorted view.
    optimal_k, _roi = cv2.getOptimalNewCameraMatrix(k_mat, d_vec, size, 1, size)
    xmap, ymap = cv2.initUndistortRectifyMap(k_mat, d_vec, None, optimal_k, size, cv2.CV_32FC1)

    return xmap, ymap, optimal_k.flatten()
|
||||
|
||||
|
||||
def pca(data, sort=True):
    """Principal component analysis of an (N, D) point array.

    Args:
        data: array-like of shape (N, D).
        sort: if True, order components by descending eigenvalue.

    Returns:
        (eigenvalues, eigenvectors) of the sample covariance matrix
        (eigenvectors as columns), or (None, None) if the
        eigendecomposition fails.
    """
    centroid = np.mean(data, axis=0)
    deviations = data - centroid  # center the data

    try:
        cov = np.cov(deviations.T)  # variables along rows after transpose
        eigenvalues, eigenvectors = np.linalg.eigh(cov)
    except np.linalg.LinAlgError:
        return None, None

    if sort:
        order = np.argsort(eigenvalues)[::-1]  # descending by variance
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]

    return eigenvalues, eigenvectors
|
||||
|
||||
def calculate_pose_cpu(mask, depth_img: np.ndarray, intrinsics):
    """Estimate the 6-DoF pose of the masked region from a depth image.

    Back-projects the depth pixels selected by ``mask`` into a point
    cloud, cleans it, then uses the cloud centroid as position and a
    PCA-derived frame as orientation.

    Args:
        mask: H x W array; non-zero pixels select the object.
        depth_img: H x W depth image, assumed millimetres (uint16) —
            see depth_scale below.
        intrinsics: o3d.camera.PinholeCameraIntrinsic matching depth_img.

    Returns:
        (x, y, z, roll, pitch, yaw) in metres / radians, or
        (0.0, 0.0, 0.0, None, None, None) when no valid points remain
        or the PCA fails.
    """
    # Keep depth only where the mask is set; zeroed pixels are dropped
    # by create_from_depth_image.
    depth_img_mask = np.zeros_like(depth_img)
    depth_img_mask[mask > 0] = depth_img[mask > 0]

    depth_o3d = o3d.geometry.Image(depth_img_mask.astype(np.uint16))

    point_cloud = o3d.geometry.PointCloud.create_from_depth_image(
        depth=depth_o3d,
        intrinsic=intrinsics,
        depth_scale=1000.0,  # mm -> m
        depth_trunc=8.0,     # discard points beyond 8 m
    )

    point_cloud = point_cloud.remove_non_finite_points()

    # Downsample, then drop statistical outliers before fitting.
    down_pcd = point_cloud.voxel_down_sample(voxel_size=0.02)
    clean_pcd, _ = down_pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)

    if len(clean_pcd.points) == 0:
        return 0.0, 0.0, 0.0, None, None, None
    center = clean_pcd.get_center()
    x, y, z = center

    # Principal axes of the cleaned cloud define the orientation frame.
    w, v = pca(np.asarray(clean_pcd.points))

    if w is not None and v is not None:
        vx, vy, vz = v[:, 0], v[:, 1], v[:, 2]

        # Fix eigenvector signs so the frame is deterministic and
        # right-handed (eigh's sign choice is arbitrary).
        if vx[0] < 0:
            vx = -vx
        if vy[1] < 0:
            vy = -vy
        if not np.allclose(np.cross(vx, vy), vz):
            vz = -vz

        R = np.column_stack((vx, vy, vz))
        # 4x4 affine; mat2euler reads only the top-left rotation block.
        rmat = tfs.affines.compose(np.squeeze(np.asarray((x, y, z))), R, [1, 1, 1])

        roll, pitch, yaw = tfs.euler.mat2euler(rmat)

        # (A commented-out Open3D LineSet visualization of the principal
        # axes used to live here; removed as dead code.)

        return x, y, z, roll, pitch, yaw

    else:
        return 0.0, 0.0, 0.0, None, None, None
|
||||
|
||||
|
||||
def draw_box(set_confidence, rgb_img, result):
    """Draw detection bounding boxes and labels on ``rgb_img`` in place.

    Args:
        set_confidence: minimum confidence for a box to be drawn.
        rgb_img: BGR image to annotate (modified in place).
        result: a single ultralytics YOLO result object.
    """
    boxes = result.boxes.xywh.cpu().numpy()
    confidences = result.boxes.conf.cpu().numpy()
    class_ids = result.boxes.cls.cpu().numpy()
    labels = result.names

    for i, box in enumerate(boxes):
        # Guard clause: skip low-confidence detections.
        if confidences[i] < set_confidence:
            continue

        cx, cy, bw, bh = box[:4]

        top_left = [int(cx - bw / 2), int(cy - bh / 2)]
        bottom_right = [int(cx + bw / 2), int(cy + bh / 2)]
        cv2.rectangle(rgb_img, top_left, bottom_right, (255, 255, 0), 2)

        cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i]*100:.2f}',
                    (top_left[0], top_left[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
|
||||
|
||||
def draw_mask(set_confidence, rgb_img, result):
    """Overlay instance-segmentation masks on ``rgb_img`` in place.

    Each kept mask is resized to the original image shape and blended
    50/50 with red.

    Args:
        set_confidence: minimum confidence for a mask to be drawn.
        rgb_img: BGR image to annotate (modified in place).
        result: a single ultralytics YOLO result object with masks.
    """
    masks = result.masks.data.cpu().numpy()
    orig_shape = result.masks.orig_shape
    confidences = result.boxes.conf.cpu().numpy()

    for conf, m in zip(confidences, masks):
        if conf < set_confidence:
            continue
        # Nearest-neighbour keeps the mask binary when upscaling.
        full = cv2.resize(m.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_NEAREST)
        rgb_img[full > 0] = rgb_img[full > 0] * 0.5 + np.array([0, 0, 255]) * 0.5
|
||||
|
||||
|
||||
def distortion_correction(color_image, depth_image, map1, map2):
    """Undistort a color/depth image pair with precomputed remap tables.

    Color uses bilinear interpolation; depth uses nearest-neighbour so
    depth values are never blended across edges.

    Returns:
        (undistorted_color, undistorted_depth), dtypes preserved.
    """
    color_out = cv2.remap(color_image, map1, map2, cv2.INTER_LINEAR).astype(color_image.dtype)
    depth_out = cv2.remap(depth_image, map1, map2, cv2.INTER_NEAREST).astype(depth_image.dtype)
    return color_out, depth_out
|
||||
|
||||
|
||||
def crop_mask_bbox(depth_img, mask, box):
    """Crop a depth image and its mask to a detection bounding box.

    Args:
        depth_img: H x W depth image.
        mask: H x W mask (0/1 or bool) aligned with ``depth_img``.
        box: (x_center, y_center, w, h) in pixel coordinates.

    Returns:
        (depth_crop, mask_crop, (x_min, y_min)) where (x_min, y_min) is
        the clamped top-left corner of the crop in the original image.
    """
    # Bug fix: numpy's shape is (height, width); the original assigned
    # them swapped (`width, high = depth_img.shape`), so the row clamp
    # used the image width and vice versa — wrong on non-square images.
    height, width = depth_img.shape
    x_center, y_center, w, h = box[:4]

    x_min, x_max = int(round(x_center - w / 2)), int(round(x_center + w / 2))
    y_min, y_max = int(round(y_center - h / 2)), int(round(y_center + h / 2))

    # Clamp the box to the image bounds.
    x_min, y_min = max(0, x_min), max(0, y_min)
    x_max, y_max = min(x_max, width), min(y_max, height)

    depth_crop = depth_img[y_min:y_max, x_min:x_max]
    mask_crop = mask[y_min:y_max, x_min:x_max]

    return depth_crop, mask_crop, (x_min, y_min)
|
||||
|
||||
|
||||
class DetectNode(Node):
    """ROS 2 node: detect a chessboard target in RGB-D frames and publish its pose.

    Per frame: synchronized color + depth -> (optional resize to
    ``expect_size``) -> undistortion -> ``self.function`` (selected by
    ``mode``) -> publish an annotated image on /image/detect_image and a
    PoseWithIDArray on /pose/cv_detect_pose.
    """

    def __init__(self, name, mode):
        """Create the node.

        Args:
            name: ROS node name.
            mode: processing mode; only 'test' (chessboard detection) is
                wired up in this file.
        """
        super().__init__(name)
        self.checkpoint_path = None
        self.checkpoint_name = None
        self.function = None            # per-frame processing callable, chosen by `mode`
        self.output_boxes = None
        self.output_masks = None
        self.K = self.D = None          # intrinsics / distortion, filled from CameraInfo
        self.map1 = self.map2 = None    # undistortion remap tables (None until CameraInfo arrives)
        self.intrinsics = None          # open3d PinholeCameraIntrinsic
        self.mode = mode
        self.device = None
        self.calculate_function = None  # pose solver selected by the `device` parameter
        self.fx = self.fy = 0.5

        self.cv_bridge = CvBridge()

        '''init'''
        self._init_param()

        if mode == 'test':
            self.function = self._test_image
        else:
            self.function = None
            self.get_logger().error('Error: Mode Unknown')

        if self.device == 'cpu':
            self.calculate_function = calculate_pose_cpu
        elif self.device == 'gpu':
            raise NotImplementedError('Function: calculate_pose_gpu not implemented')
        else:
            raise ValueError(f"device must be cpu or gpu, now {self.device}")

        self._init_publisher()
        self._init_subscriber()

    def _init_param(self):
        """Declare and read every ROS parameter used by the node."""
        pkg_dir = os.path.dirname(detect_part.__file__)

        self.declare_parameter('checkpoint_name', 'yolo11s-seg.pt')
        self.checkpoint_name = self.get_parameter('checkpoint_name').value
        self.checkpoint_path = os.path.join(pkg_dir, 'checkpoint', self.checkpoint_name)

        self.declare_parameter('output_boxes', True)
        self.output_boxes = self.get_parameter('output_boxes').value

        self.declare_parameter('output_masks', False)
        self.output_masks = self.get_parameter('output_masks').value

        self.declare_parameter('set_confidence', 0.6)
        self.set_confidence = self.get_parameter('set_confidence').value

        self.declare_parameter('color_image_topic', '/camera/color/image_raw')
        self.color_image_topic = self.get_parameter('color_image_topic').value

        self.declare_parameter('depth_image_topic', '/camera/depth/image_raw')
        self.depth_image_topic = self.get_parameter('depth_image_topic').value

        self.declare_parameter('camera_info_topic', '/camera/color/camera_info')
        self.camera_info_topic = self.get_parameter('camera_info_topic').value

        self.declare_parameter('device', 'cpu')
        self.device = self.get_parameter('device').value

        # Working resolution the pipeline expects; frames are resized to it.
        self.declare_parameter('width', 1280)
        self.declare_parameter('high', 720)
        self.expect_size = [self.get_parameter('width').value, self.get_parameter('high').value]

    def _init_publisher(self):
        """Create the pose publisher and, if enabled, the debug-image publisher."""
        self.pub_pose_list = self.create_publisher(PoseWithIDArray, '/pose/cv_detect_pose', 10)

        if self.output_boxes or self.output_masks:
            self.pub_detect_image = self.create_publisher(Image, '/image/detect_image', 10)

    def _init_subscriber(self):
        """Subscribe to camera info and the time-synchronized color/depth pair."""
        self.sub_camera_info = self.create_subscription(
            CameraInfo,
            self.camera_info_topic,
            self._camera_info_callback,
            10
        )

        '''sync get color and depth img'''
        self.sub_color_image = Subscriber(self, Image, self.color_image_topic)
        self.sub_depth_image = Subscriber(self, Image, self.depth_image_topic)

        self.sync_subscriber = ApproximateTimeSynchronizer(
            [self.sub_color_image, self.sub_depth_image],
            queue_size=10,
            slop=0.1
        )
        self.sync_subscriber.registerCallback(self._sync_callback)

    def _camera_info_callback(self, msg: CameraInfo):
        """Cache intrinsics, build undistortion maps, then unsubscribe (one-shot)."""
        self.K = msg.k
        self.D = msg.d

        self.camera_size = [msg.width, msg.height]

        if self.K is not None and self.D is not None:
            # self.K becomes the flattened optimal new camera matrix.
            self.map1, self.map2, self.K = get_map(msg.k, msg.d, self.camera_size)
            # Scale intrinsics from the native resolution to expect_size.
            self.scale = [self.expect_size[0] / self.camera_size[0], self.expect_size[1] / self.camera_size[1]]
            self.intrinsics = o3d.camera.PinholeCameraIntrinsic(
                self.expect_size[0],
                self.expect_size[1],
                self.K[0] * self.scale[0],  # fx
                self.K[4] * self.scale[1],  # fy
                self.K[2] * self.scale[0],  # cx
                self.K[5] * self.scale[1]   # cy
            )

            # Some drivers publish an empty distortion vector; substitute zeros.
            if len(self.D) == 0:
                self.D = [0, 0, 0, 0, 0, 0, 0, 0]
            # Camera info only needs to be read once.
            self.destroy_subscription(self.sub_camera_info)
        else:
            # Bug fix: the original `raise "K and D are not defined"` raised a
            # TypeError at runtime — strings are not exceptions in Python 3.
            raise ValueError("K and D are not defined")

    def _sync_callback(self, color_img_ros, depth_img_ros):
        """Synchronized color/depth callback: undistort, run detection, publish."""
        # Robustness fix: image frames can arrive before CameraInfo, in which
        # case camera_size / map1 / map2 are not set yet and the code below
        # would raise. Skip such frames instead of crashing.
        if self.map1 is None or self.map2 is None:
            self.get_logger().warn('Camera intrinsics not yet received, skipping frame')
            return

        color_img_cv = self.cv_bridge.imgmsg_to_cv2(color_img_ros, "bgr8")
        depth_img_cv = self.cv_bridge.imgmsg_to_cv2(depth_img_ros, '16UC1')
        if self.camera_size != self.expect_size:
            color_img_cv = cv2.resize(color_img_cv, self.expect_size)
            depth_img_cv = cv2.resize(depth_img_cv, self.expect_size)

        color_img_cv, depth_img_cv = distortion_correction(color_img_cv, depth_img_cv, self.map1, self.map2)

        img, pose_dict = self.function(color_img_cv, depth_img_cv)

        # Nothing detected: publish the plain (undistorted) frame instead.
        if img is None:
            img = self.cv_bridge.cv2_to_imgmsg(color_img_cv, "bgr8")

        if self.output_boxes or self.output_masks:
            self.pub_detect_image.publish(img)

        if pose_dict is not None:
            pose_list_all = PoseWithIDArray()
            for (class_id, class_name), pose_list in pose_dict.items():
                pose_list_all.objects.append(
                    PoseWithID(
                        class_name=class_name,
                        class_id=class_id,
                        pose_list=pose_list
                    )
                )
            pose_list_all.header.stamp = self.get_clock().now().to_msg()
            pose_list_all.header.frame_id = "pose_list"
            self.pub_pose_list.publish(pose_list_all)

    def _test_image(self, rgb_img, depth_img):
        """Detect a chessboard, estimate its pose, and annotate the image.

        Args:
            rgb_img: BGR color frame (annotated in place).
            depth_img: aligned 16-bit depth frame.

        Returns:
            (ROS Image, pose_dict) when a board with a valid pose was
            found, otherwise (ROS Image, None). Robustness fix: the
            original could fall through without a return (yielding a bare
            ``None``) when the board was found but the pose was invalid,
            which crashed the caller's tuple unpacking.
        """
        pose_dict = defaultdict(list)
        rgb_img_gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
        pattern_size = (11, 8)  # inner corners (cols, rows)
        ret, corners = cv2.findChessboardCorners(rgb_img_gray, pattern_size)
        if ret:
            # Sub-pixel corner refinement for calibration-grade accuracy.
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
            corners_subpix = cv2.cornerSubPix(rgb_img_gray, corners, (11, 11), (-1, -1), criteria)

            # Fill every chessboard cell quad to build a board mask.
            corners_subpix = corners_subpix.reshape(pattern_size[1], pattern_size[0], 2)
            mask = np.zeros(rgb_img_gray.shape, dtype=np.uint8)

            for i in range(0, pattern_size[1] - 1):
                for j in range(0, pattern_size[0] - 1):
                    pts = np.array([
                        corners_subpix[i, j],
                        corners_subpix[i, j + 1],
                        corners_subpix[i + 1, j + 1],
                        corners_subpix[i + 1, j]
                    ], dtype=np.int32)
                    cv2.fillConvexPoly(mask, pts, 1)

            # Tint the detected board red in the debug image.
            rgb_img[mask > 0] = rgb_img[mask > 0] * 0.5 + np.array([0, 0, 255]) * 0.5
            orig_shape = rgb_img_gray.shape
            # Defensive resize: mask is already orig_shape; kept as a no-op guard.
            mask = cv2.resize(mask.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_NEAREST)
            x, y, z, roll, pitch, yaw = self.calculate_function(mask, depth_img, self.intrinsics)

            # (0, 0, 0) is the solver's "no valid pose" sentinel.
            if (x, y, z) != (0.0, 0.0, 0.0):
                pose = Pose()
                pose.position = Point(x=x, y=y, z=z)
                # NOTE(review): Euler angles are stored directly in the
                # quaternion's x/y/z fields (w left at default) — downstream
                # must decode them the same way; confirm this is intentional.
                pose.orientation = Quaternion(x=roll, y=pitch, z=yaw)
                pose_dict[int(99), 'crossboard'].append(pose)

                cv2.putText(rgb_img, f'cs: x: {x:.3f}, y: {y:.3f}, z: {z:.3f}', (0, 0 + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
                cv2.putText(rgb_img, f'rpy: r: {roll:.3f}, p: {pitch:.3f}, y: {yaw:.3f}', (0, 0 + 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
                return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict

        # No board found, or board found but no valid pose.
        return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), None
|
||||
|
||||
|
||||
def main(args=None):
    """Entry point: create the detect node and spin until interrupted."""
    rclpy.init(args=args)
    detect_node = DetectNode('detect', 'test')
    try:
        rclpy.spin(detect_node)
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop the node; shut down quietly.
        pass
    finally:
        detect_node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()
|
||||
@@ -1,9 +1,12 @@
|
||||
import os
|
||||
import math
|
||||
import time
|
||||
import threading
|
||||
from collections import defaultdict
|
||||
|
||||
import cv2
|
||||
import open3d as o3d
|
||||
import numpy as np
|
||||
import transforms3d as tfs
|
||||
from cv_bridge import CvBridge
|
||||
|
||||
import torch
|
||||
@@ -11,144 +14,109 @@ from detect_part.ultralytics import YOLO
|
||||
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from message_filters import ApproximateTimeSynchronizer, Subscriber
|
||||
|
||||
from sensor_msgs.msg import Image, CameraInfo
|
||||
from sensor_msgs.msg import Image
|
||||
from geometry_msgs.msg import Pose, Point, Quaternion
|
||||
from interfaces.msg import PoseWithID, PoseWithIDArray
|
||||
from interfaces.msg import PoseWithID
|
||||
from img_dev.msg import ImgMsg
|
||||
from interfaces.srv import PoseArray
|
||||
|
||||
import detect_part
|
||||
|
||||
|
||||
def iqr(depths, threshold: float = 1.5):
|
||||
"""iqr去除异常点"""
|
||||
if len(depths) < 7:
|
||||
return depths
|
||||
q1 = np.percentile(depths, 25)
|
||||
q3 = np.percentile(depths, 75)
|
||||
iqr_ = q3 - q1
|
||||
lower_bound = q1 - iqr_ * threshold
|
||||
upper_bound = q3 + iqr_ * threshold
|
||||
iqr_depths = depths[(depths >= lower_bound) & (depths <= upper_bound)]
|
||||
return iqr_depths
|
||||
def get_map(K, D, camera_size):
|
||||
h, w = camera_size[::-1]
|
||||
K = np.array(K).reshape(3, 3)
|
||||
D = np.array(D)
|
||||
new_K, _ = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h))
|
||||
map1, map2 = cv2.initUndistortRectifyMap(K, D, None, new_K, (w, h), cv2.CV_32FC1)
|
||||
|
||||
return map1, map2, new_K.flatten()
|
||||
|
||||
|
||||
def calculate_coordinate(mask, depth_img: np.ndarray, camera_info, u, v):
|
||||
"""计算坐标"""
|
||||
u = int(round(u))
|
||||
v = int(round(v))
|
||||
def pca(data, sort=True):
|
||||
"""主成分分析 """
|
||||
center = np.mean(data, axis=0)
|
||||
centered_points = data - center # 去中心化
|
||||
|
||||
_cx, _cy, _fx, _fy = camera_info[2], camera_info[5], camera_info[0], camera_info[4]
|
||||
try:
|
||||
cov_matrix = np.cov(centered_points.T) # 转置
|
||||
eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
|
||||
|
||||
if not (0 <= v < depth_img.shape[0] and 0 <= u < depth_img.shape[1]):
|
||||
print(f'Calculate coordinate error: u={u}, v={v}')
|
||||
return 0.0, 0.0, 0.0
|
||||
except np.linalg.LinAlgError:
|
||||
return None, None
|
||||
|
||||
window_size = 15
|
||||
half = window_size // 2
|
||||
if sort:
|
||||
sort = eigenvalues.argsort()[::-1] # 降序排列
|
||||
eigenvalues = eigenvalues[sort] # 索引
|
||||
eigenvectors = eigenvectors[:, sort]
|
||||
|
||||
"""获取mask质心patch"""
|
||||
patch = depth_img[max(0, v - half):v + half + 1, max(0, u - half):u + half + 1:2].flatten()
|
||||
valid_depths = patch[patch > 0]
|
||||
return eigenvalues, eigenvectors
|
||||
|
||||
if len(valid_depths) == 0:
|
||||
print(f'No valid depth in window at ({u}, {v})')
|
||||
return 0.0, 0.0, 0.0
|
||||
|
||||
"""过滤异常值,取中位数计算xy"""
|
||||
valid_depths = iqr(valid_depths)
|
||||
depth = np.median(valid_depths) / 1e3
|
||||
|
||||
x = -(u - _cx) * depth / _fx
|
||||
y = -(v - _cy) * depth / _fy
|
||||
|
||||
"""取mask中深度的中位数作为z"""
|
||||
depth_masked = depth_img[(mask > 0) & (depth_img > 0)]
|
||||
median_depth = np.median(depth_masked)
|
||||
|
||||
z = median_depth / 1e3
|
||||
|
||||
return x, y, z
|
||||
|
||||
|
||||
def calculate_pose(mask, orig_shape, depth_img: np.ndarray, camera_info):
|
||||
def calculate_pose_cpu(mask, depth_img: np.ndarray, intrinsics):
|
||||
"""计算位态"""
|
||||
rect = cv2.minAreaRect(max(cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0], key=cv2.contourArea))
|
||||
box = cv2.boxPoints(rect)
|
||||
depth_img_mask = np.zeros_like(depth_img)
|
||||
depth_img_mask[mask > 0] = depth_img[mask > 0]
|
||||
|
||||
len1 = np.linalg.norm(box[1] - box[0])
|
||||
len2 = np.linalg.norm(box[1] - box[2])
|
||||
depth_o3d = o3d.geometry.Image(depth_img_mask.astype(np.uint16))
|
||||
|
||||
point_cloud = o3d.geometry.PointCloud.create_from_depth_image(
|
||||
depth=depth_o3d,
|
||||
intrinsic=intrinsics,
|
||||
depth_scale=1000.0,
|
||||
depth_trunc=8.0,
|
||||
)
|
||||
|
||||
point_cloud = point_cloud.remove_non_finite_points()
|
||||
|
||||
down_pcd = point_cloud.voxel_down_sample(voxel_size=0.02)
|
||||
clean_pcd, _ = down_pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
|
||||
|
||||
if len(clean_pcd.points) == 0:
|
||||
return 0.0, 0.0, 0.0, None, None, None
|
||||
center = clean_pcd.get_center()
|
||||
x, y, z = center
|
||||
|
||||
w, v = pca(np.asarray(clean_pcd.points))
|
||||
|
||||
if w is not None and v is not None:
|
||||
vx, vy, vz = v[:,0], v[:,1], v[:,2]
|
||||
|
||||
if vx[0] < 0:
|
||||
vx = -vx
|
||||
if vy[1] < 0:
|
||||
vy = -vy
|
||||
if not np.allclose(np.cross(vx, vy), vz):
|
||||
vz = -vz
|
||||
|
||||
R = np.column_stack((vx, vy, vz))
|
||||
rmat = tfs.affines.compose(np.squeeze(np.asarray((x, y, z))), R, [1, 1, 1])
|
||||
|
||||
roll, pitch, yaw = tfs.euler.mat2euler(rmat)
|
||||
|
||||
return x, y, z, roll, pitch, yaw
|
||||
|
||||
M = cv2.moments(mask.astype(np.uint8))
|
||||
if M["m00"] != 0:
|
||||
center_x = int(M["m10"] / M["m00"])
|
||||
center_y = int(M["m01"] / M["m00"])
|
||||
else:
|
||||
raise ValueError("Mask Is Empty")
|
||||
|
||||
mask = cv2.resize(mask.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_LINEAR)
|
||||
x, y, z = calculate_coordinate(mask, depth_img, camera_info, center_x, center_y)
|
||||
if len1 >= len2:
|
||||
vec = box[1] - box[0]
|
||||
else:
|
||||
vec = box[1] - box[2]
|
||||
|
||||
if vec[0] < 0:
|
||||
vec = -vec
|
||||
|
||||
angle = math.degrees(math.atan2(vec[1], vec[0]))
|
||||
if angle <= -90.0:
|
||||
angle += 180
|
||||
|
||||
yaw = angle/180.0 * math.pi
|
||||
pitch = 0.0
|
||||
roll = 0.0
|
||||
|
||||
return x, y, z, roll, pitch, yaw
|
||||
return 0.0, 0.0, 0.0, None, None, None
|
||||
|
||||
|
||||
def draw_box(set_confidence, rgb_img, result, post_list):
|
||||
def draw_box(set_confidence, rgb_img, result):
|
||||
"""绘制目标检测边界框"""
|
||||
boxes = result.boxes.xywh.cpu().numpy()
|
||||
confidences = result.boxes.conf.cpu().numpy()
|
||||
class_ids = result.boxes.cls.cpu().numpy()
|
||||
labels = result.names
|
||||
|
||||
if len(post_list):
|
||||
for i, box in enumerate(boxes):
|
||||
j = 0
|
||||
if confidences[i] < set_confidence:
|
||||
continue
|
||||
else:
|
||||
x_center, y_center, width, height = box[:4]
|
||||
for i, box in enumerate(boxes):
|
||||
if confidences[i] >= set_confidence:
|
||||
x_center, y_center, width, height = box[:4]
|
||||
|
||||
p1 = [int((x_center - width / 2)), int((y_center - height / 2))]
|
||||
p2 = [int((x_center + width / 2)), int((y_center + height / 2))]
|
||||
cv2.rectangle(rgb_img, p1, p2, (255, 255, 0), 2)
|
||||
p1 = [int((x_center - width / 2)), int((y_center - height / 2))]
|
||||
p2 = [int((x_center + width / 2)), int((y_center + height / 2))]
|
||||
cv2.rectangle(rgb_img, p1, p2, (255, 255, 0), 2)
|
||||
|
||||
cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i]*100:.2f}', (p1[0], p1[1] - 10),
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
|
||||
cv2.putText(rgb_img, f'cs: {post_list[j][0]:.3f} {post_list[j][1]:.3f} {post_list[j][2]:.3f}', (p1[0], p1[1] - 35)
|
||||
,cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
cv2.putText(rgb_img, f'rpy: {post_list[j][3]:.3f} {post_list[j][4]:.3f} {post_list[j][5]:.3f}', (p1[0], p1[1] - 60)
|
||||
,cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
j += 1
|
||||
else:
|
||||
for i, box in enumerate(boxes):
|
||||
j = 0
|
||||
if confidences[i] < set_confidence:
|
||||
continue
|
||||
else:
|
||||
x_center, y_center, width, height = box[:4]
|
||||
|
||||
p1 = [int((x_center - width / 2)), int((y_center - height / 2))]
|
||||
p2 = [int((x_center + width / 2)), int((y_center + height / 2))]
|
||||
cv2.rectangle(rgb_img, p1, p2, (255, 255, 0), 2)
|
||||
|
||||
cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i] * 100:.2f}', (p1[0], p1[1] - 10),
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
j += 1
|
||||
cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i]*100:.2f}', (p1[0], p1[1] - 10),
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
|
||||
|
||||
def draw_mask(set_confidence, rgb_img, result):
|
||||
@@ -158,14 +126,15 @@ def draw_mask(set_confidence, rgb_img, result):
|
||||
confidences = result.boxes.conf.cpu().numpy()
|
||||
|
||||
for i, mask in enumerate(masks):
|
||||
if confidences[i] < set_confidence:
|
||||
continue
|
||||
else:
|
||||
mask = cv2.resize(mask.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_LINEAR)
|
||||
if confidences[i] >= set_confidence:
|
||||
mask = cv2.resize(mask.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_NEAREST)
|
||||
rgb_img[mask > 0] = rgb_img[mask > 0] * 0.5 + np.array([0, 0, 255]) * 0.5
|
||||
else:
|
||||
continue
|
||||
|
||||
|
||||
def distortion_correction(color_image, depth_image, map1, map2):
|
||||
"""畸变矫正"""
|
||||
undistorted_color = cv2.remap(color_image, map1, map2, cv2.INTER_LINEAR)
|
||||
undistorted_color = undistorted_color.astype(color_image.dtype)
|
||||
|
||||
@@ -175,36 +144,65 @@ def distortion_correction(color_image, depth_image, map1, map2):
|
||||
return undistorted_color, undistorted_depth
|
||||
|
||||
|
||||
def crop_mask_bbox(depth_img, mask, box):
|
||||
"""
|
||||
输入:
|
||||
depth_img: H x W
|
||||
mask: H x W (0/1 或 bool)
|
||||
输出:
|
||||
depth_crop, mask_crop
|
||||
"""
|
||||
width, high = depth_img.shape
|
||||
x_center, y_center, w, h = box[:4]
|
||||
|
||||
x_min, x_max = int(round(x_center - w/2)), int(round(x_center + w/2))
|
||||
y_min, y_max = int(round(y_center - h/2)), int(round(y_center + h/2))
|
||||
|
||||
depth_crop = depth_img[max(0, y_min):min(y_max, high), max(0, x_min):min(x_max, width)]
|
||||
mask_crop = mask[max(0, y_min):min(y_max, high), max(0, x_min):min(x_max, width)]
|
||||
|
||||
return depth_crop, mask_crop, (max(0, x_min), max(0, y_min))
|
||||
|
||||
|
||||
class DetectNode(Node):
|
||||
def __init__(self, name, mode):
|
||||
super().__init__(name)
|
||||
self.mode = mode
|
||||
self.device = None
|
||||
self.checkpoint_path = None
|
||||
self.checkpoint_name = None
|
||||
self.function = None
|
||||
self.output_boxes = None
|
||||
self.output_masks = None
|
||||
self.K = self.D = self.R = self.P = None
|
||||
self.map1 = self.map2 = None
|
||||
self.mode = mode
|
||||
self.function = None
|
||||
self.calculate_function = None
|
||||
|
||||
self.K = None
|
||||
|
||||
self.fx = self.fy = 0.5
|
||||
self.camera_data = {}
|
||||
|
||||
self.camera_size = []
|
||||
self.cv_bridge = CvBridge()
|
||||
|
||||
self.lock = threading.Lock()
|
||||
|
||||
'''init'''
|
||||
self._init_param()
|
||||
|
||||
if mode == 'detect':
|
||||
self._init_model()
|
||||
elif mode == 'test':
|
||||
self.function = self._test_image
|
||||
else:
|
||||
self.function = None
|
||||
self.get_logger().error('Error: Mode Unknown')
|
||||
|
||||
if self.device == 'cpu':
|
||||
self.calculate_function = calculate_pose_cpu
|
||||
elif self.device == 'gpu':
|
||||
raise NotImplementedError('Function: calculate_pose_gpu not implemented')
|
||||
else:
|
||||
raise ValueError(f"device must be cpu or gpu, now {self.device}")
|
||||
|
||||
self._init_publisher()
|
||||
self._init_subscriber()
|
||||
|
||||
self._init_service()
|
||||
|
||||
def _init_param(self):
|
||||
"""init parameter"""
|
||||
@@ -217,26 +215,20 @@ class DetectNode(Node):
|
||||
self.declare_parameter('output_boxes', True)
|
||||
self.output_boxes = self.get_parameter('output_boxes').value
|
||||
|
||||
self.declare_parameter('output_masks', True)
|
||||
self.declare_parameter('output_masks', False)
|
||||
self.output_masks = self.get_parameter('output_masks').value
|
||||
|
||||
self.declare_parameter('set_confidence', 0.5)
|
||||
self.set_confidence = self.get_parameter('set_confidence').value
|
||||
|
||||
self.declare_parameter('color_image_topic', '/camera/color/image_raw')
|
||||
self.color_image_topic = self.get_parameter('color_image_topic').value
|
||||
|
||||
self.declare_parameter('depth_image_topic', '/camera/depth/image_raw')
|
||||
self.depth_image_topic = self.get_parameter('depth_image_topic').value
|
||||
|
||||
self.declare_parameter('camera_info_topic', '/camera/camera_info')
|
||||
self.camera_info_topic = self.get_parameter('camera_info_topic').value
|
||||
self.declare_parameter('device', 'cpu')
|
||||
self.device = self.get_parameter('device').value
|
||||
|
||||
def _init_model(self):
|
||||
"""init model"""
|
||||
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||
device_model = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||
try:
|
||||
self.model = YOLO(self.checkpoint_path).to(self.device)
|
||||
self.model = YOLO(self.checkpoint_path).to(device_model)
|
||||
except Exception as e:
|
||||
self.get_logger().error(f'Failed to load YOLO model: {e}')
|
||||
raise
|
||||
@@ -249,102 +241,93 @@ class DetectNode(Node):
|
||||
self.get_logger().error(f'Unknown checkpoint: {self.checkpoint_name}')
|
||||
|
||||
def _init_publisher(self):
|
||||
"""init_publisher"""
|
||||
self.pub_pose_list = self.create_publisher(PoseWithIDArray, '/pose/cv_detect_pose', 10)
|
||||
|
||||
"""init publisher"""
|
||||
if self.output_boxes or self.output_masks:
|
||||
self.pub_detect_image = self.create_publisher(Image, '/image/detect_image', 10)
|
||||
|
||||
def _init_service(self):
|
||||
"""init service server"""
|
||||
self.server = self.create_service(
|
||||
PoseArray,
|
||||
"/get_object_pose_list",
|
||||
self._service_callback
|
||||
)
|
||||
|
||||
def _init_subscriber(self):
|
||||
"""init_subscriber"""
|
||||
self.sub_camera_info = self.create_subscription(
|
||||
CameraInfo,
|
||||
self.camera_info_topic,
|
||||
self._camera_info_callback,
|
||||
"""init subscriber"""
|
||||
self.sub_img = self.create_subscription(
|
||||
ImgMsg,
|
||||
"/img_msg",
|
||||
self._sub_callback,
|
||||
10
|
||||
)
|
||||
|
||||
'''sync get color and depth img'''
|
||||
self.sub_color_image = Subscriber(self, Image, self.color_image_topic)
|
||||
self.sub_depth_image = Subscriber(self, Image, self.depth_image_topic)
|
||||
|
||||
self.sync_subscriber = ApproximateTimeSynchronizer(
|
||||
[self.sub_color_image, self.sub_depth_image],
|
||||
queue_size=10,
|
||||
slop=0.1
|
||||
)
|
||||
self.sync_subscriber.registerCallback(self._sync_callback)
|
||||
|
||||
def _camera_info_callback(self, msg: CameraInfo):
|
||||
"""Get camera info"""
|
||||
self.K = msg.k
|
||||
self.D = msg.d
|
||||
self.R = msg.r
|
||||
self.P = msg.p
|
||||
|
||||
self.camera_size.append(msg.width)
|
||||
self.camera_size.append(msg.height)
|
||||
|
||||
self.destroy_subscription(self.sub_camera_info)
|
||||
|
||||
self.map1, self.map2, self.K = self._get_map(msg.k)
|
||||
# self.get_logger().info(f'K: {self.K}')
|
||||
# self.get_logger().info(f'D: {self.D}')
|
||||
|
||||
def _get_map(self, K):
|
||||
h, w = self.camera_size[::-1]
|
||||
K = np.array(K).reshape(3, 3)
|
||||
D = np.array(self.D)
|
||||
new_K, _ = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h))
|
||||
map1, map2 = cv2.initUndistortRectifyMap(K, D, None, new_K, (w, h), cv2.CV_32FC1)
|
||||
|
||||
return map1, map2, new_K.flatten()
|
||||
|
||||
def _sync_callback(self, color_img_ros, depth_img_ros):
|
||||
def _sub_callback(self, msg):
|
||||
"""同步回调函数"""
|
||||
if self.K is None:
|
||||
self.get_logger().warn('Camera intrinsics not yet received. Waiting...')
|
||||
return
|
||||
with self.lock:
|
||||
self.camera_data[msg.position] = [
|
||||
msg.image_color,
|
||||
msg.image_depth,
|
||||
msg.karr,
|
||||
msg.darr
|
||||
]
|
||||
|
||||
def _service_callback(self, request, response):
|
||||
with self.lock:
|
||||
if request.camera_position in self.camera_data:
|
||||
color_img_ros, depth_img_ros, self.K, D = self.camera_data[request.camera_position]
|
||||
else:
|
||||
response.success = False
|
||||
response.info = f"{request.camera_position} Camera data is empty or name is wrong"
|
||||
response.objects = []
|
||||
|
||||
return response
|
||||
|
||||
self.camera_size = [color_img_ros.width, color_img_ros.height]
|
||||
|
||||
color_img_cv = self.cv_bridge.imgmsg_to_cv2(color_img_ros, "bgr8")
|
||||
depth_img_cv = self.cv_bridge.imgmsg_to_cv2(depth_img_ros, '16UC1')
|
||||
|
||||
color_img_cv_new, depth_img_cv_new = distortion_correction(color_img_cv, depth_img_cv, self.map1, self.map2)
|
||||
map1, map2, self.K = get_map(self.K, D, self.camera_size)
|
||||
color_img_cv, depth_img_cv = distortion_correction(color_img_cv, depth_img_cv, map1, map2)
|
||||
|
||||
# diff = cv2.absdiff(color_img_cv, color_img_cv_new)
|
||||
# cv2.imshow('diff', diff)
|
||||
# cv2.waitKey(0)
|
||||
|
||||
img, pose_dict = self.function(color_img_cv_new, depth_img_cv_new)
|
||||
img, pose_dict = self.function(color_img_cv, depth_img_cv)
|
||||
|
||||
"""masks为空,结束这一帧"""
|
||||
if img is None and pose_dict is None:
|
||||
img = self.cv_bridge.cv2_to_imgmsg(color_img_cv_new, "bgr8")
|
||||
|
||||
if img is not None and (self.output_boxes or self.output_masks):
|
||||
if self.output_boxes or self.output_masks:
|
||||
if img is None:
|
||||
img = color_img_ros
|
||||
self.pub_detect_image.publish(img)
|
||||
|
||||
if pose_dict is not None:
|
||||
pose_list_all = PoseWithIDArray()
|
||||
response.info = "Success get pose"
|
||||
response.success = True
|
||||
for (class_id, class_name), pose_list in pose_dict.items():
|
||||
pose_list_all.objects.append(
|
||||
response.objects.append(
|
||||
PoseWithID(
|
||||
class_name = class_name,
|
||||
class_id = class_id,
|
||||
pose_list = pose_list
|
||||
)
|
||||
)
|
||||
self.pub_pose_list.publish(pose_list_all)
|
||||
|
||||
return response
|
||||
|
||||
else:
|
||||
response.info = "pose dict is empty"
|
||||
response.success = False
|
||||
response.objects = []
|
||||
|
||||
return response
|
||||
|
||||
def _seg_image(self, rgb_img: np.ndarray, depth_img: np.ndarray):
|
||||
"""Use segmentation model"""
|
||||
pose_dict = defaultdict(list)
|
||||
|
||||
'''Define border'''
|
||||
border_x, border_y = rgb_img.shape[:2][0] * 0.10, rgb_img.shape[:2][1] * 0.08
|
||||
|
||||
'''Get Predict Results'''
|
||||
time1 = time.time()
|
||||
results = self.model(rgb_img)
|
||||
time2 = time.time()
|
||||
result = results[0]
|
||||
|
||||
'''Get masks'''
|
||||
@@ -354,40 +337,68 @@ class DetectNode(Node):
|
||||
orig_shape = result.masks.orig_shape
|
||||
|
||||
'''Get boxes'''
|
||||
boxes = result.boxes.xywh.cpu().numpy()
|
||||
boxes = result.boxes.data.cpu().numpy()
|
||||
confidences = result.boxes.conf.cpu().numpy()
|
||||
class_ids = result.boxes.cls.cpu().numpy()
|
||||
labels = result.names
|
||||
|
||||
post_list = []
|
||||
time3 = time.time()
|
||||
|
||||
for i, (mask, box) in enumerate(zip(masks, boxes)):
|
||||
'''Confidence Filter'''
|
||||
if confidences[i] < self.set_confidence:
|
||||
continue
|
||||
else:
|
||||
x_center, y_center= box[:2]
|
||||
|
||||
'''Border Filter'''
|
||||
if not (border_x <= x_center <= rgb_img.shape[:2][1] - border_x
|
||||
and border_y <= y_center <= rgb_img.shape[:2][0] - border_y):
|
||||
if confidences[i] >= self.set_confidence:
|
||||
mask = cv2.resize(mask.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_NEAREST)
|
||||
depth_crop, mask_crop, (x_min, y_min) = crop_mask_bbox(depth_img, mask, box)
|
||||
|
||||
if depth_crop is None:
|
||||
continue
|
||||
|
||||
x, y, z, roll, pitch, yaw = calculate_pose(mask, orig_shape, depth_img, self.K)
|
||||
if mask.shape[0] >= (orig_shape[0] * 0.2) and mask.shape[1] >= (orig_shape[1] * 0.2):
|
||||
mask_crop = cv2.resize(mask_crop, None, fx=self.fx, fy=self.fy, interpolation=cv2.INTER_NEAREST)
|
||||
depth_crop = cv2.resize(depth_crop, None, fx=self.fx, fy=self.fy, interpolation=cv2.INTER_NEAREST)
|
||||
intrinsics = o3d.camera.PinholeCameraIntrinsic(
|
||||
int(self.camera_size[0] * self.fx),
|
||||
int(self.camera_size[1] * self.fy),
|
||||
self.K[0] * self.fx,
|
||||
self.K[4] * self.fy,
|
||||
(self.K[2] - x_min) * self.fx,
|
||||
(self.K[5] - y_min) * self.fy
|
||||
)
|
||||
else:
|
||||
intrinsics = o3d.camera.PinholeCameraIntrinsic(
|
||||
int(self.camera_size[0]),
|
||||
int(self.camera_size[1]),
|
||||
self.K[0],
|
||||
self.K[4],
|
||||
self.K[2] - x_min,
|
||||
self.K[5] - y_min
|
||||
)
|
||||
|
||||
x, y, z, roll, pitch, yaw = self.calculate_function(mask_crop, depth_crop, intrinsics)
|
||||
|
||||
if (x, y, z) != (0.0, 0.0, 0.0):
|
||||
pose = Pose()
|
||||
pose.position = Point(x=x, y=y, z=z)
|
||||
pose.orientation = Quaternion(x=roll, y=pitch, z=yaw)
|
||||
pose_dict[int(class_ids[i]), labels[class_ids[i]]].append(pose)
|
||||
post_list.append([x, y, z, roll, pitch, yaw])
|
||||
|
||||
time4 = time.time()
|
||||
|
||||
'''mask_img and box_img is or not output'''
|
||||
if self.output_boxes and self.output_masks:
|
||||
draw_box(self.set_confidence, rgb_img, result, post_list)
|
||||
draw_mask(self.set_confidence, rgb_img, result)
|
||||
if self.output_boxes and not self.output_masks:
|
||||
draw_box(self.set_confidence, rgb_img, result)
|
||||
time5 = time.time()
|
||||
self.get_logger().info(f'start')
|
||||
self.get_logger().info(f'{(time2 - time1)*1000} ms, model predict')
|
||||
self.get_logger().info(f'{(time3 - time2)*1000} ms, get mask and some param')
|
||||
self.get_logger().info(f'{(time4 - time3)*1000} ms, calculate all mask PCA')
|
||||
self.get_logger().info(f'{(time5 - time4)*1000} ms, draw box')
|
||||
self.get_logger().info(f'{(time5 - time1)*1000} ms, completing a picture entire process')
|
||||
self.get_logger().info(f'end')
|
||||
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict
|
||||
elif self.output_boxes and not self.output_masks:
|
||||
draw_box(self.set_confidence, rgb_img, result, post_list)
|
||||
elif self.output_boxes and self.output_masks:
|
||||
draw_box(self.set_confidence, rgb_img, result)
|
||||
draw_mask(self.set_confidence, rgb_img, result)
|
||||
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict
|
||||
elif not self.output_boxes and self.output_masks:
|
||||
draw_mask(self.set_confidence, rgb_img, result)
|
||||
@@ -395,36 +406,6 @@ class DetectNode(Node):
|
||||
else:
|
||||
return None, pose_dict
|
||||
|
||||
def _test_image(self, rgb_img, depth_img):
|
||||
rgb_img_gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
|
||||
pattern_size = (11, 8)
|
||||
ret, corners = cv2.findChessboardCorners(rgb_img_gray, pattern_size)
|
||||
if ret:
|
||||
# 角点亚像素精确化(提高标定精度)
|
||||
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
|
||||
corners_subpix = cv2.cornerSubPix(rgb_img_gray, corners, (11, 11), (-1, -1), criteria)
|
||||
|
||||
corners_subpix = corners_subpix.reshape(pattern_size[1], pattern_size[0], 2)
|
||||
mask = np.zeros(rgb_img_gray.shape, dtype=np.uint8)
|
||||
for i in [0, pattern_size[1] - 1]:
|
||||
for j in [0, pattern_size[0] - 1]:
|
||||
pts = np.array([
|
||||
corners_subpix[i, j],
|
||||
corners_subpix[i, j + 1],
|
||||
corners_subpix[i + 1, j + 1],
|
||||
corners_subpix[i + 1, j]
|
||||
], dtype=np.int32)
|
||||
cv2.fillConvexPoly(mask, pts, 1)
|
||||
|
||||
rgb_img[mask > 0] = rgb_img[mask > 0] * 0.5 + np.array([0, 0, 255]) * 0.5
|
||||
orig_shape = rgb_img_gray.shape
|
||||
x, y, z, roll, pitch, yaw = calculate_pose(mask, orig_shape, depth_img, self.K)
|
||||
|
||||
cv2.putText(rgb_img, f'cs: x: {x:.2f}, y: {y:.2f}, z: {z:.2f}', (0, 0 + 15),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
|
||||
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), None
|
||||
else:
|
||||
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), None
|
||||
|
||||
|
||||
def main(args=None):
|
||||
rclpy.init(args=args)
|
||||
@@ -438,17 +419,5 @@ def main(args=None):
|
||||
rclpy.shutdown()
|
||||
|
||||
|
||||
def test_node(args=None):
|
||||
rclpy.init(args=args)
|
||||
node = DetectNode('detect', 'test')
|
||||
try:
|
||||
rclpy.spin(node)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
finally:
|
||||
node.destroy_node()
|
||||
rclpy.shutdown()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
@@ -7,7 +7,11 @@ import json
|
||||
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from geometry_msgs.msg import Pose
|
||||
from rclpy.parameter import Parameter
|
||||
|
||||
from tf2_ros import Buffer
|
||||
from tf2_msgs.msg import TFMessage
|
||||
from interfaces.msg import PoseWithIDArray
|
||||
from message_filters import ApproximateTimeSynchronizer, Subscriber
|
||||
|
||||
|
||||
@@ -30,8 +34,8 @@ def get_matrix_eular_radu(x, y, z, rx, ry, rz):
|
||||
"""从欧拉角构建齐次变换矩阵"""
|
||||
'''构造旋转矩阵'''
|
||||
rmat = tfs.euler.euler2mat(
|
||||
math.radians(rx), math.radians(ry), math.radians(rz)
|
||||
# rx, ry, rz
|
||||
# math.radians(rx), math.radians(ry), math.radians(rz)
|
||||
rx, ry, rz
|
||||
)
|
||||
|
||||
"""构造齐次变换矩阵"""
|
||||
@@ -107,7 +111,6 @@ def calculate(Hgs, Hcs):
|
||||
A.append(skew(np.add(Pgij, Pcij)))
|
||||
B.append(np.subtract(Pcij, Pgij).reshape(3, 1))
|
||||
|
||||
|
||||
MA = np.vstack(A)
|
||||
MB = np.vstack(B)
|
||||
Pcg_ = np.dot(np.linalg.pinv(MA), MB)
|
||||
@@ -136,27 +139,42 @@ def calculate(Hgs, Hcs):
|
||||
class Calibration(Node):
|
||||
def __init__(self, name):
|
||||
super(Calibration, self).__init__(name)
|
||||
self.sync_subscriber = None
|
||||
self.sub_camera_pose = None
|
||||
self.sub_hand_pose = None
|
||||
self.Hgs, self.Hcs = [], []
|
||||
self.hand, self.camera = [], []
|
||||
self.calibration_matrix = None
|
||||
self.calculate = False
|
||||
self.collect = False
|
||||
self.base_name = 'base_link'
|
||||
|
||||
self.tf_buffer = Buffer()
|
||||
|
||||
self.declare_parameter('start_collect_once', False)
|
||||
self.collect = self.get_parameter('start_collect_once').value
|
||||
|
||||
self.declare_parameter('start_calculate', False)
|
||||
self.calculate = self.get_parameter('start_calculate').value
|
||||
self.declare_parameter('base_name', 'base_link')
|
||||
|
||||
self.declare_parameter('end_name', 'Link6')
|
||||
self.declare_parameter('class_name', 'crossboard')
|
||||
self.declare_parameter('matrix_name', 'eye_in_hand')
|
||||
|
||||
self.end_name = self.get_parameter('end_name').value
|
||||
self.class_name = self.get_parameter('class_name').value
|
||||
self.matrix_name = self.get_parameter('matrix_name').value
|
||||
|
||||
self.declare_parameter('mode', 'eye_in_hand')
|
||||
self.mode = self.get_parameter('mode').value.lower()
|
||||
|
||||
if self.mode not in ['eye_in_hand', 'eye_to_hand']:
|
||||
raise ValueError("mode must be 'eye_in_hand' or 'eye_to_hand'")
|
||||
|
||||
self.declare_parameter('input', 'rotvertor')
|
||||
self.declare_parameter('input', 'eular')
|
||||
self.input = self.get_parameter('input').value.lower()
|
||||
if self.input == 'rotvertor':
|
||||
self.function = get_matrix_rotvector
|
||||
elif self.input == 'eular':
|
||||
if self.input == 'eular':
|
||||
self.function = get_matrix_eular_radu
|
||||
elif self.input == 'rotvertor':
|
||||
self.function = get_matrix_rotvector
|
||||
elif self.input == 'quat':
|
||||
self.function = get_matrix_quat
|
||||
else:
|
||||
@@ -166,72 +184,103 @@ class Calibration(Node):
|
||||
self._init_sub()
|
||||
|
||||
def _init_sub(self):
|
||||
self.sub_hand_pose = Subscriber(self, Pose, '/calibration/hand/pose')
|
||||
self.sub_camera_pose = Subscriber(self, Pose, '/calibration/camera/pose')
|
||||
self.sub_hand_pose = Subscriber(self, TFMessage, '/tf')
|
||||
self.sub_camera_pose = Subscriber(self, PoseWithIDArray, '/pose/cv_detect_pose')
|
||||
|
||||
self.sync_subscriber = ApproximateTimeSynchronizer(
|
||||
[self.sub_hand_pose, self.sub_camera_pose],
|
||||
queue_size=10,
|
||||
slop=0.1
|
||||
slop=0.1,
|
||||
allow_headerless = True
|
||||
)
|
||||
self.sync_subscriber.registerCallback(self.get_pose_callback)
|
||||
|
||||
def get_pose_callback(self, hand_pose, camera_pose):
|
||||
def get_pose_callback(self, hand_tf, camera_pose):
|
||||
self.collect = self.get_parameter('start_collect_once').value
|
||||
self.calculate = self.get_parameter('start_calculate').value
|
||||
self.base_name = self.get_parameter('base_name').value
|
||||
self.end_name = self.get_parameter('end_name').value
|
||||
self.class_name = self.get_parameter('class_name').value
|
||||
self.matrix_name = self.get_parameter('matrix_name').value
|
||||
self.mode = self.get_parameter('mode').value.lower()
|
||||
|
||||
if self.collect:
|
||||
_hand = [
|
||||
hand_pose.position.x,
|
||||
hand_pose.position.y,
|
||||
hand_pose.position.z,
|
||||
hand_pose.orientation.x,
|
||||
hand_pose.orientation.y,
|
||||
hand_pose.orientation.z,
|
||||
]
|
||||
_camera = [
|
||||
camera_pose.position.x,
|
||||
camera_pose.position.y,
|
||||
camera_pose.position.z,
|
||||
camera_pose.orientation.x,
|
||||
camera_pose.orientation.y,
|
||||
camera_pose.orientation.z,
|
||||
]
|
||||
_hand, _camera = None, None
|
||||
|
||||
for transform in hand_tf.transforms:
|
||||
self.tf_buffer.set_transform(transform, "default_authority")
|
||||
|
||||
if self.base_name in self.tf_buffer.all_frames_as_string() and self.end_name in self.tf_buffer.all_frames_as_string():
|
||||
|
||||
trans = self.tf_buffer.lookup_transform(
|
||||
self.base_name,
|
||||
self.end_name,
|
||||
rclpy.time.Time()
|
||||
)
|
||||
|
||||
t = trans.transform.translation
|
||||
r = trans.transform.rotation
|
||||
quat = [r.w, r.x, r.y, r.z]
|
||||
roll, pitch, yaw = tfs.euler.quat2euler(quat)
|
||||
_hand = [
|
||||
t.x, t.y, t.z,
|
||||
roll, pitch, yaw,
|
||||
]
|
||||
else: return
|
||||
|
||||
pose_dict = {}
|
||||
for object in camera_pose.objects:
|
||||
pose_dict[object.class_name] = object.pose_list
|
||||
|
||||
if self.class_name in pose_dict:
|
||||
_camera = [
|
||||
pose_dict[self.class_name][-1].position.x,
|
||||
pose_dict[self.class_name][-1].position.y,
|
||||
pose_dict[self.class_name][-1].position.z,
|
||||
pose_dict[self.class_name][-1].orientation.x,
|
||||
pose_dict[self.class_name][-1].orientation.y,
|
||||
pose_dict[self.class_name][-1].orientation.z,
|
||||
]
|
||||
else:
|
||||
self.set_parameters([Parameter('start_collect_once', Parameter.Type.BOOL, False)])
|
||||
self.get_logger().info(f"Have not camera data")
|
||||
|
||||
if _hand is None or _camera is None:
|
||||
_hand, _camera = None, None
|
||||
self.get_logger().info("Have not camera data or end data")
|
||||
return
|
||||
|
||||
self.get_logger().info(f"add hand: {_hand}")
|
||||
self.get_logger().info(f"add camera: {_camera}")
|
||||
|
||||
self.hand.extend(_hand)
|
||||
self.camera.extend(_camera)
|
||||
self.collect = False
|
||||
self.set_parameters([Parameter('start_collect_once', Parameter.Type.BOOL, False)])
|
||||
|
||||
if self.calculate:
|
||||
self.sync_subscriber.registerCallback(None)
|
||||
self.sub_hand_pose = None
|
||||
self.sub_camera_pose = None
|
||||
self.sync_subscriber = None
|
||||
print("已停止订阅")
|
||||
|
||||
self.calculate_calibration()
|
||||
self.get_logger().info(self.calibration_matrix)
|
||||
|
||||
print(self.hand)
|
||||
print(self.camera)
|
||||
|
||||
self.get_logger().info(f"{self.calibration_matrix}")
|
||||
hand_eye_result = {
|
||||
"T": self.calibration_matrix.tolist()
|
||||
}
|
||||
with open(f"{self.matrix_name}_matrix.json", "w") as f:
|
||||
json.dump(hand_eye_result, f, indent=4)
|
||||
self.get_logger().info(f"Save as {self.matrix_name}_matrix.json")
|
||||
|
||||
with open(f"hand_pose_data.json", "w") as f:
|
||||
json.dump(self.hand, f, indent=4)
|
||||
|
||||
with open(f"camera_pose_data.json", "w") as f:
|
||||
json.dump(self.camera, f, indent=4)
|
||||
|
||||
self.done = True
|
||||
|
||||
def calculate_data(self):
|
||||
if self.input == 'quat':
|
||||
for i in range(0, len(self.hand), 7):
|
||||
self.Hgs.append(
|
||||
np.linalg.inv(
|
||||
self.function(
|
||||
self.hand[i], self.hand[i + 1], self.hand[i + 2],
|
||||
self.hand[i + 3], self.hand[i + 4], self.hand[i + 5], self.hand[i + 6]
|
||||
)
|
||||
)
|
||||
if self.mode == 'eye_to_hand' else
|
||||
self.function(
|
||||
self.hand[i], self.hand[i + 1], self.hand[i + 2],
|
||||
self.hand[i + 3], self.hand[i + 4], self.hand[i + 5], self.hand[i + 6]
|
||||
)
|
||||
)
|
||||
self.Hcs.append(
|
||||
self.function(
|
||||
self.camera[i], self.camera[i + 1], self.camera[i + 2],
|
||||
self.camera[i + 3], self.camera[i + 4], self.camera[i + 5], self.camera[i + 6]
|
||||
)
|
||||
)
|
||||
else:
|
||||
if self.input != 'quat':
|
||||
for i in range(0, len(self.hand), 6):
|
||||
self.Hgs.append(
|
||||
np.linalg.inv(
|
||||
@@ -252,7 +301,27 @@ class Calibration(Node):
|
||||
self.camera[i + 3], self.camera[i + 4], self.camera[i + 5]
|
||||
)
|
||||
)
|
||||
|
||||
else:
|
||||
for i in range(0, len(self.hand), 7):
|
||||
self.Hgs.append(
|
||||
np.linalg.inv(
|
||||
self.function(
|
||||
self.hand[i], self.hand[i + 1], self.hand[i + 2],
|
||||
self.hand[i + 3], self.hand[i + 4], self.hand[i + 5], self.hand[i + 6]
|
||||
)
|
||||
)
|
||||
if self.mode == 'eye_to_hand' else
|
||||
self.function(
|
||||
self.hand[i], self.hand[i + 1], self.hand[i + 2],
|
||||
self.hand[i + 3], self.hand[i + 4], self.hand[i + 5], self.hand[i + 6]
|
||||
)
|
||||
)
|
||||
self.Hcs.append(
|
||||
self.function(
|
||||
self.camera[i], self.camera[i + 1], self.camera[i + 2],
|
||||
self.camera[i + 3], self.camera[i + 4], self.camera[i + 5], self.camera[i + 6]
|
||||
)
|
||||
)
|
||||
|
||||
def calculate_calibration(self):
|
||||
self.calculate_data()
|
||||
@@ -266,15 +335,16 @@ class Calibration(Node):
|
||||
hand_eye_result = {
|
||||
"T": self.calibration_matrix.tolist()
|
||||
}
|
||||
with open("calibration_matrix.json", "w") as f:
|
||||
with open(f"{self.matrix_name}_matrix.json", "w") as f:
|
||||
json.dump(hand_eye_result, f, indent=4)
|
||||
self.get_logger().info(f"Save as {self.matrix_name}_matrix.json")
|
||||
|
||||
self.done = True
|
||||
|
||||
|
||||
def main(args=None):
|
||||
rclpy.init(args=args)
|
||||
node = Calibration('Calibration')
|
||||
node = Calibration('calibration')
|
||||
try:
|
||||
while rclpy.ok() and not node.done:
|
||||
rclpy.spin_once(node)
|
||||
@@ -303,26 +373,26 @@ if __name__ == '__main__':
|
||||
# 175.2059707654342
|
||||
# ]
|
||||
|
||||
hand = [
|
||||
0.400422, -0.0262847, 0.365594, 3.12734, -0.0857978, -0.0168582,
|
||||
0.423347, -0.16857, 0.340184, -2.73844, 0.089013, 0.123284,
|
||||
0.42540, 0.19543, 0.29062, 2.44351, -0.1777, -0.0722,
|
||||
0.58088, -0.0541, 0.32633, -2.9303, 0.06957, 0.98985,
|
||||
0.2760, -0.033, 0.3301, 3.0813, 0.0724, 0.6077,
|
||||
0.54011, 0.09071, 0.34623, 2.62279, 0.75876, -0.6927,
|
||||
0.57732, -0.1346, 0.37990, -2.6764, 1.04868, 0.82177,
|
||||
0.27844, -0.1371, 0.28799, -0.8233, 2.26319, 0.36110
|
||||
]
|
||||
camera = [
|
||||
0.11725, 0.05736, 0.32176, 0.02860, -0.1078, 3.06934,
|
||||
0.07959, 0.06930, 0.29829, 0.66126, -0.2127, 2.99042,
|
||||
0.14434, 0.03227, 0.40377, -0.9465, -0.1744, 2.80668,
|
||||
0.11008, 0.05605, 0.35730, 0.12422, -1.0665, 2.87604,
|
||||
0.1131, 0.0438171, 0.299624, -0.058278, -0.514154, -3.06309,
|
||||
0.057039, 0.109464, 0.415275, 0.414777, 0.750109, -2.49495,
|
||||
0.13702, 0.00520, 0.38190, 0.26431, -0.7554, 2.26885,
|
||||
- 0.03670, -0.04433, 0.237292, 0.807501, -0.27347, 0.614594
|
||||
]
|
||||
# hand = [
|
||||
# 0.400422, -0.0262847, 0.365594, 3.12734, -0.0857978, -0.0168582,
|
||||
# 0.423347, -0.16857, 0.340184, -2.73844, 0.089013, 0.123284,
|
||||
# 0.42540, 0.19543, 0.29062, 2.44351, -0.1777, -0.0722,
|
||||
# 0.58088, -0.0541, 0.32633, -2.9303, 0.06957, 0.98985,
|
||||
# 0.2760, -0.033, 0.3301, 3.0813, 0.0724, 0.6077,
|
||||
# 0.54011, 0.09071, 0.34623, 2.62279, 0.75876, -0.6927,
|
||||
# 0.57732, -0.1346, 0.37990, -2.6764, 1.04868, 0.82177,
|
||||
# 0.27844, -0.1371, 0.28799, -0.8233, 2.26319, 0.36110
|
||||
# ]
|
||||
# camera = [
|
||||
# 0.11725, 0.05736, 0.32176, 0.02860, -0.1078, 3.06934,
|
||||
# 0.07959, 0.06930, 0.29829, 0.66126, -0.2127, 2.99042,
|
||||
# 0.14434, 0.03227, 0.40377, -0.9465, -0.1744, 2.80668,
|
||||
# 0.11008, 0.05605, 0.35730, 0.12422, -1.0665, 2.87604,
|
||||
# 0.1131, 0.0438171, 0.299624, -0.058278, -0.514154, -3.06309,
|
||||
# 0.057039, 0.109464, 0.415275, 0.414777, 0.750109, -2.49495,
|
||||
# 0.13702, 0.00520, 0.38190, 0.26431, -0.7554, 2.26885,
|
||||
# - 0.03670, -0.04433, 0.237292, 0.807501, -0.27347, 0.614594
|
||||
# ]
|
||||
|
||||
# hand = [
|
||||
# 0.00036945715011716274, -0.8839821223981196, 0.5523193840441242,
|
||||
@@ -363,6 +433,46 @@ if __name__ == '__main__':
|
||||
# 0.4362292324, -0.0778107597, -0.1007970399, 0.4568889439, -0.3144470665, -0.5302104578, -0.6412896427,
|
||||
# ]
|
||||
|
||||
# hand = [
|
||||
# 0.508533, -0.260803, 0.292825, -0.0002, 0.2843, 0.8712, 0.4003,
|
||||
# 0.527713, -0.230407, 0.300856, 0.0012, 0.2461, 0.8979, 0.365,
|
||||
# 0.544016, -0.192872, 0.326135, 0.0075, 0.1894, 0.9311, 0.3117,
|
||||
# 0.552381, -0.149794, 0.342366, 0.0208, 0.113, 0.9692, 0.2178,
|
||||
# 0.564071, -0.118316, 0.356974, 0.0196, 0.0855, 0.9829, 0.1617,
|
||||
# 0.574263, -0.073508, 0.354265, 0.0123, 0.0659, 0.991, 0.1162,
|
||||
# 0.573574, -0.038738, 0.363898, 0.01, 0.0166, 0.9961, 0.0866,
|
||||
# 0.563025, -0.000004, 0.432336, 0, 0, 1, 0,
|
||||
# 0.561599, 0.018044, 0.376489, 0.0398, 0.0384, 0.9984, 0.0135,
|
||||
# 0.553613, 0.117616, 0.380410, 0.0523, 0.0278, 0.993, -0.102,
|
||||
# 0.546303, 0.163889, 0.350932, 0.0606, 0.0419, 0.9854, -0.1537,
|
||||
# 0.527343, 0.191386, 0.356761, 0.0660, -0.0058, 0.9719, -0.2261,
|
||||
# 0.525386, 0.218939, 0.352163, 0.0683, -0.0293, 0.9638, -0.2561,
|
||||
# 0.510583, 0.260870, 0.306870, 0.0742, -0.0232, 0.9467, -0.3125,
|
||||
# 0.494977, 0.290977, 0.257741, 0.0708, -0.089, 0.9264, -0.359,
|
||||
# 0.464551, 0.322355, 0.214358, 0.0712, -0.1525, 0.8982, -0.4062
|
||||
# ]
|
||||
#
|
||||
# camera = [
|
||||
# 0.01963879, -0.009811901, 0.205815151, -0.472094888, 0.101775888, -0.345924179, 0.804428087,
|
||||
# 0.018775674, -0.008453256, 0.187205523, -0.486293263, 0.154739671, -0.358078652, 0.781891409,
|
||||
# 0.012794545, -0.004475131, 0.181938171, 0.50586501, -0.228167491, 0.37478512, -0.742681831,
|
||||
# 0.032269519, -0.00216203, 0.173868418, 0.520070883, -0.334713601, 0.409799027, -0.670490746,
|
||||
# 0.036921501, -0.00520581, 0.173751414, 0.509143837, -0.389764156, 0.430712138, -0.635093308,
|
||||
# 0.028183416, -0.003799309, 0.157465994, -0.493999273, 0.434277098, -0.443106472, 0.609118031,
|
||||
# 0.011437201, 0.00248707, 0.160097286, 0.505064681, -0.477579273, 0.433387075, -0.573588135,
|
||||
# 0.032602217, 0.021788951, 0.224987134, -0.45692465, 0.54759083, -0.46897897, 0.520982603,
|
||||
# 0.012125032, -0.002286719, 0.17284067, -0.469627705, 0.49778925, -0.506302662, 0.524703054,
|
||||
# 0.005448693, -0.004084263, 0.196215734, 0.420676917, -0.55355367, 0.57085096, -0.43673613,
|
||||
# -0.001541587, -0.000362108, 0.187365457, 0.386895866, -0.565030889, 0.607491241, -0.402499782,
|
||||
# 0.0283868, 0.00129835, 0.21059823, 0.367601287, -0.614671423, 0.616489404, -0.327091961,
|
||||
# 0.026122695, -0.006088101, 0.222446054, -0.360286232, 0.636446394, -0.615347513, 0.294070155,
|
||||
# 0.016605999, 0.002255499, 0.214847937, -0.322620688, 0.64815307, -0.642728715, 0.250426214,
|
||||
# -0.004833174, -0.00322185, 0.206448808, -0.318973207, 0.693907003, -0.618815043, 0.183894282,
|
||||
# -0.023912154, -0.006630629, 0.213570014, -0.312473203, 0.731833827, -0.595186057, 0.111952241
|
||||
# ]
|
||||
|
||||
hand = [0.04890273791428963, 0.0004504761907745383, 0.46790796699482806, 2.967753235921256, 1.5649208364836507, 2.9711085514601705, 0.04768898265766111, 0.3788857791109763, 0.4682035239248168, 3.0163736185861207, 1.5649619541531186, 3.0195308212592677, 0.32625883264819266, -0.3634617778416731, 0.421593216446394, 3.002534938295004, 1.5650081880969766, 3.005742650784514, 0.32642805113795487, -0.007846684520101184, 0.6583907692758129, 2.996473884875977, 1.5651519582726476, 2.999788236184719, 0.007604499072515341, -0.008909246604230057, 0.6602164092084706, 2.9924337455581487, 1.5649434437838572, 2.99570599683423, -0.2302903973439573, -0.009521593068327942, 0.4022385512893329, 2.9886192283787567, 1.5649211710513136, 2.991960149294687, -0.030611858539142292, -0.2092965687518696, 0.27301661161858565, 2.99903518423652, 1.5650012464590883, 3.002295456064355, 0.22992574555084527, -0.32587224492584066, 0.42019996455240305, 2.60021191431934, 1.564801015912219, 3.0010891138129234, 0.22914021799488096, -0.3257848537525279, 0.29567134663812733, 2.6229698917887316, 1.5648394832607442, 3.0236519049507193, -0.018509517255106828, -0.344831152028245, 0.29713078506399687, 2.6096285094986245, 1.5650188606597932, 3.0104368518847386, 0.1698533780982791, -0.11718557996859491, 0.5113763666064552, 2.7225081455128275, 1.5648607260116965, 3.0021865087579798, 0.175266590966759, -0.09802586931231358, 0.5113880312740355, -3.0434691693702765, 1.5648081710494146, 3.020248933802683, 0.1763485544276374, -0.09820143573344496, 0.6922771478399123, -3.004344843586712, 1.5648396624929917, 3.0026422760274967, 0.2522208720502347, 0.174735137646454, 0.4499592173076682, -3.0008324946538187, 1.5648049556855685, 3.006118661437582, 0.28000301357116675, 0.24123452069853957, 0.5236425399901001, -2.6962801721735037, 1.5647475690740913, 3.015531984738412, 0.3023396516892056, 0.16318483399567205, 0.6023445096549458, 3.138635021721811, 1.2804483880680635, 3.1279400073671018, 0.46097378643641157, 0.3125800774390137, 
0.23477319486468914, 0.0024673940256319383, 1.211771253061176, -0.008496477760364144, 0.45596846797279444, -0.14796089380015007, 0.23432410483220117, 0.0025808687475727533, 1.2118402453241934, -0.008466486492046992, 0.5123129164168688, -0.14841464344335334, 0.13135535856297814, 0.0013911813156320745, 0.9402365531147562, -0.009537151493219735, -0.06802417757547158, -0.18056646142903404, 0.15742012930905253, 0.0017593343032344946, 1.1904912242883334, -0.00939271314135415, -0.005631825572634169, -0.24897343592445517, 0.42678854905231567, 1.67975648494546, 0.8905275031158919, 1.6772475024721079, 0.10485600227751225, -0.18170582469233998, 0.47832287703430654, 1.7241462360020892, 1.106325860118641, 1.7296478042137053, 0.08016413402919303, -0.2590631561375101, 0.6196861497045569, -1.5870460137855227, 0.7739390582898122, -1.5718025854894164, 0.08013941146558534, -0.2590737374639341, 0.6196689571600216, -1.6047235120773313, 1.2190706210025097, -1.5923237326954953, 0.07601339905852489, 0.02603469000425977, 0.5150607085687227, 1.5901569600159358, 0.9226417096915809, 1.5963532553193982]
|
||||
camera =[0.016627581180906383, -0.06008860420526803, 1.2546251322637956, 0.007920735534361645, -0.0044938171316247985, -0.01583109584892951, 0.3946734430727695, -0.06001849745979192, 1.2546461286633859, 0.00792950436880294, -0.004275131010815468, -0.009382335201147032, -0.34727024002964096, -0.10396859980513673, 0.9781854685650921, 0.007860849364144471, -0.004289994609492879, -0.020103434790088342, 0.008656422192253778, 0.13255349538482636, 0.9782177211920444, 0.007669276032617759, -0.004400674052395294, -0.011961340904789737, 0.007703254395672724, 0.13219937916863395, 1.2970702489359116, 0.007822208581600262, -0.004332910354657888, -0.007797342430181252, 0.007702096258629524, -0.12744409516854172, 1.533465623522901, -0.007910546977282943, -0.004492274201712114, -0.0018150035612668667, -0.1927178122090693, -0.2548192832185877, 1.3337124403234724, 0.007872240191786216, -0.00436070416710282, -0.019811567426399057, 0.16685051573686113, -0.10619757681231899, 1.1012407943208185, -0.008718288085287535, 0.40204003526433674, -0.026001280203442934, 0.16686699015593348, -0.23051331080143456, 1.1012306901800453, -0.008710041362401081, 0.40184926890630007, -0.02403314010797454, 0.23955552137939512, -0.23082270918815057, 1.3394234811518526, -0.008452894164052559, 0.4019316455149511, -0.006657961508889506, 0.2392800372761273, -0.015518871224688613, 1.1154081836326577, -0.00825236607466278, 0.2808465612850971, -0.00485579021046654, -0.3508251278886656, -0.015552278798700825, 1.0740235262012592, -0.00829522204258177, -0.2185247927542375, 0.004781859822461862, -0.4168337276552685, 0.1650843246586101, 1.0496767253431094, -0.00833991904750478, -0.2752981051112181, 0.0039304434412982476, -0.13375822629863435, -0.07688876301483868, 1.0496075961175266, -0.008353942542516835, -0.2753192561639382, 0.00505791884160581, -0.3888095658610325, -0.0030800545949429287, 0.9546727258673529, -0.009038580128611329, -0.5707579905400695, 0.006194193730309511, 0.16328869274007693, -0.21048890444852295, 
0.9714159446024816, 0.2927615099519847, 0.009726373027021925, -0.01707352415432453, 0.31304895605719507, 0.03771051883437302, 0.9049738113980995, -0.3570496663480505, 0.009731135606951519, 0.02271614403666748, -0.14720843860704255, 0.037567112722341686, 0.905021520832809, -0.3569831264071773, 0.0098148326615404, 0.02247627050751473, -0.14657109389258477, 0.16675719039856332, 0.8848761718997081, -0.628525695609367, 0.009558819653555794, 0.022860898888523083, -0.18546311473458751, 0.17593249161178773, 1.4209417430917681, -0.3782748481179483, 0.009944971710266764, 0.03698328329209474, -0.29954387900231777, 0.005868208019799593, 1.302432504133018, 0.06931059870237366, 0.026519475271678805, -0.6848636764689539, -0.19943769852930376, -0.03726937010016136, 1.194296982096587, 0.07332163133599615, 0.011007154237114129, -0.45064904908755204, -0.19071838461885932, -0.10743668712554683, 1.2271929327385571, 0.0014044222476118763, -0.01783184488981163, 0.8032197321924955, -0.23452184831905082, -0.004370359915430269, 1.2272036048043269, 0.00911836141205128, -0.015503544959362016, 0.3577725606201448, 0.006674891701210744, -0.028505333147130864, 1.2271490673736012, 0.017750045241185106, -0.0006343459583163935, -0.6378209534631959]
|
||||
rclpy.init(args=None)
|
||||
node = Calibration('Calibration')
|
||||
node.get_data_test(hand, camera)
|
||||
|
||||
@@ -0,0 +1,52 @@
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
|
||||
from interfaces.srv import PoseArray
|
||||
|
||||
|
||||
class Client(Node):
|
||||
def __init__(self, name):
|
||||
super().__init__(name)
|
||||
self.client_ = self.create_client(PoseArray, "/get_object_pose_list",)
|
||||
self.timer_ = self.create_timer(2.0, self._timer_callback)
|
||||
|
||||
def _timer_callback(self):
|
||||
self.send_request("head")
|
||||
|
||||
def send_request(self, camera_position):
|
||||
while not self.client_.wait_for_service(timeout_sec=1.0):
|
||||
self.get_logger().info('service not available, waiting again...')
|
||||
request = PoseArray.Request()
|
||||
request.camera_position = camera_position
|
||||
self.client_.call_async(request).add_done_callback(self._result_callback)
|
||||
|
||||
def _result_callback(self, result):
|
||||
response = result.result()
|
||||
if response.success:
|
||||
pose_dict = {}
|
||||
for obj in response.objects:
|
||||
pose_dict[obj.class_name] = {
|
||||
"class_id": obj.class_id,
|
||||
"pose_list": obj.pose_list,
|
||||
}
|
||||
self.get_logger().info(f"{pose_dict}")
|
||||
else:
|
||||
self.get_logger().error(f"{response.info}")
|
||||
|
||||
|
||||
def main(args=None):
|
||||
rclpy.init(args=args)
|
||||
node = Client('get_camera_service_client')
|
||||
try:
|
||||
rclpy.spin(node)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
finally:
|
||||
node.destroy_node()
|
||||
rclpy.shutdown()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
from launch import LaunchDescription
|
||||
from launch_ros.actions import Node
|
||||
from launch.actions import DeclareLaunchArgument, OpaqueFunction, LogInfo
|
||||
from launch.substitutions import LaunchConfiguration
|
||||
|
||||
|
||||
def generate_launch_description():
|
||||
args_detect = [
|
||||
DeclareLaunchArgument('checkpoint_name', default_value='yolo11s-seg.pt'),
|
||||
DeclareLaunchArgument('output_boxes', default_value='True'),
|
||||
DeclareLaunchArgument('output_masks', default_value='False'),
|
||||
DeclareLaunchArgument('set_confidence', default_value='0.5'),
|
||||
DeclareLaunchArgument('color_image_topic', default_value='/camera/camera/color/image_raw'),
|
||||
DeclareLaunchArgument('depth_image_topic', default_value='/camera/camera/depth/image_rect_raw'),
|
||||
DeclareLaunchArgument('camera_info_topic', default_value='/camera/camera/color/camera_info'),
|
||||
DeclareLaunchArgument('width', default_value='1280'),
|
||||
DeclareLaunchArgument('high', default_value='720'),
|
||||
DeclareLaunchArgument('device', default_value='cpu')
|
||||
]
|
||||
|
||||
def create_detect_node(context):
|
||||
checkpoint = LaunchConfiguration('checkpoint_name').perform(context)
|
||||
output_boxes = LaunchConfiguration('output_boxes').perform(context)
|
||||
output_masks = LaunchConfiguration('output_masks').perform(context)
|
||||
conf = LaunchConfiguration('set_confidence').perform(context)
|
||||
color_image_topic = LaunchConfiguration('color_image_topic').perform(context)
|
||||
depth_image_topic = LaunchConfiguration('depth_image_topic').perform(context)
|
||||
camera_info_topic = LaunchConfiguration('camera_info_topic').perform(context)
|
||||
width = LaunchConfiguration('width').perform(context)
|
||||
high = LaunchConfiguration('high').perform(context)
|
||||
device = LaunchConfiguration('device').perform(context)
|
||||
|
||||
return [
|
||||
Node(
|
||||
package='detect_part',
|
||||
executable='detect_node',
|
||||
parameters=[{
|
||||
'checkpoint_name': checkpoint,
|
||||
'output_boxes': output_boxes.lower() == 'true',
|
||||
'output_masks': output_masks.lower() == 'true',
|
||||
'set_confidence': float(conf),
|
||||
'color_image_topic': color_image_topic,
|
||||
'depth_image_topic': depth_image_topic,
|
||||
'camera_info_topic': camera_info_topic,
|
||||
'width': int(width),
|
||||
'high': int(high),
|
||||
'device': device,
|
||||
}]
|
||||
)
|
||||
]
|
||||
|
||||
return LaunchDescription(args_detect + [
|
||||
OpaqueFunction(function=create_detect_node),
|
||||
])
|
||||
@@ -0,0 +1,350 @@
|
||||
import os
|
||||
import yaml
|
||||
from launch import LaunchDescription
|
||||
from launch.actions import DeclareLaunchArgument, OpaqueFunction, GroupAction
|
||||
from launch.substitutions import LaunchConfiguration
|
||||
from launch_ros.actions import PushRosNamespace, ComposableNodeContainer, Node
|
||||
from launch_ros.descriptions import ComposableNode
|
||||
|
||||
|
||||
def load_yaml(file_path):
|
||||
with open(file_path, 'r') as f:
|
||||
return yaml.safe_load(f)
|
||||
|
||||
|
||||
def merge_params(default_params, yaml_params):
|
||||
for key, value in yaml_params.items():
|
||||
if key in default_params:
|
||||
default_params[key] = value
|
||||
return default_params
|
||||
|
||||
|
||||
def convert_value(value):
|
||||
if isinstance(value, str):
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
return float(value)
|
||||
except ValueError:
|
||||
pass
|
||||
if value.lower() == 'true':
|
||||
return True
|
||||
elif value.lower() == 'false':
|
||||
return False
|
||||
return value
|
||||
|
||||
|
||||
def load_parameters(context, args):
|
||||
default_params = {arg.name: LaunchConfiguration(arg.name).perform(context) for arg in args}
|
||||
config_file_path = LaunchConfiguration('config_file_path').perform(context)
|
||||
if config_file_path:
|
||||
yaml_params = load_yaml(config_file_path)
|
||||
default_params = merge_params(default_params, yaml_params)
|
||||
skip_convert = {'config_file_path', 'usb_port', 'serial_number'}
|
||||
return {
|
||||
key: (value if key in skip_convert else convert_value(value))
|
||||
for key, value in default_params.items()
|
||||
}
|
||||
|
||||
|
||||
def generate_launch_description():
|
||||
args_detect = [
|
||||
DeclareLaunchArgument('checkpoint_name', default_value='yolo11s-seg.pt'),
|
||||
DeclareLaunchArgument('output_boxes', default_value='True'),
|
||||
DeclareLaunchArgument('output_masks', default_value='False'),
|
||||
DeclareLaunchArgument('set_confidence', default_value='0.5'),
|
||||
DeclareLaunchArgument('color_image_topic', default_value='/camera/color/image_raw'),
|
||||
DeclareLaunchArgument('depth_image_topic', default_value='/camera/depth/image_raw'),
|
||||
DeclareLaunchArgument('camera_info_topic', default_value='/camera/color/camera_info'),
|
||||
DeclareLaunchArgument('width', default_value='1280'),
|
||||
DeclareLaunchArgument('high', default_value='720'),
|
||||
DeclareLaunchArgument('device', default_value='cpu')
|
||||
]
|
||||
|
||||
args_camera = [
|
||||
DeclareLaunchArgument('camera_name', default_value='camera'),
|
||||
DeclareLaunchArgument('depth_registration', default_value='true'),
|
||||
DeclareLaunchArgument('serial_number', default_value=''),
|
||||
DeclareLaunchArgument('usb_port', default_value=''),
|
||||
DeclareLaunchArgument('device_num', default_value='1'),
|
||||
DeclareLaunchArgument('upgrade_firmware', default_value=''),
|
||||
DeclareLaunchArgument('preset_firmware_path', default_value=''),
|
||||
DeclareLaunchArgument('load_config_json_file_path', default_value=''),
|
||||
DeclareLaunchArgument('export_config_json_file_path', default_value=''),
|
||||
DeclareLaunchArgument('uvc_backend', default_value='libuvc'),#libuvc or v4l2
|
||||
DeclareLaunchArgument('point_cloud_qos', default_value='default'),
|
||||
DeclareLaunchArgument('enable_point_cloud', default_value='true'),
|
||||
DeclareLaunchArgument('enable_colored_point_cloud', default_value='false'),
|
||||
DeclareLaunchArgument('cloud_frame_id', default_value=''),
|
||||
DeclareLaunchArgument('connection_delay', default_value='10'),
|
||||
DeclareLaunchArgument('color_width', default_value='0'),
|
||||
DeclareLaunchArgument('color_height', default_value='0'),
|
||||
DeclareLaunchArgument('color_fps', default_value='0'),
|
||||
DeclareLaunchArgument('color_format', default_value='ANY'),
|
||||
DeclareLaunchArgument('enable_color', default_value='true'),
|
||||
DeclareLaunchArgument('color_qos', default_value='default'),
|
||||
DeclareLaunchArgument('color_camera_info_qos', default_value='default'),
|
||||
DeclareLaunchArgument('enable_color_auto_exposure_priority', default_value='false'),
|
||||
DeclareLaunchArgument('color_rotation', default_value='-1'),#color rotation degree : 0, 90, 180, 270
|
||||
DeclareLaunchArgument('color_flip', default_value='false'),
|
||||
DeclareLaunchArgument('color_mirror', default_value='false'),
|
||||
DeclareLaunchArgument('color_ae_roi_left', default_value='-1'),
|
||||
DeclareLaunchArgument('color_ae_roi_left', default_value='-1'),
|
||||
DeclareLaunchArgument('color_ae_roi_right', default_value='-1'),
|
||||
DeclareLaunchArgument('color_ae_roi_top', default_value='-1'),
|
||||
DeclareLaunchArgument('color_ae_roi_bottom', default_value='-1'),
|
||||
DeclareLaunchArgument('color_exposure', default_value='-1'),
|
||||
DeclareLaunchArgument('color_gain', default_value='-1'),
|
||||
DeclareLaunchArgument('enable_color_auto_white_balance', default_value='true'),
|
||||
DeclareLaunchArgument('color_white_balance', default_value='-1'),
|
||||
DeclareLaunchArgument('enable_color_auto_exposure', default_value='true'),
|
||||
DeclareLaunchArgument('color_ae_max_exposure', default_value='-1'),
|
||||
DeclareLaunchArgument('color_brightness', default_value='-1'),
|
||||
DeclareLaunchArgument('color_sharpness', default_value='-1'),
|
||||
DeclareLaunchArgument('color_gamma', default_value='-1'),
|
||||
DeclareLaunchArgument('color_saturation', default_value='-1'),
|
||||
DeclareLaunchArgument('color_constrast', default_value='-1'),
|
||||
DeclareLaunchArgument('color_hue', default_value='-1'),
|
||||
DeclareLaunchArgument('enable_color_backlight_compenstation', default_value='false'),
|
||||
DeclareLaunchArgument('color_powerline_freq', default_value=''),#disable ,50hz ,60hz ,auto
|
||||
DeclareLaunchArgument('enable_color_decimation_filter', default_value='false'),
|
||||
DeclareLaunchArgument('color_decimation_filter_scale', default_value='-1'),
|
||||
DeclareLaunchArgument('depth_width', default_value='0'),
|
||||
DeclareLaunchArgument('depth_height', default_value='0'),
|
||||
DeclareLaunchArgument('depth_fps', default_value='0'),
|
||||
DeclareLaunchArgument('depth_format', default_value='ANY'),
|
||||
DeclareLaunchArgument('enable_depth', default_value='true'),
|
||||
DeclareLaunchArgument('depth_qos', default_value='default'),
|
||||
DeclareLaunchArgument('depth_camera_info_qos', default_value='default'),
|
||||
DeclareLaunchArgument('enable_depth_auto_exposure_priority', default_value='false'),
|
||||
DeclareLaunchArgument('depth_precision', default_value=''),
|
||||
DeclareLaunchArgument('depth_rotation', default_value='-1'),#depth rotation degree : 0, 90, 180, 270
|
||||
DeclareLaunchArgument('depth_flip', default_value='false'),
|
||||
DeclareLaunchArgument('depth_mirror', default_value='false'),
|
||||
DeclareLaunchArgument('depth_ae_roi_left', default_value='-1'),
|
||||
DeclareLaunchArgument('depth_ae_roi_right', default_value='-1'),
|
||||
DeclareLaunchArgument('depth_ae_roi_top', default_value='-1'),
|
||||
DeclareLaunchArgument('depth_ae_roi_bottom', default_value='-1'),
|
||||
DeclareLaunchArgument('depth_brightness', default_value='-1'),
|
||||
DeclareLaunchArgument('left_ir_width', default_value='0'),
|
||||
DeclareLaunchArgument('left_ir_height', default_value='0'),
|
||||
DeclareLaunchArgument('left_ir_fps', default_value='0'),
|
||||
DeclareLaunchArgument('left_ir_format', default_value='ANY'),
|
||||
DeclareLaunchArgument('enable_left_ir', default_value='false'),
|
||||
DeclareLaunchArgument('left_ir_qos', default_value='default'),
|
||||
DeclareLaunchArgument('left_ir_camera_info_qos', default_value='default'),
|
||||
DeclareLaunchArgument('left_ir_rotation', default_value='-1'),#left_ir rotation degree : 0, 90, 180, 270
|
||||
DeclareLaunchArgument('left_ir_flip', default_value='false'),
|
||||
DeclareLaunchArgument('left_ir_mirror', default_value='false'),
|
||||
DeclareLaunchArgument('enable_left_ir_sequence_id_filter', default_value='false'),
|
||||
DeclareLaunchArgument('left_ir_sequence_id_filter_id', default_value='-1'),
|
||||
DeclareLaunchArgument('right_ir_width', default_value='0'),
|
||||
DeclareLaunchArgument('right_ir_height', default_value='0'),
|
||||
DeclareLaunchArgument('right_ir_fps', default_value='0'),
|
||||
DeclareLaunchArgument('right_ir_format', default_value='ANY'),
|
||||
DeclareLaunchArgument('enable_right_ir', default_value='false'),
|
||||
DeclareLaunchArgument('right_ir_qos', default_value='default'),
|
||||
DeclareLaunchArgument('right_ir_camera_info_qos', default_value='default'),
|
||||
DeclareLaunchArgument('right_ir_rotation', default_value='-1'),#right_ir rotation degree : 0, 90, 180, 270
|
||||
DeclareLaunchArgument('right_ir_flip', default_value='false'),
|
||||
DeclareLaunchArgument('right_ir_mirror', default_value='false'),
|
||||
DeclareLaunchArgument('enable_right_ir_sequence_id_filter', default_value='false'),
|
||||
DeclareLaunchArgument('right_ir_sequence_id_filter_id', default_value='-1'),
|
||||
DeclareLaunchArgument('enable_ir_auto_exposure', default_value='true'),
|
||||
DeclareLaunchArgument('ir_exposure', default_value='-1'),
|
||||
DeclareLaunchArgument('ir_gain', default_value='-1'),
|
||||
DeclareLaunchArgument('ir_ae_max_exposure', default_value='-1'),
|
||||
DeclareLaunchArgument('ir_brightness', default_value='-1'),
|
||||
DeclareLaunchArgument('enable_sync_output_accel_gyro', default_value='false'),
|
||||
DeclareLaunchArgument('enable_accel', default_value='false'),
|
||||
DeclareLaunchArgument('enable_accel_data_correction', default_value='true'),
|
||||
DeclareLaunchArgument('accel_rate', default_value='200hz'),
|
||||
DeclareLaunchArgument('accel_range', default_value='4g'),
|
||||
DeclareLaunchArgument('enable_gyro', default_value='false'),
|
||||
DeclareLaunchArgument('enable_gyro_data_correction', default_value='true'),
|
||||
DeclareLaunchArgument('gyro_rate', default_value='200hz'),
|
||||
DeclareLaunchArgument('gyro_range', default_value='1000dps'),
|
||||
DeclareLaunchArgument('liner_accel_cov', default_value='0.01'),
|
||||
DeclareLaunchArgument('angular_vel_cov', default_value='0.01'),
|
||||
DeclareLaunchArgument('publish_tf', default_value='true'),
|
||||
DeclareLaunchArgument('tf_publish_rate', default_value='0.0'),
|
||||
DeclareLaunchArgument('ir_info_url', default_value=''),
|
||||
DeclareLaunchArgument('color_info_url', default_value=''),
|
||||
# Network device settings: default enumerate_net_device is set to true, which will automatically enumerate network devices
|
||||
# If you do not want to automatically enumerate network devices,
|
||||
# you can set enumerate_net_device to true, net_device_ip to the device's IP address, and net_device_port to the default value of 8090
|
||||
DeclareLaunchArgument('enumerate_net_device', default_value='true'),
|
||||
DeclareLaunchArgument('net_device_ip', default_value=''),
|
||||
DeclareLaunchArgument('net_device_port', default_value='0'),
|
||||
DeclareLaunchArgument('exposure_range_mode', default_value='default'),#default, ultimate or regular
|
||||
DeclareLaunchArgument('log_level', default_value='none'),
|
||||
DeclareLaunchArgument('enable_publish_extrinsic', default_value='false'),
|
||||
DeclareLaunchArgument('enable_d2c_viewer', default_value='false'),
|
||||
DeclareLaunchArgument('disaparity_to_depth_mode', default_value='HW'),
|
||||
DeclareLaunchArgument('enable_ldp', default_value='true'),
|
||||
DeclareLaunchArgument('ldp_power_level', default_value='-1'),
|
||||
DeclareLaunchArgument('sync_mode', default_value='standalone'),
|
||||
DeclareLaunchArgument('depth_delay_us', default_value='0'),
|
||||
DeclareLaunchArgument('color_delay_us', default_value='0'),
|
||||
DeclareLaunchArgument('trigger2image_delay_us', default_value='0'),
|
||||
DeclareLaunchArgument('trigger_out_delay_us', default_value='0'),
|
||||
DeclareLaunchArgument('trigger_out_enabled', default_value='true'),
|
||||
DeclareLaunchArgument('software_trigger_enabled', default_value='true'),
|
||||
DeclareLaunchArgument('frames_per_trigger', default_value='2'),
|
||||
DeclareLaunchArgument('software_trigger_period', default_value='33'), # ms
|
||||
DeclareLaunchArgument('enable_ptp_config', default_value='false'),#Only for Gemini 335Le
|
||||
DeclareLaunchArgument('enable_frame_sync', default_value='true'),
|
||||
DeclareLaunchArgument('ordered_pc', default_value='false'),
|
||||
DeclareLaunchArgument('enable_depth_scale', default_value='true'),
|
||||
DeclareLaunchArgument('enable_decimation_filter', default_value='false'),
|
||||
DeclareLaunchArgument('enable_hdr_merge', default_value='false'),
|
||||
DeclareLaunchArgument('enable_sequence_id_filter', default_value='false'),
|
||||
DeclareLaunchArgument('enable_threshold_filter', default_value='false'),
|
||||
DeclareLaunchArgument('enable_hardware_noise_removal_filter', default_value='false'),
|
||||
DeclareLaunchArgument('enable_noise_removal_filter', default_value='true'),
|
||||
DeclareLaunchArgument('enable_spatial_filter', default_value='false'),
|
||||
DeclareLaunchArgument('enable_temporal_filter', default_value='false'),
|
||||
DeclareLaunchArgument('enable_disaparity_to_depth', default_value='true'),
|
||||
DeclareLaunchArgument('enable_hole_filling_filter', default_value='false'),
|
||||
DeclareLaunchArgument('decimation_filter_scale', default_value='-1'),
|
||||
DeclareLaunchArgument('sequence_id_filter_id', default_value='-1'),
|
||||
DeclareLaunchArgument('threshold_filter_max', default_value='-1'),
|
||||
DeclareLaunchArgument('threshold_filter_min', default_value='-1'),
|
||||
DeclareLaunchArgument('hardware_noise_removal_filter_threshold', default_value='-1.0'),
|
||||
DeclareLaunchArgument('noise_removal_filter_min_diff', default_value='256'),
|
||||
DeclareLaunchArgument('noise_removal_filter_max_size', default_value='80'),
|
||||
DeclareLaunchArgument('spatial_filter_alpha', default_value='-1.0'),
|
||||
DeclareLaunchArgument('spatial_filter_diff_threshold', default_value='-1'),
|
||||
DeclareLaunchArgument('spatial_filter_magnitude', default_value='-1'),
|
||||
DeclareLaunchArgument('spatial_filter_radius', default_value='-1'),
|
||||
DeclareLaunchArgument('temporal_filter_diff_threshold', default_value='-1.0'),
|
||||
DeclareLaunchArgument('temporal_filter_weight', default_value='-1.0'),
|
||||
DeclareLaunchArgument('hole_filling_filter_mode', default_value=''),
|
||||
DeclareLaunchArgument('hdr_merge_exposure_1', default_value='-1'),
|
||||
DeclareLaunchArgument('hdr_merge_gain_1', default_value='-1'),
|
||||
DeclareLaunchArgument('hdr_merge_exposure_2', default_value='-1'),
|
||||
DeclareLaunchArgument('hdr_merge_gain_2', default_value='-1'),
|
||||
DeclareLaunchArgument('align_mode', default_value='SW'),
|
||||
DeclareLaunchArgument('align_target_stream', default_value='COLOR'),# COLOR or DEPTH
|
||||
DeclareLaunchArgument('diagnostic_period', default_value='1.0'),
|
||||
DeclareLaunchArgument('enable_laser', default_value='true'),
|
||||
DeclareLaunchArgument('depth_precision', default_value=''),
|
||||
DeclareLaunchArgument('device_preset', default_value='Default'),
|
||||
DeclareLaunchArgument('retry_on_usb3_detection_failure', default_value='false'),
|
||||
DeclareLaunchArgument('laser_energy_level', default_value='-1'),
|
||||
DeclareLaunchArgument('enable_sync_host_time', default_value='true'),
|
||||
DeclareLaunchArgument('time_domain', default_value='global'),# global, device, system
|
||||
DeclareLaunchArgument('enable_color_undistortion', default_value='false'),
|
||||
DeclareLaunchArgument('config_file_path', default_value=''),
|
||||
DeclareLaunchArgument('enable_heartbeat', default_value='false'),
|
||||
DeclareLaunchArgument('gmsl_trigger_fps', default_value='3000'),
|
||||
DeclareLaunchArgument('enable_gmsl_trigger', default_value='false'),
|
||||
DeclareLaunchArgument('disparity_range_mode', default_value='-1'),
|
||||
DeclareLaunchArgument('disparity_search_offset', default_value='-1'),
|
||||
DeclareLaunchArgument('disparity_offset_config', default_value='false'),
|
||||
DeclareLaunchArgument('offset_index0', default_value='-1'),
|
||||
DeclareLaunchArgument('offset_index1', default_value='-1'),
|
||||
DeclareLaunchArgument('frame_aggregate_mode', default_value='ANY'), # full_frame, color_frame, ANY or disable
|
||||
DeclareLaunchArgument('interleave_ae_mode', default_value='laser'), # 'hdr' or 'laser'
|
||||
DeclareLaunchArgument('interleave_frame_enable', default_value='false'),
|
||||
DeclareLaunchArgument('interleave_skip_enable', default_value='false'),
|
||||
DeclareLaunchArgument('interleave_skip_index', default_value='1'), # 0:skip pattern ir 1: skip flood ir
|
||||
|
||||
DeclareLaunchArgument('hdr_index1_laser_control', default_value='1'),#interleave_hdr_param
|
||||
DeclareLaunchArgument('hdr_index1_depth_exposure', default_value='1'),
|
||||
DeclareLaunchArgument('hdr_index1_depth_gain', default_value='16'),
|
||||
DeclareLaunchArgument('hdr_index1_ir_brightness', default_value='20'),
|
||||
DeclareLaunchArgument('hdr_index1_ir_ae_max_exposure', default_value='2000'),
|
||||
DeclareLaunchArgument('hdr_index0_laser_control', default_value='1'),
|
||||
DeclareLaunchArgument('hdr_index0_depth_exposure', default_value='7500'),
|
||||
DeclareLaunchArgument('hdr_index0_depth_gain', default_value='16'),
|
||||
DeclareLaunchArgument('hdr_index0_ir_brightness', default_value='60'),
|
||||
DeclareLaunchArgument('hdr_index0_ir_ae_max_exposure', default_value='10000'),
|
||||
|
||||
DeclareLaunchArgument('laser_index1_laser_control', default_value='0'),#interleave_laser_param
|
||||
DeclareLaunchArgument('laser_index1_depth_exposure', default_value='3000'),
|
||||
DeclareLaunchArgument('laser_index1_depth_gain', default_value='16'),
|
||||
DeclareLaunchArgument('laser_index1_ir_brightness', default_value='60'),
|
||||
DeclareLaunchArgument('laser_index1_ir_ae_max_exposure', default_value='17000'),
|
||||
DeclareLaunchArgument('laser_index0_laser_control', default_value='1'),
|
||||
DeclareLaunchArgument('laser_index0_depth_exposure', default_value='3000'),
|
||||
DeclareLaunchArgument('laser_index0_depth_gain', default_value='16'),
|
||||
DeclareLaunchArgument('laser_index0_ir_brightness', default_value='60'),
|
||||
DeclareLaunchArgument('laser_index0_ir_ae_max_exposure', default_value='30000'),
|
||||
]
|
||||
|
||||
args = args_camera + args_detect
|
||||
|
||||
def get_params(context, args):
|
||||
return [load_parameters(context, args)]
|
||||
|
||||
def create_node_action(context, args):
|
||||
params = get_params(context, args)
|
||||
ros_distro = os.environ.get("ROS_DISTRO", "humble")
|
||||
if ros_distro == "foxy":
|
||||
return [
|
||||
Node(
|
||||
package="orbbec_camera",
|
||||
executable="orbbec_camera_node",
|
||||
name="ob_camera_node",
|
||||
namespace=LaunchConfiguration("camera_name"),
|
||||
parameters=params,
|
||||
output="screen",
|
||||
)
|
||||
]
|
||||
else:
|
||||
return [
|
||||
GroupAction([
|
||||
PushRosNamespace(LaunchConfiguration("camera_name")),
|
||||
ComposableNodeContainer(
|
||||
name="camera_container",
|
||||
namespace="",
|
||||
package="rclcpp_components",
|
||||
executable="component_container",
|
||||
composable_node_descriptions=[
|
||||
ComposableNode(
|
||||
package="orbbec_camera",
|
||||
plugin="orbbec_camera::OBCameraNodeDriver",
|
||||
name=LaunchConfiguration("camera_name"),
|
||||
parameters=params,
|
||||
),
|
||||
],
|
||||
output="screen",
|
||||
)
|
||||
])
|
||||
]
|
||||
|
||||
def create_detect_node(context):
|
||||
checkpoint = LaunchConfiguration('checkpoint_name').perform(context)
|
||||
output_boxes = LaunchConfiguration('output_boxes').perform(context)
|
||||
output_masks = LaunchConfiguration('output_masks').perform(context)
|
||||
conf = LaunchConfiguration('set_confidence').perform(context)
|
||||
width = LaunchConfiguration('width').perform(context)
|
||||
high = LaunchConfiguration('high').perform(context)
|
||||
device = LaunchConfiguration('device').perform(context)
|
||||
|
||||
return [
|
||||
Node(
|
||||
package='detect_part',
|
||||
executable='crossboard_detect_node',
|
||||
parameters=[{
|
||||
'checkpoint_name': checkpoint,
|
||||
'output_boxes': output_boxes.lower() == 'true',
|
||||
'output_masks': output_masks.lower() == 'true',
|
||||
'set_confidence': float(conf),
|
||||
'width': int(width),
|
||||
'high': int(high),
|
||||
'device': device
|
||||
}]
|
||||
)
|
||||
]
|
||||
|
||||
return LaunchDescription(
|
||||
args + [
|
||||
OpaqueFunction(function=lambda context: create_node_action(context, args_camera)),
|
||||
OpaqueFunction(function=create_detect_node)
|
||||
]
|
||||
)
|
||||
|
||||
@@ -54,10 +54,13 @@ def generate_launch_description():
|
||||
DeclareLaunchArgument('checkpoint_name', default_value='yolo11s-seg.pt'),
|
||||
DeclareLaunchArgument('output_boxes', default_value='True'),
|
||||
DeclareLaunchArgument('output_masks', default_value='False'),
|
||||
DeclareLaunchArgument('set_confidence', default_value='0.6'),
|
||||
DeclareLaunchArgument('set_confidence', default_value='0.5'),
|
||||
DeclareLaunchArgument('color_image_topic', default_value='/camera/color/image_raw'),
|
||||
DeclareLaunchArgument('depth_image_topic', default_value='/camera/depth/image_raw'),
|
||||
DeclareLaunchArgument('camera_info_topic', default_value='/camera/camera_info'),
|
||||
DeclareLaunchArgument('camera_info_topic', default_value='/camera/color/camera_info'),
|
||||
DeclareLaunchArgument('width', default_value='1280'),
|
||||
DeclareLaunchArgument('high', default_value='720'),
|
||||
DeclareLaunchArgument('device', default_value='cpu')
|
||||
]
|
||||
|
||||
args_camera = [
|
||||
@@ -321,6 +324,9 @@ def generate_launch_description():
|
||||
color_image_topic = LaunchConfiguration('color_image_topic').perform(context)
|
||||
depth_image_topic = LaunchConfiguration('depth_image_topic').perform(context)
|
||||
camera_info_topic = LaunchConfiguration('camera_info_topic').perform(context)
|
||||
width = LaunchConfiguration('width').perform(context)
|
||||
high = LaunchConfiguration('high').perform(context)
|
||||
device = LaunchConfiguration('device').perform(context)
|
||||
|
||||
return [
|
||||
Node(
|
||||
@@ -334,6 +340,9 @@ def generate_launch_description():
|
||||
'color_image_topic': color_image_topic,
|
||||
'depth_image_topic': depth_image_topic,
|
||||
'camera_info_topic': camera_info_topic,
|
||||
'width': int(width),
|
||||
'high': int(high),
|
||||
'device': device
|
||||
}]
|
||||
)
|
||||
]
|
||||
|
||||
@@ -146,10 +146,13 @@ def generate_launch_description():
|
||||
DeclareLaunchArgument('checkpoint_name', default_value='yolo11s-seg.pt'),
|
||||
DeclareLaunchArgument('output_boxes', default_value='True'),
|
||||
DeclareLaunchArgument('output_masks', default_value='False'),
|
||||
DeclareLaunchArgument('set_confidence', default_value='0.6'),
|
||||
DeclareLaunchArgument('set_confidence', default_value='0.5'),
|
||||
DeclareLaunchArgument('color_image_topic', default_value='/camera/camera/color/image_raw'),
|
||||
DeclareLaunchArgument('depth_image_topic', default_value='/camera/camera/depth/image_rect_raw'),
|
||||
DeclareLaunchArgument('camera_info_topic', default_value='/camera/camera/color/camera_info'),
|
||||
DeclareLaunchArgument('width', default_value='1280'),
|
||||
DeclareLaunchArgument('high', default_value='720'),
|
||||
DeclareLaunchArgument('device', default_value='cpu')
|
||||
]
|
||||
|
||||
def create_detect_node(context):
|
||||
@@ -160,6 +163,9 @@ def generate_launch_description():
|
||||
color_image_topic = LaunchConfiguration('color_image_topic').perform(context)
|
||||
depth_image_topic = LaunchConfiguration('depth_image_topic').perform(context)
|
||||
camera_info_topic = LaunchConfiguration('camera_info_topic').perform(context)
|
||||
width = LaunchConfiguration('width').perform(context)
|
||||
high = LaunchConfiguration('high').perform(context)
|
||||
device = LaunchConfiguration('device').perform(context)
|
||||
|
||||
return [
|
||||
Node(
|
||||
@@ -173,6 +179,9 @@ def generate_launch_description():
|
||||
'color_image_topic': color_image_topic,
|
||||
'depth_image_topic': depth_image_topic,
|
||||
'camera_info_topic': camera_info_topic,
|
||||
'width': int(width),
|
||||
'high': int(high),
|
||||
'device': device
|
||||
}]
|
||||
)
|
||||
]
|
||||
@@ -28,9 +28,10 @@ setup(
|
||||
entry_points={
|
||||
'console_scripts': [
|
||||
'detect_node = detect_part.detect:main',
|
||||
'test_node = detect_part.detect:test_node',
|
||||
'sub_pose_node = detect_part.sub_pose:main',
|
||||
'calibration_node = detect_part.hand_eye_calibration:main',
|
||||
'crossboard_detect_node = detect_part.crossboard_detect:main',
|
||||
'service_client_node = detect_part.service_client_node:main',
|
||||
],
|
||||
},
|
||||
)
|
||||
|
||||
41
HiveCoreR0/src/robot_vision/img_dev/CMakeLists.txt
Normal file
41
HiveCoreR0/src/robot_vision/img_dev/CMakeLists.txt
Normal file
@@ -0,0 +1,41 @@
|
||||
cmake_minimum_required(VERSION 3.8)
|
||||
project(img_dev)
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
||||
add_compile_options(-Wall -Wextra -Wpedantic)
|
||||
endif()
|
||||
|
||||
# find dependencies
|
||||
find_package(ament_cmake REQUIRED)
|
||||
find_package(sensor_msgs REQUIRED)
|
||||
find_package(rclcpp REQUIRED)
|
||||
find_package(rosidl_default_generators REQUIRED)
|
||||
find_package(message_filters REQUIRED)
|
||||
|
||||
rosidl_generate_interfaces(${PROJECT_NAME}
|
||||
"msg/ImgMsg.msg"
|
||||
DEPENDENCIES sensor_msgs
|
||||
)
|
||||
include_directories(
|
||||
include
|
||||
${CMAKE_CURRENT_BINARY_DIR}
|
||||
${CMAKE_CURRENT_BINARY_DIR}/rosidl_generator_cpp
|
||||
)
|
||||
|
||||
add_executable(img_dev_node src/main.cpp src/img_dev_node.cpp)
|
||||
add_dependencies(img_dev_node ${PROJECT_NAME}__rosidl_typesupport_cpp)
|
||||
|
||||
target_link_libraries(img_dev_node
|
||||
${PROJECT_NAME}__rosidl_typesupport_cpp
|
||||
${PROJECT_NAME}__rosidl_typesupport_c
|
||||
message_filters::message_filters
|
||||
)
|
||||
|
||||
|
||||
ament_target_dependencies(img_dev_node rclcpp sensor_msgs )
|
||||
install(TARGETS img_dev_node DESTINATION lib/${PROJECT_NAME})
|
||||
install(DIRECTORY msg/ DESTINATION share/${PROJECT_NAME})
|
||||
install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch})
|
||||
ament_export_dependencies(rosidl_default_runtime)
|
||||
|
||||
ament_package()
|
||||
@@ -0,0 +1,56 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include<array>
|
||||
using namespace std;
|
||||
namespace img_dev{
|
||||
class ImgCfg{
|
||||
public:
|
||||
ImgCfg() = default;
|
||||
ImgCfg(const string& source, const string& type,const string& position,const string& c_img_topic, const string& d_img_topic){
|
||||
this->source = source;
|
||||
this->type = type;
|
||||
this->position = position;
|
||||
this->c_img_topic = c_img_topic;
|
||||
this->d_img_topic = d_img_topic;
|
||||
this->k_arr.clear();
|
||||
this->d_arr.clear();
|
||||
this->arr_copy=0;
|
||||
};
|
||||
ImgCfg(const ImgCfg& other) = default;
|
||||
ImgCfg(ImgCfg&& other) = default;
|
||||
ImgCfg& operator=(const ImgCfg& other) = default;
|
||||
ImgCfg& operator=(ImgCfg&& other) = default;
|
||||
string str() const{
|
||||
return "soure:"+source+",topic:"+c_img_topic+"&"+d_img_topic;
|
||||
}
|
||||
string getColorTopic() const{
|
||||
return c_img_topic;
|
||||
}
|
||||
string getDepthTopic() const{
|
||||
return d_img_topic;
|
||||
}
|
||||
string getPosition() const{
|
||||
return position;
|
||||
}
|
||||
string getType() const{
|
||||
return type;
|
||||
}
|
||||
string getSource() const{
|
||||
return source;
|
||||
}
|
||||
int arr_copy;
|
||||
vector<double> k_arr;
|
||||
vector<double> d_arr;
|
||||
private:
|
||||
string source;
|
||||
string position;
|
||||
string type;
|
||||
string c_img_topic;
|
||||
string d_img_topic;
|
||||
|
||||
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
#pragma once
|
||||
|
||||
#include <rclcpp/rclcpp.hpp>
|
||||
#include <sensor_msgs/msg/image.hpp>
|
||||
#include "message_filters/subscriber.h"
|
||||
#include "message_filters/time_synchronizer.h"
|
||||
#include "message_filters/sync_policies/approximate_time.hpp"
|
||||
#include "img_dev/msg/img_msg.hpp"
|
||||
using namespace std;
|
||||
namespace img_dev{
|
||||
|
||||
class ImageSubscriber : public rclcpp::Node {
|
||||
public:
|
||||
ImageSubscriber() ;
|
||||
void pub_msg(const sensor_msgs::msg::Image& c_img,const sensor_msgs::msg::Image& d_img,const ImgCfg& cfg);
|
||||
private:
|
||||
rclcpp::Publisher<img_dev::msg::ImgMsg>::SharedPtr img_pub;
|
||||
std::shared_ptr<img_dev::msg::ImgMsg> img_msg;
|
||||
};
|
||||
}
|
||||
35
HiveCoreR0/src/robot_vision/img_dev/launch/img_dev_launch.py
Normal file
35
HiveCoreR0/src/robot_vision/img_dev/launch/img_dev_launch.py
Normal file
@@ -0,0 +1,35 @@
|
||||
from launch import LaunchDescription
|
||||
from launch_ros.actions import Node
|
||||
from launch.actions import ExecuteProcess
|
||||
from launch.substitutions import LaunchConfiguration
|
||||
from launch.conditions import IfCondition
|
||||
|
||||
def generate_launch_description():
|
||||
return LaunchDescription([
|
||||
# 启动orbbec_camera
|
||||
ExecuteProcess(
|
||||
cmd=[
|
||||
'ros2',
|
||||
'launch',
|
||||
'orbbec_camera',
|
||||
'gemini_330_series.launch.py'
|
||||
],
|
||||
output='screen'
|
||||
),
|
||||
|
||||
# 等待一段时间确保相机启动完成
|
||||
ExecuteProcess(
|
||||
cmd=['sleep', '3'],
|
||||
output='screen'
|
||||
),
|
||||
|
||||
# 启动img_dev_node
|
||||
Node(
|
||||
package='img_dev',
|
||||
executable='img_dev_node',
|
||||
name='img_dev_node',
|
||||
output='screen',
|
||||
parameters=[]
|
||||
)
|
||||
])
|
||||
|
||||
7
HiveCoreR0/src/robot_vision/img_dev/msg/ImgMsg.msg
Normal file
7
HiveCoreR0/src/robot_vision/img_dev/msg/ImgMsg.msg
Normal file
@@ -0,0 +1,7 @@
|
||||
sensor_msgs/Image image_depth
|
||||
sensor_msgs/Image image_color
|
||||
float64[] karr
|
||||
float64[] darr
|
||||
string source
|
||||
string position
|
||||
string type
|
||||
21
HiveCoreR0/src/robot_vision/img_dev/package.xml
Normal file
21
HiveCoreR0/src/robot_vision/img_dev/package.xml
Normal file
@@ -0,0 +1,21 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
|
||||
<package format="3">
|
||||
<name>img_dev</name>
|
||||
<version>0.1.0</version>
|
||||
<description>功能包用于实现图像采集、处理及自定义消息的封装与发布</description>
|
||||
<maintainer email="h@example.com">h</maintainer>
|
||||
<license>Apache-2.0</license>
|
||||
<buildtool_depend>ament_cmake</buildtool_depend>
|
||||
<depend>sensor_msgs</depend>
|
||||
<depend>rclcpp</depend>
|
||||
<exec_depend>rosidl_default_runtime</exec_depend>
|
||||
<exec_depend>message_filters</exec_depend>
|
||||
<build_depend>rosidl_default_generators</build_depend>
|
||||
<member_of_group>rosidl_interface_packages</member_of_group>
|
||||
<test_depend>ament_lint_auto</test_depend>
|
||||
<test_depend>ament_lint_common</test_depend>
|
||||
<export>
|
||||
<build_type>ament_cmake</build_type>
|
||||
</export>
|
||||
</package>
|
||||
28
HiveCoreR0/src/robot_vision/img_dev/src/img_dev_node.cpp
Normal file
28
HiveCoreR0/src/robot_vision/img_dev/src/img_dev_node.cpp
Normal file
@@ -0,0 +1,28 @@
|
||||
#include <rclcpp/rclcpp.hpp>
|
||||
#include <sensor_msgs/msg/image.hpp>
|
||||
#include "img_dev/msg/img_msg.hpp"// 自定义消息头文件
|
||||
#include "img_dev/img_cfg.hpp"
|
||||
#include <message_filters/subscriber.h>
|
||||
#include <message_filters/sync_policies/approximate_time.hpp>
|
||||
#include <message_filters/synchronizer.h>
|
||||
#include"img_dev/img_dev_node.hpp"
|
||||
namespace img_dev{
|
||||
ImageSubscriber::ImageSubscriber() : Node("image_subscriber") {
|
||||
img_msg= std::make_shared<img_dev::msg::ImgMsg>();
|
||||
img_pub = this->create_publisher<img_dev::msg::ImgMsg>("/img_msg", 10);
|
||||
}
|
||||
static long long pub_cnt=0;
|
||||
void ImageSubscriber::pub_msg(const sensor_msgs::msg::Image& c_img,const sensor_msgs::msg::Image& d_img,const ImgCfg& cfg){
|
||||
img_msg->source = cfg.getSource();
|
||||
img_msg->type = cfg.getType();
|
||||
img_msg->position=cfg.getPosition();
|
||||
img_msg->image_color = std::move(c_img);
|
||||
img_msg->image_depth = std::move(d_img);
|
||||
img_msg->karr=cfg.k_arr;
|
||||
img_msg->darr=cfg.d_arr;
|
||||
img_pub->publish(*img_msg);
|
||||
pub_cnt+=1;
|
||||
std::cout<<"pub msg["<<pub_cnt<<"]:"<<img_msg->darr[0]<<","<<img_msg->darr[1]<<",k:"<<img_msg->karr[0]<<","<<img_msg->karr[1]<<std::endl;
|
||||
}
|
||||
|
||||
}
|
||||
48
HiveCoreR0/src/robot_vision/img_dev/src/main.cpp
Normal file
48
HiveCoreR0/src/robot_vision/img_dev/src/main.cpp
Normal file
@@ -0,0 +1,48 @@
|
||||
#include <rclcpp/rclcpp.hpp>
|
||||
#include <sensor_msgs/msg/image.hpp>
|
||||
#include <sensor_msgs/msg/camera_info.hpp>
|
||||
#include "img_dev/msg/img_msg.hpp"
|
||||
#include "img_dev/img_cfg.hpp"
|
||||
#include <message_filters/subscriber.h>
|
||||
#include <message_filters/sync_policies/approximate_time.hpp>
|
||||
#include <message_filters/synchronizer.h>
|
||||
#include"img_dev/img_dev_node.hpp"
|
||||
#include <vector>
|
||||
#include<unordered_map>
|
||||
using namespace std;
|
||||
using namespace img_dev;
|
||||
using SyncPolicy=message_filters::sync_policies::ApproximateTime<sensor_msgs::msg::Image,sensor_msgs::msg::Image>;
|
||||
static shared_ptr<ImageSubscriber> cur_node=nullptr;
|
||||
ImgCfg cfg0=ImgCfg("abzg", "myType","head","/camera/color/image_raw","/camera/depth/image_raw");
|
||||
void sync_cb0(const sensor_msgs::msg::Image& c_img, const sensor_msgs::msg::Image& d_img) {
|
||||
cur_node->pub_msg(c_img,d_img,cfg0);
|
||||
}
|
||||
int main(int argc, char* argv[]) {
|
||||
rclcpp::init(argc, argv);
|
||||
cur_node=make_shared<ImageSubscriber>();
|
||||
if(!cur_node){
|
||||
RCLCPP_ERROR(rclcpp::get_logger("main"),"img_dev node create error!!!!");
|
||||
return -1;
|
||||
}
|
||||
rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr camera_info_sub_=cur_node->create_subscription<sensor_msgs::msg::CameraInfo>("/camera/color/camera_info", 10,
|
||||
[&](const sensor_msgs::msg::CameraInfo& info){
|
||||
if(info.k.empty()||info.d.empty())
|
||||
return;
|
||||
if(cfg0.k_arr.size()==0||cfg0.d_arr.size()==0){
|
||||
cfg0.k_arr.assign(info.k.begin(), info.k.end());
|
||||
cfg0.d_arr.assign(info.d.begin(), info.d.end());
|
||||
cfg0.arr_copy+=1;
|
||||
cout<<"copy:"<<cfg0.arr_copy<<"k:"<<info.k[0]<<"d:"<<info.d[0]<<endl;
|
||||
}else{
|
||||
cfg0.arr_copy=10;
|
||||
////cout<<"copy finish:"<<cfg0.arr_copy<<"k:"<<cfg0.k_arr.size()<<"d:"<<cfg0.d_arr.size()<<endl;
|
||||
}
|
||||
});
|
||||
message_filters::Subscriber<sensor_msgs::msg::Image> c_sub0(cur_node, cfg0.getColorTopic());
|
||||
message_filters::Subscriber<sensor_msgs::msg::Image> d_sub0(cur_node,cfg0.getDepthTopic());
|
||||
auto sync = make_shared<message_filters::Synchronizer<SyncPolicy>>(SyncPolicy(10), c_sub0, d_sub0);
|
||||
sync->registerCallback(&sync_cb0);
|
||||
rclcpp::spin(cur_node);
|
||||
rclcpp::shutdown();
|
||||
return 0;
|
||||
}
|
||||
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.8)
|
||||
project(interfaces)
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
||||
add_compile_options(-Wall -Wextra -Wpedantic)
|
||||
add_compile_options(-Wall -Wextra -Wpedantic)
|
||||
endif()
|
||||
|
||||
# find dependencies
|
||||
@@ -15,8 +15,10 @@ find_package(geometry_msgs REQUIRED)
|
||||
|
||||
rosidl_generate_interfaces(${PROJECT_NAME}
|
||||
"msg/PoseWithID.msg"
|
||||
"msg/PoseWithIDArray.msg"
|
||||
"msg/PoseWithIDArray.msg"
|
||||
"srv/PoseArray.srv"
|
||||
DEPENDENCIES geometry_msgs
|
||||
DEPENDENCIES sensor_msgs
|
||||
)
|
||||
|
||||
if(BUILD_TESTING)
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
std_msgs/Header header
|
||||
PoseWithID[] objects
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
<buildtool_depend>ament_cmake</buildtool_depend>
|
||||
<buildtool_depend>geometry_msgs</buildtool_depend>
|
||||
|
||||
|
||||
<build_depend>rosidl_default_generators</build_depend>
|
||||
<exec_depend>rosidl_default_runtime</exec_depend>
|
||||
<member_of_group>rosidl_interface_packages</member_of_group>
|
||||
|
||||
7
HiveCoreR0/src/robot_vision/interfaces/srv/PoseArray.srv
Normal file
7
HiveCoreR0/src/robot_vision/interfaces/srv/PoseArray.srv
Normal file
@@ -0,0 +1,7 @@
|
||||
string camera_position
|
||||
|
||||
---
|
||||
|
||||
string info
|
||||
bool success
|
||||
interfaces/PoseWithID[] objects
|
||||
Reference in New Issue
Block a user