!4 增加realsenseSDK并与检测节点结合

Merge pull request !4 from yuxuan_vision_dev
This commit is contained in:
Ray
2025-08-27 07:35:13 +00:00
committed by Gitee
156 changed files with 20700 additions and 151 deletions

View File

@@ -4,7 +4,7 @@
#### 安装教程
1. Camera SDK:
1. Gemini SDK:
> 安装 deb 依赖项:
```
@@ -21,8 +21,14 @@
sudo bash install_udev_rules.sh
sudo udevadm control --reload-rules && sudo udevadm trigger
```
2. RealSense SDK:
```
sudo apt install ros-humble-librealsense2*
```
2. YOLOv11 Dependency:
3. YOLOv11 Dependency:
```
"cv_bridge>=3.2.1"

View File

@@ -21,6 +21,7 @@ import detect_part
def iqr(depths, threshold: float = 1.5):
"""iqr去除异常点"""
if len(depths) < 7:
return depths
q1 = np.percentile(depths, 25)
@@ -32,64 +33,61 @@ def iqr(depths, threshold: float = 1.5):
return iqr_depths
def get_nine_patch_samples(depth_img, u, v, width, height):
    """Sample depth values from a 3x3 grid of windows centred on (u, v).

    Nine 25x25 windows are placed at offsets of 0 and +/- a quarter of the
    box width/height, each subsampled with stride 2 and clipped to the image
    bounds, then flattened into one 1-D array of raw depth values.
    """
    window_size = 25
    half = window_size // 2
    col_offsets = [int(round(-width / 4)), 0, int(round(width / 4))]
    row_offsets = [int(round(-height / 4)), 0, int(round(height / 4))]
    n_rows = depth_img.shape[0]
    n_cols = depth_img.shape[1]
    samples = []
    for dc in col_offsets:
        for dr in row_offsets:
            r0 = int(max(0, v + dr - half))
            r1 = int(min(v + dr + half + 1, n_rows))
            c0 = int(max(0, u + dc - half))
            c1 = int(min(u + dc + half + 1, n_cols))
            samples.append(depth_img[r0:r1:2, c0:c1:2].flatten())
    return np.concatenate(samples)
def calculate_coordinate(self, depth_img: np.ndarray, camera_info, u, v, width: int, height: int) -> tuple[float, float, float]:
def calculate_coordinate(mask, depth_img: np.ndarray, camera_info, u, v):
"""计算坐标"""
u = int(round(u))
v = int(round(v))
_cx, _cy, _fx, _fy = camera_info
_cx, _cy, _fx, _fy = camera_info[2], camera_info[5], camera_info[0], camera_info[4]
if not (0 <= v < depth_img.shape[0] and 0 <= u < depth_img.shape[1]):
self.get_logger().warning(f'Calculate coordinate error: u={u}, v={v}')
print(f'Calculate coordinate error: u={u}, v={v}')
return 0.0, 0.0, 0.0
window_size = 39
window_size = 15
half = window_size // 2
"""获取mask质心patch"""
patch = depth_img[max(0, v - half):v + half + 1, max(0, u - half):u + half + 1:2].flatten()
valid_depths = patch[patch > 0]
if len(valid_depths) == 0:
patch = get_nine_patch_samples(depth_img, u, v, width, height)
valid_depths = patch[patch > 0]
if len(valid_depths) == 0:
self.get_logger().warning(f'No valid depth in window at ({u}, {v})')
return 0.0, 0.0, 0.0
print(f'No valid depth in window at ({u}, {v})')
return 0.0, 0.0, 0.0
"""过滤异常值取中位数计算xy"""
valid_depths = iqr(valid_depths)
depth = np.median(valid_depths) / 1e3
z = depth
x = -(u - _cx) * z / _fx
y = -(v - _cy) * z / _fy
x = -(u - _cx) * depth / _fx
y = -(v - _cy) * depth / _fy
"""取mask中深度的中位数作为z"""
depth_masked = depth_img[(mask > 0) & (depth_img > 0)]
median_depth = np.median(depth_masked)
z = median_depth / 1e3
return x, y, z
def calculate_rpy(mask, rgb_img):
def calculate_pose(mask, orig_shape, depth_img: np.ndarray, camera_info):
"""计算位态"""
rect = cv2.minAreaRect(max(cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0], key=cv2.contourArea))
box = cv2.boxPoints(rect)
len1 = np.linalg.norm(box[1] - box[0])
len2 = np.linalg.norm(box[1] - box[2])
M = cv2.moments(mask.astype(np.uint8))
if M["m00"] != 0:
center_x = int(M["m10"] / M["m00"])
center_y = int(M["m01"] / M["m00"])
else:
raise ValueError("Mask Is Empty")
mask = cv2.resize(mask.astype(np.uint8), orig_shape[::-1], interpolation=cv2.INTER_LINEAR)
x, y, z = calculate_coordinate(mask, depth_img, camera_info, center_x, center_y)
if len1 >= len2:
vec = box[1] - box[0]
else:
@@ -102,28 +100,63 @@ def calculate_rpy(mask, rgb_img):
if angle <= -90.0:
angle += 180
y = angle/180.0 * math.pi
p = 0.0
r = 0.0
return r, p, y
yaw = angle/180.0 * math.pi
pitch = 0.0
roll = 0.0
return x, y, z, roll, pitch, yaw
def draw_box(set_confidence, rgb_img, boxes, confidences, class_ids, labels):
for i, box in enumerate(boxes):
if confidences[i] < set_confidence:
continue
else:
x_center, y_center, width, height = box[:4]
def draw_box(set_confidence, rgb_img, result, post_list):
"""绘制目标检测边界框"""
boxes = result.boxes.xywh.cpu().numpy()
confidences = result.boxes.conf.cpu().numpy()
class_ids = result.boxes.cls.cpu().numpy()
labels = result.names
p1 = [int((x_center - width / 2)), int((y_center - height / 2))]
p2 = [int((x_center + width / 2)), int((y_center + height / 2))]
cv2.rectangle(rgb_img, p1, p2, (255, 255, 0), 2)
if len(post_list):
for i, box in enumerate(boxes):
j = 0
if confidences[i] < set_confidence:
continue
else:
x_center, y_center, width, height = box[:4]
cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i]*100:.2f}', (p1[0], p1[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
p1 = [int((x_center - width / 2)), int((y_center - height / 2))]
p2 = [int((x_center + width / 2)), int((y_center + height / 2))]
cv2.rectangle(rgb_img, p1, p2, (255, 255, 0), 2)
cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i]*100:.2f}', (p1[0], p1[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
cv2.putText(rgb_img, f'cs: {post_list[j][0]:.3f} {post_list[j][1]:.3f} {post_list[j][2]:.3f}', (p1[0], p1[1] - 35)
,cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
cv2.putText(rgb_img, f'rpy: {post_list[j][3]:.3f} {post_list[j][4]:.3f} {post_list[j][5]:.3f}', (p1[0], p1[1] - 60)
,cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
j += 1
else:
for i, box in enumerate(boxes):
j = 0
if confidences[i] < set_confidence:
continue
else:
x_center, y_center, width, height = box[:4]
p1 = [int((x_center - width / 2)), int((y_center - height / 2))]
p2 = [int((x_center + width / 2)), int((y_center + height / 2))]
cv2.rectangle(rgb_img, p1, p2, (255, 255, 0), 2)
cv2.putText(rgb_img, f'{labels[class_ids[i]]}: {confidences[i] * 100:.2f}', (p1[0], p1[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
j += 1
def draw_mask(set_confidence, rgb_img, masks, confidences, orig_shape):
def draw_mask(set_confidence, rgb_img, result):
"""绘制实例分割mask"""
masks = result.masks.data.cpu().numpy()
orig_shape = result.masks.orig_shape
confidences = result.boxes.conf.cpu().numpy()
for i, mask in enumerate(masks):
if confidences[i] < set_confidence:
continue
@@ -132,8 +165,18 @@ def draw_mask(set_confidence, rgb_img, masks, confidences, orig_shape):
rgb_img[mask > 0] = rgb_img[mask > 0] * 0.5 + np.array([0, 0, 255]) * 0.5
def distortion_correction(color_image, depth_image, map1, map2):
    """Undistort a color/depth image pair with precomputed rectification maps.

    The color image is resampled with bilinear interpolation; the depth image
    uses nearest-neighbour so raw depth values are never blended.  Both
    outputs are cast back to their input dtypes.
    """
    undistorted_color = cv2.remap(color_image, map1, map2, cv2.INTER_LINEAR).astype(color_image.dtype)
    undistorted_depth = cv2.remap(depth_image, map1, map2, cv2.INTER_NEAREST).astype(depth_image.dtype)
    return undistorted_color, undistorted_depth
class DetectNode(Node):
def __init__(self, name):
def __init__(self, name, mode):
super().__init__(name)
self.device = None
self.checkpoint_path = None
@@ -141,37 +184,56 @@ class DetectNode(Node):
self.function = None
self.output_boxes = None
self.output_masks = None
self.K = self.D = self.R = self.P = None
self.map1 = self.map2 = None
self.mode = mode
self.camera_info = []
self.camera_size = []
self.cv_bridge = CvBridge()
'''Init'''
'''init'''
self._init_param()
self._init_model()
if mode == 'detect':
self._init_model()
elif mode == 'test':
self.function = self._test_image
else:
self.function = None
self.get_logger().error('Error: Mode Unknown')
self._init_publisher()
self._init_subscriber()
self.cv_bridge = CvBridge()
def _init_param(self):
"""init parameter"""
pkg_dir = os.path.dirname(detect_part.__file__)
self.declare_parameter('checkpoint_name', 'yolo11s-seg.pt')
self.checkpoint_name = self.get_parameter('checkpoint_name').value
self.checkpoint_path = os.path.join(pkg_dir, 'checkpoint', self.checkpoint_name)
self.declare_parameter('output_boxes', True)
self.output_boxes = self.get_parameter('output_boxes').value
self.declare_parameter('output_masks', False)
self.declare_parameter('output_masks', True)
self.output_masks = self.get_parameter('output_masks').value
self.declare_parameter('set_confidence', 0.5)
self.set_confidence = self.get_parameter('set_confidence').value
self.declare_parameter('color_image_topic', '/camera/color/image_raw')
self.color_image_topic = self.get_parameter('color_image_topic').value
self.declare_parameter('depth_image_topic', '/camera/depth/image_raw')
self.depth_image_topic = self.get_parameter('depth_image_topic').value
self.declare_parameter('camera_info_topic', '/camera/camera_info')
self.camera_info_topic = self.get_parameter('camera_info_topic').value
def _init_model(self):
"""init model"""
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
try:
self.model = YOLO(self.checkpoint_path).to(self.device)
@@ -182,29 +244,29 @@ class DetectNode(Node):
if self.checkpoint_name.endswith('-seg.pt'):
self.function = self._seg_image
elif self.checkpoint_name.endswith('.pt'):
self.function = self._detect_image
else:
self.function = None
self.get_logger().error(f'Unknown checkpoint: {self.checkpoint_name}')
def _init_publisher(self):
"""init_publisher"""
self.pub_pose_list = self.create_publisher(PoseWithIDArray, '/pose/cv_detect_pose', 10)
if self.output_boxes or self.output_masks:
self.pub_detect_image = self.create_publisher(Image, '/image/detect_image', 10)
def _init_subscriber(self):
"""init_subscriber"""
self.sub_camera_info = self.create_subscription(
CameraInfo,
'/camera # x = depth_img[v, u] / 1e3/color/camera_info',
self.camera_info_topic,
self._camera_info_callback,
10
)
'''Sync get color and depth img'''
self.sub_color_image = Subscriber(self, Image, '/camera/color/image_raw')
self.sub_depth_image = Subscriber(self, Image, '/camera/depth/image_raw')
'''sync get color and depth img'''
self.sub_color_image = Subscriber(self, Image, self.color_image_topic)
self.sub_depth_image = Subscriber(self, Image, self.depth_image_topic)
self.sync_subscriber = ApproximateTimeSynchronizer(
[self.sub_color_image, self.sub_depth_image],
@@ -213,90 +275,71 @@ class DetectNode(Node):
)
self.sync_subscriber.registerCallback(self._sync_callback)
def _camera_info_callback(self, msg: CameraInfo):
"""Get camera info"""
self.K = msg.k
self.D = msg.d
self.R = msg.r
self.P = msg.p
self.camera_size.append(msg.width)
self.camera_size.append(msg.height)
self.destroy_subscription(self.sub_camera_info)
self.map1, self.map2, self.K = self._get_map(msg.k)
# self.get_logger().info(f'K: {self.K}')
# self.get_logger().info(f'D: {self.D}')
def _get_map(self, K):
    """Build undistortion/rectification maps from the camera intrinsics.

    Computes an optimal new camera matrix (alpha=1 keeps all source pixels)
    and returns (map1, map2, flattened new intrinsic matrix) for use with
    cv2.remap in distortion_correction().
    """
    w, h = self.camera_size
    intrinsics = np.array(K).reshape(3, 3)
    dist_coeffs = np.array(self.D)
    new_K, _ = cv2.getOptimalNewCameraMatrix(intrinsics, dist_coeffs, (w, h), 1, (w, h))
    map1, map2 = cv2.initUndistortRectifyMap(intrinsics, dist_coeffs, None, new_K, (w, h), cv2.CV_32FC1)
    return map1, map2, new_K.flatten()
def _sync_callback(self, color_img_ros, depth_img_ros):
if not self.camera_info:
"""同步回调函数"""
if self.K is None:
self.get_logger().warn('Camera intrinsics not yet received. Waiting...')
return
color_img_cv = self.cv_bridge.imgmsg_to_cv2(color_img_ros, "bgr8")
depth_img_cv = self.cv_bridge.imgmsg_to_cv2(depth_img_ros, '16UC1')
if self.output_boxes:
img, pose_dict = self.function(color_img_cv, depth_img_cv)
color_img_cv_new, depth_img_cv_new = distortion_correction(color_img_cv, depth_img_cv, self.map1, self.map2)
# diff = cv2.absdiff(color_img_cv, color_img_cv_new)
# cv2.imshow('diff', diff)
# cv2.waitKey(0)
img, pose_dict = self.function(color_img_cv_new, depth_img_cv_new)
"""masks为空结束这一帧"""
if img is None and pose_dict is None:
img = self.cv_bridge.cv2_to_imgmsg(color_img_cv_new, "bgr8")
if img is not None and (self.output_boxes or self.output_masks):
self.pub_detect_image.publish(img)
else:
pose_dict = self.function(color_img_cv, depth_img_cv)
pose_list_all = PoseWithIDArray()
for (class_id, class_name), pose_list in pose_dict.items():
pose_list_all.objects.append(
PoseWithID(
class_name = class_name,
class_id = class_id,
pose_list = pose_list
if pose_dict is not None:
pose_list_all = PoseWithIDArray()
for (class_id, class_name), pose_list in pose_dict.items():
pose_list_all.objects.append(
PoseWithID(
class_name = class_name,
class_id = class_id,
pose_list = pose_list
)
)
)
self.pub_pose_list.publish(pose_list_all)
self.pub_pose_list.publish(pose_list_all)
'''Get camera info'''
def _camera_info_callback(self, msg: CameraInfo):
self.camera_size.append(msg.width)
self.camera_size.append(msg.height)
self.camera_info.append(msg.k[2])
self.camera_info.append(msg.k[5])
self.camera_info.append(msg.k[0])
self.camera_info.append(msg.k[4])
self.destroy_subscription(self.sub_camera_info)
'''Use detection model'''
def _detect_image(self, rgb_img: np.ndarray, depth_img: np.ndarray):
pose_dict = defaultdict(list)
img_h, img_w = rgb_img.shape[:2]
border_x = img_w * 0.10
border_y = img_h * 0.08
results = self.model(rgb_img)
result = results[0]
boxes = result.boxes.xywh.cpu().numpy()
confidences = result.boxes.conf.cpu().numpy()
class_ids = result.boxes.cls.cpu().numpy()
labels = result.names
for i, box in enumerate(boxes):
if confidences[i] < self.set_confidence:
continue
else:
x_center, y_center, width, height = box[:4]
if not (border_x <= x_center <= img_w - border_x and border_y <= y_center <= img_h - border_y):
# self.get_logger().info(f'Skipping object near edge at ({x_center},{y_center})')
continue
x, y, z = calculate_coordinate(self, depth_img, self.camera_info, x_center, y_center, width, height)
if (x, y, z) != (0.0, 0.0, 0.0):
pose = Pose()
pose.position = Point(x=x, y=y, z=z)
pose.orientation = Quaternion(x=0, y=0, z=0)
pose_dict[int(class_ids[i]), labels[class_ids[i]]].append(Point(x=x, y=y, z=z))
if self.output_boxes:
draw_box(self.set_confidence, rgb_img, boxes, confidences, class_ids, labels)
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict
else:
return pose_dict
'''Use segmentation model'''
def _seg_image(self, rgb_img: np.ndarray, depth_img: np.ndarray):
"""Use segmentation model"""
pose_dict = defaultdict(list)
'''Define border'''
border_x, border_y = rgb_img.shape[:2][0] * 0.10, rgb_img.shape[:2][1] * 0.08
@@ -305,6 +348,8 @@ class DetectNode(Node):
result = results[0]
'''Get masks'''
if result.masks is None or len(result.masks) == 0:
return None, None
masks = result.masks.data.cpu().numpy()
orig_shape = result.masks.orig_shape
@@ -314,51 +359,88 @@ class DetectNode(Node):
class_ids = result.boxes.cls.cpu().numpy()
labels = result.names
post_list = []
for i, (mask, box) in enumerate(zip(masks, boxes)):
'''Confidence Filter'''
if confidences[i] < self.set_confidence:
continue
else:
x_center, y_center, width, height = box[:4]
x_center, y_center= box[:2]
'''Border Filter'''
if not (border_x <= x_center <= rgb_img.shape[:2][1] - border_x
and border_y <= y_center <= rgb_img.shape[:2][0] - border_y):
# self.get_logger().info(f'Skipping object near edge at ({x_center},{y_center})')
continue
mask = cv2.resize(mask, orig_shape[::-1], cv2.INTER_LINEAR)
rgb_masked = rgb_img.copy()
rgb_masked[mask == 0] = 0
roll, pitch, yaw = calculate_rpy(mask, rgb_img)
x, y, z = calculate_coordinate(self, depth_img, self.camera_info, x_center, y_center, width, height)
x, y, z, roll, pitch, yaw = calculate_pose(mask, orig_shape, depth_img, self.K)
if (x, y, z) != (0.0, 0.0, 0.0):
pose = Pose()
pose.position = Point(x=x, y=y, z=z)
pose.orientation = Quaternion(x=roll, y=pitch, z=yaw)
pose_dict[int(class_ids[i]), labels[class_ids[i]]].append(pose)
post_list.append([x, y, z, roll, pitch, yaw])
'''mask_img and box_img is or not output'''
if self.output_boxes and self.output_masks:
draw_box(self.set_confidence, rgb_img, boxes, confidences, class_ids, labels)
draw_mask(self.set_confidence, rgb_img, masks, confidences, orig_shape)
draw_box(self.set_confidence, rgb_img, result, post_list)
draw_mask(self.set_confidence, rgb_img, result)
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict
elif self.output_boxes and not self.output_masks:
draw_box(self.set_confidence, rgb_img, boxes, confidences, class_ids, labels)
draw_box(self.set_confidence, rgb_img, result, post_list)
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict
elif not self.output_boxes and self.output_masks:
draw_mask(self.set_confidence, rgb_img, masks, confidences, orig_shape)
draw_mask(self.set_confidence, rgb_img, result)
return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), pose_dict
else:
return pose_dict
return None, pose_dict
def _test_image(self, rgb_img, depth_img):
    """Chessboard-based verification mode.

    Detects an 11x8 inner-corner chessboard, highlights its four corner
    cells, and overlays the coordinate computed by calculate_pose() for the
    masked region.  Returns (annotated image message, None); the image is
    returned unmodified when no board is found.
    """
    rgb_img_gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    pattern_size = (11, 8)  # inner corners per row, per column
    ret, corners = cv2.findChessboardCorners(rgb_img_gray, pattern_size)
    if ret:
        # Refine the detected corners to sub-pixel accuracy for better
        # calibration/overlay precision.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        corners_subpix = cv2.cornerSubPix(rgb_img_gray, corners, (11, 11), (-1, -1), criteria)
        corners_subpix = corners_subpix.reshape(pattern_size[1], pattern_size[0], 2)
        mask = np.zeros(rgb_img_gray.shape, dtype=np.uint8)
        # Fill the four corner cells of the board.  The bounds must stop at
        # size - 2 because each cell spans corner pairs (i, i+1) / (j, j+1);
        # the previous size - 1 bound indexed one past the last corner row
        # and column and raised IndexError whenever a board was detected.
        for i in [0, pattern_size[1] - 2]:
            for j in [0, pattern_size[0] - 2]:
                pts = np.array([
                    corners_subpix[i, j],
                    corners_subpix[i, j + 1],
                    corners_subpix[i + 1, j + 1],
                    corners_subpix[i + 1, j]
                ], dtype=np.int32)
                cv2.fillConvexPoly(mask, pts, 1)
        # Blend the masked cells with red for visual confirmation.
        rgb_img[mask > 0] = rgb_img[mask > 0] * 0.5 + np.array([0, 0, 255]) * 0.5
        orig_shape = rgb_img_gray.shape
        x, y, z, roll, pitch, yaw = calculate_pose(mask, orig_shape, depth_img, self.K)
        cv2.putText(rgb_img, f'cs: x: {x:.2f}, y: {y:.2f}, z: {z:.2f}', (0, 0 + 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), None
    else:
        return self.cv_bridge.cv2_to_imgmsg(rgb_img, "bgr8"), None
def main(args=None):
rclpy.init(args=args)
node = DetectNode('detect')
node = DetectNode('detect', 'detect')
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
def test_node(args=None):
rclpy.init(args=args)
node = DetectNode('detect', 'test')
try:
rclpy.spin(node)
except KeyboardInterrupt:
@@ -370,4 +452,3 @@ def main(args=None):
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,377 @@
#!/usr/bin/env python
# coding: utf-8
import transforms3d as tfs
import numpy as np
import math
import json
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Pose
from message_filters import ApproximateTimeSynchronizer, Subscriber
def get_matrix_quat(x, y, z, rx, ry, rz, rw):
    """Build a 4x4 homogeneous transform from a translation and unit quaternion.

    The quaternion arrives as (rx, ry, rz, rw); transforms3d expects the
    scalar component first, so it is reordered to (w, x, y, z) before the
    conversion.  Scale is left at identity.
    """
    rotation = tfs.quaternions.quat2mat([rw, rx, ry, rz])
    translation = np.squeeze(np.asarray((x, y, z)))
    return tfs.affines.compose(translation, rotation, [1, 1, 1])
def get_matrix_eular_radu(x, y, z, rx, ry, rz):
    """Build a 4x4 homogeneous transform from a translation and Euler angles.

    NOTE(review): despite the name suggesting radians, rx/ry/rz are treated
    as DEGREES and converted with math.radians before the conversion —
    confirm with callers before renaming.  Scale is left at identity.
    """
    rotation = tfs.euler.euler2mat(
        math.radians(rx), math.radians(ry), math.radians(rz)
    )
    translation = np.squeeze(np.asarray((x, y, z)))
    return tfs.affines.compose(translation, rotation, [1, 1, 1])
def get_matrix_rotvector(x, y, z, rx, ry, rz):
    """Build a 4x4 homogeneous transform from a translation and rotation vector.

    The rotation vector (rx, ry, rz) encodes axis * angle; its norm is the
    rotation angle in radians.  Near-zero rotations fall back to the identity
    to avoid dividing by a vanishing norm.  Scale is left at identity.
    """
    rvec = np.array([rx, ry, rz])
    angle = np.linalg.norm(rvec)
    if angle < 1e-8:
        rotation = np.eye(3)  # angle ~ 0: axis is undefined, use identity
    else:
        rotation = tfs.axangles.axangle2mat(rvec / angle, angle)
    translation = np.squeeze(np.asarray((x, y, z)))
    return tfs.affines.compose(translation, rotation, [1, 1, 1])
def skew(v):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector."""
    vx, vy, vz = v[0], v[1], v[2]
    return np.array([
        [0, -vz, vy],
        [vz, 0, -vx],
        [-vy, vx, 0],
    ])
def R2P(R):
    """Convert a rotation matrix to a modified Rodrigues vector.

    Extracts the axis-angle form of the upper-left 3x3 block and returns
    P = 2 * sin(angle / 2) * axis, the parameterisation used by the
    Tsai-Lenz solver in calculate().
    """
    axis, angle = tfs.axangles.mat2axangle(R[0:3, 0:3])
    return 2 * math.sin(angle / 2) * axis
def P2R(P):
    """Convert a modified Rodrigues vector back to a rotation matrix.

    Inverse of R2P for a column vector P; implements the closed form
    R = (1 - |P|^2/2) I + (P P^T + sqrt(4 - |P|^2) skew(P)) / 2.
    """
    p_sq = np.dot(P.T, P)
    identity_term = (1 - p_sq / 2) * np.eye(3)
    outer_term = np.add(np.dot(P, P.T), np.sqrt(4 - p_sq) * skew(P.flatten().T))
    return np.add(identity_term, np.divide(outer_term, 2))
def calculate(Hgs, Hcs):
    """Solve the hand-eye equation AX = XB with the Tsai-Lenz linear method.

    Hgs: list of 4x4 gripper (hand) transforms, one per sample pose.
    Hcs: list of 4x4 camera (target) transforms, one per sample pose.
    Returns the 4x4 homogeneous hand-eye calibration matrix.
    """
    # Naming: uppercase H denotes a matrix, lowercase a scalar.
    Hgijs = []
    Hcijs = []
    A = []
    B = []
    size = 0
    # Stage 1 (rotation): for every pose pair (i, j) form the relative
    # motions Hgij / Hcij and accumulate the linear system
    #   skew(Pgij + Pcij) * Pcg' = Pcij - Pgij
    # over modified Rodrigues vectors (see R2P).
    for i in range(len(Hgs)):
        for j in range(i + 1, len(Hgs)):
            size += 1
            Hgij = np.dot(np.linalg.inv(Hgs[j]), Hgs[i])
            Hgijs.append(Hgij)
            Pgij = np.dot(2, R2P(Hgij))
            Hcij = np.dot(Hcs[j], np.linalg.inv(Hcs[i]))
            Hcijs.append(Hcij)
            Pcij = np.dot(2, R2P(Hcij))
            A.append(skew(np.add(Pgij, Pcij)))
            B.append(np.subtract(Pcij, Pgij).reshape(3, 1))
    MA = np.vstack(A)
    MB = np.vstack(B)
    # Least-squares solution, then renormalise and convert back to the
    # rotation matrix Rcg via P2R.
    Pcg_ = np.dot(np.linalg.pinv(MA), MB)
    pcg = np.sqrt(np.add(1, np.dot(Pcg_.T, Pcg_)))
    Pcg = np.dot(np.dot(2, Pcg_), np.linalg.inv(pcg))
    Rcg = P2R(Pcg).reshape(3, 3)
    # Stage 2 (translation): with Rcg fixed, solve the linear system
    #   (Rgij - I) * Tcg = Rcg * Tcij - Tgij
    # reusing the relative motions cached in stage 1.
    A = []
    B = []
    id = 0
    for i in range(len(Hgs)):
        for j in range(i + 1, len(Hgs)):
            Hgij = Hgijs[id]
            Hcij = Hcijs[id]
            A.append(np.subtract(Hgij[0:3, 0:3], np.eye(3, 3)))
            B.append(np.subtract(np.dot(Rcg, Hcij[0:3, 3:4]), Hgij[0:3, 3:4]))
            id += 1
    MA = np.asarray(A).reshape(size * 3, 3)
    MB = np.asarray(B).reshape(size * 3, 1)
    Tcg = np.dot(np.linalg.pinv(MA), MB).reshape(3, )
    # Compose rotation + translation into a 4x4 homogeneous matrix.
    return tfs.affines.compose(Tcg, np.squeeze(Rcg), [1, 1, 1])
class Calibration(Node):
    """Hand-eye calibration node (Tsai-Lenz).

    Collects synchronized hand (robot flange) and camera (target) poses from
    two topics, then solves AX = XB for the hand-eye transform.  Supports
    eye-in-hand and eye-to-hand setups and three rotation input formats
    (rotation vector, Euler angles in degrees, quaternion).
    """

    def __init__(self, name):
        super(Calibration, self).__init__(name)
        self.Hgs, self.Hcs = [], []      # 4x4 gripper / camera transforms
        self.hand, self.camera = [], []  # flat buffers of pose components
        self.calibration_matrix = None   # 4x4 result, set by calculate_calibration()
        self.declare_parameter('start_collect_once', False)
        self.collect = self.get_parameter('start_collect_once').value
        self.declare_parameter('start_calculate', False)
        self.calculate = self.get_parameter('start_calculate').value
        self.declare_parameter('mode', 'eye_in_hand')
        self.mode = self.get_parameter('mode').value.lower()
        if self.mode not in ['eye_in_hand', 'eye_to_hand']:
            raise ValueError("mode must be 'eye_in_hand' or 'eye_to_hand'")
        self.declare_parameter('input', 'rotvertor')
        self.input = self.get_parameter('input').value.lower()
        # Map the rotation input format to its matrix builder.
        if self.input == 'rotvertor':
            self.function = get_matrix_rotvector
        elif self.input == 'eular':
            self.function = get_matrix_eular_radu
        elif self.input == 'quat':
            self.function = get_matrix_quat
        else:
            raise ValueError("INPUT ERROR")
        self.done = False
        self._init_sub()

    def _init_sub(self):
        """Subscribe to the hand/camera pose topics with approximate time sync."""
        self.sub_hand_pose = Subscriber(self, Pose, '/calibration/hand/pose')
        self.sub_camera_pose = Subscriber(self, Pose, '/calibration/camera/pose')
        # geometry_msgs/Pose carries no header stamp, so the synchronizer
        # must be allowed to pair headerless messages by arrival time.
        self.sync_subscriber = ApproximateTimeSynchronizer(
            [self.sub_hand_pose, self.sub_camera_pose],
            queue_size=10,
            slop=0.1,
            allow_headerless=True
        )
        self.sync_subscriber.registerCallback(self.get_pose_callback)

    def get_pose_callback(self, hand_pose, camera_pose):
        """Buffer one synchronized pose pair; solve when requested."""
        if self.collect:
            _hand = [
                hand_pose.position.x,
                hand_pose.position.y,
                hand_pose.position.z,
                hand_pose.orientation.x,
                hand_pose.orientation.y,
                hand_pose.orientation.z,
            ]
            _camera = [
                camera_pose.position.x,
                camera_pose.position.y,
                camera_pose.position.z,
                camera_pose.orientation.x,
                camera_pose.orientation.y,
                camera_pose.orientation.z,
            ]
            if self.input == 'quat':
                # Quaternion input needs all four components; without w the
                # 7-value stride in calculate_data() would misalign every
                # pose after the first.
                _hand.append(hand_pose.orientation.w)
                _camera.append(camera_pose.orientation.w)
            self.hand.extend(_hand)
            self.camera.extend(_camera)
            self.collect = False
        if self.calculate:
            # Drop our references so no further pairs are processed.  Do NOT
            # call registerCallback(None): the synchronizer would then invoke
            # None on the next message and crash.
            self.sub_hand_pose = None
            self.sub_camera_pose = None
            self.sync_subscriber = None
            print("已停止订阅")
            self.calculate_calibration()
            # The logger requires a string; the matrix is an ndarray.
            self.get_logger().info(str(self.calibration_matrix))
            self.done = True

    def calculate_data(self):
        """Convert the buffered pose components into homogeneous transforms.

        Each pose occupies 7 consecutive values for quaternion input
        (x, y, z, qx, qy, qz, qw) and 6 otherwise (x, y, z, rx, ry, rz).
        """
        stride = 7 if self.input == 'quat' else 6
        for i in range(0, len(self.hand), stride):
            Hg = self.function(*self.hand[i:i + stride])
            if self.mode == 'eye_to_hand':
                # eye-to-hand formulations use the inverse gripper transform.
                Hg = np.linalg.inv(Hg)
            self.Hgs.append(Hg)
            self.Hcs.append(self.function(*self.camera[i:i + stride]))

    def calculate_calibration(self):
        """Run the Tsai-Lenz solver on the collected transform pairs."""
        self.calculate_data()
        self.calibration_matrix = calculate(self.Hgs, self.Hcs)

    def get_data_test(self, hand, camera):
        """Offline entry point: solve directly from given pose lists.

        Prints the result, dumps it to calibration_matrix.json, and marks
        the node as done so the spin loop exits.
        """
        self.hand, self.camera = hand, camera
        self.calculate_calibration()
        print(self.calibration_matrix)
        hand_eye_result = {
            "T": self.calibration_matrix.tolist()
        }
        with open("calibration_matrix.json", "w") as f:
            json.dump(hand_eye_result, f, indent=4)
        self.done = True
def main(args=None):
    """ROS entry point: spin the calibration node until it reports done."""
    rclpy.init(args=args)
    calib_node = Calibration('Calibration')
    try:
        while rclpy.ok() and not calib_node.done:
            rclpy.spin_once(calib_node)
    except KeyboardInterrupt:
        pass  # Ctrl-C is a normal way to stop the node
    finally:
        calib_node.destroy_node()
        rclpy.shutdown()
if __name__ == '__main__':
    # Offline self-test: solve the hand-eye calibration from a recorded set
    # of pose pairs instead of live topics.  Each row below is one pose:
    # [x, y, z, rx, ry, rz] in rotation-vector form, matching the node's
    # default 'rotvertor' input mode.
    hand = [
        0.400422, -0.0262847, 0.365594, 3.12734, -0.0857978, -0.0168582,
        0.423347, -0.16857, 0.340184, -2.73844, 0.089013, 0.123284,
        0.42540, 0.19543, 0.29062, 2.44351, -0.1777, -0.0722,
        0.58088, -0.0541, 0.32633, -2.9303, 0.06957, 0.98985,
        0.2760, -0.033, 0.3301, 3.0813, 0.0724, 0.6077,
        0.54011, 0.09071, 0.34623, 2.62279, 0.75876, -0.6927,
        0.57732, -0.1346, 0.37990, -2.6764, 1.04868, 0.82177,
        0.27844, -0.1371, 0.28799, -0.8233, 2.26319, 0.36110
    ]
    camera = [
        0.11725, 0.05736, 0.32176, 0.02860, -0.1078, 3.06934,
        0.07959, 0.06930, 0.29829, 0.66126, -0.2127, 2.99042,
        0.14434, 0.03227, 0.40377, -0.9465, -0.1744, 2.80668,
        0.11008, 0.05605, 0.35730, 0.12422, -1.0665, 2.87604,
        0.1131, 0.0438171, 0.299624, -0.058278, -0.514154, -3.06309,
        0.057039, 0.109464, 0.415275, 0.414777, 0.750109, -2.49495,
        0.13702, 0.00520, 0.38190, 0.26431, -0.7554, 2.26885,
        -0.03670, -0.04433, 0.237292, 0.807501, -0.27347, 0.614594
    ]
    rclpy.init(args=None)
    calib_node = Calibration('Calibration')
    # get_data_test() solves immediately and sets done, so the spin loop
    # below exits right away; it only mirrors the regular node entry point.
    calib_node.get_data_test(hand, camera)
    try:
        while rclpy.ok() and not calib_node.done:
            rclpy.spin_once(calib_node)
    except KeyboardInterrupt:
        pass
    finally:
        calib_node.destroy_node()
        rclpy.shutdown()

View File

@@ -55,6 +55,9 @@ def generate_launch_description():
DeclareLaunchArgument('output_boxes', default_value='True'),
DeclareLaunchArgument('output_masks', default_value='False'),
DeclareLaunchArgument('set_confidence', default_value='0.6'),
DeclareLaunchArgument('color_image_topic', default_value='/camera/color/image_raw'),
DeclareLaunchArgument('depth_image_topic', default_value='/camera/depth/image_raw'),
DeclareLaunchArgument('camera_info_topic', default_value='/camera/camera_info'),
]
args_camera = [
@@ -315,6 +318,9 @@ def generate_launch_description():
output_boxes = LaunchConfiguration('output_boxes').perform(context)
output_masks = LaunchConfiguration('output_masks').perform(context)
conf = LaunchConfiguration('set_confidence').perform(context)
color_image_topic = LaunchConfiguration('color_image_topic').perform(context)
depth_image_topic = LaunchConfiguration('depth_image_topic').perform(context)
camera_info_topic = LaunchConfiguration('camera_info_topic').perform(context)
return [
Node(
@@ -324,7 +330,10 @@ def generate_launch_description():
'checkpoint_name': checkpoint,
'output_boxes': output_boxes.lower() == 'true',
'output_masks': output_masks.lower() == 'true',
'set_confidence': float(conf)
'set_confidence': float(conf),
'color_image_topic': color_image_topic,
'depth_image_topic': depth_image_topic,
'camera_info_topic': camera_info_topic,
}]
)
]

View File

@@ -0,0 +1,185 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch realsense2_camera node."""
import os
import yaml
from launch import LaunchDescription
import launch_ros.actions
from launch_ros.actions import Node
from launch.actions import DeclareLaunchArgument, OpaqueFunction, LogInfo
from launch.substitutions import LaunchConfiguration
# Declarative table of every launch argument exposed by this file for the
# realsense2_camera node.  Each entry becomes one DeclareLaunchArgument (see
# declare_configurable_parameters) and is forwarded to the camera node as a
# ROS parameter of the same name (see set_configurable_parameters).
# 'default' values are strings because launch arguments are untyped text;
# the driver parses them ("''" means an empty string default).
configurable_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
                           {'name': 'camera_namespace', 'default': 'camera', 'description': 'namespace for camera'},
                           {'name': 'serial_no', 'default': "''", 'description': 'choose device by serial number'},
                           {'name': 'usb_port_id', 'default': "''", 'description': 'choose device by usb port id'},
                           {'name': 'device_type', 'default': "''", 'description': 'choose device by type'},
                           {'name': 'config_file', 'default': "''", 'description': 'yaml config file'},
                           {'name': 'json_file_path', 'default': "''", 'description': 'allows advanced configuration'},
                           {'name': 'initial_reset', 'default': 'false', 'description': "''"},
                           {'name': 'accelerate_gpu_with_glsl', 'default': "false", 'description': 'enable GPU acceleration with GLSL'},
                           {'name': 'rosbag_filename', 'default': "''", 'description': 'A realsense bagfile to run from as a device'},
                           {'name': 'rosbag_loop', 'default': 'false', 'description': 'Enable loop playback when playing a bagfile'},
                           {'name': 'log_level', 'default': 'info', 'description': 'debug log level [DEBUG|INFO|WARN|ERROR|FATAL]'},
                           {'name': 'output', 'default': 'screen', 'description': 'pipe node output [screen|log]'},
                           {'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
                           {'name': 'rgb_camera.color_profile', 'default': '0,0,0', 'description': 'color stream profile'},
                           {'name': 'rgb_camera.color_format', 'default': 'RGB8', 'description': 'color stream format'},
                           {'name': 'rgb_camera.enable_auto_exposure', 'default': 'true', 'description': 'enable/disable auto exposure for color image'},
                           {'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
                           {'name': 'enable_infra', 'default': 'false', 'description': 'enable infra0 stream'},
                           {'name': 'enable_infra1', 'default': 'false', 'description': 'enable infra1 stream'},
                           {'name': 'enable_infra2', 'default': 'false', 'description': 'enable infra2 stream'},
                           {'name': 'depth_module.depth_profile', 'default': '0,0,0', 'description': 'depth stream profile'},
                           {'name': 'depth_module.depth_format', 'default': 'Z16', 'description': 'depth stream format'},
                           {'name': 'depth_module.infra_profile', 'default': '0,0,0', 'description': 'infra streams (0/1/2) profile'},
                           {'name': 'depth_module.infra_format', 'default': 'RGB8', 'description': 'infra0 stream format'},
                           {'name': 'depth_module.infra1_format', 'default': 'Y8', 'description': 'infra1 stream format'},
                           {'name': 'depth_module.infra2_format', 'default': 'Y8', 'description': 'infra2 stream format'},
                           {'name': 'depth_module.color_profile', 'default': '0,0,0', 'description': 'Depth module color stream profile for d405'},
                           {'name': 'depth_module.color_format', 'default': 'RGB8', 'description': 'color stream format for d405'},
                           {'name': 'depth_module.exposure', 'default': '8500', 'description': 'Depth module manual exposure value'},
                           {'name': 'depth_module.gain', 'default': '16', 'description': 'Depth module manual gain value'},
                           {'name': 'depth_module.hdr_enabled', 'default': 'false', 'description': 'Depth module hdr enablement flag. Used for hdr_merge filter'},
                           {'name': 'depth_module.enable_auto_exposure', 'default': 'true', 'description': 'enable/disable auto exposure for depth image'},
                           {'name': 'depth_module.exposure.1', 'default': '7500', 'description': 'Depth module first exposure value. Used for hdr_merge filter'},
                           {'name': 'depth_module.gain.1', 'default': '16', 'description': 'Depth module first gain value. Used for hdr_merge filter'},
                           {'name': 'depth_module.exposure.2', 'default': '1', 'description': 'Depth module second exposure value. Used for hdr_merge filter'},
                           {'name': 'depth_module.gain.2', 'default': '16', 'description': 'Depth module second gain value. Used for hdr_merge filter'},
                           {'name': 'enable_sync', 'default': 'true', 'description': "'enable sync mode'"},
                           {'name': 'depth_module.inter_cam_sync_mode', 'default': "0", 'description': '[0-Default, 1-Master, 2-Slave]'},
                           {'name': 'enable_rgbd', 'default': 'false', 'description': "'enable rgbd topic'"},
                           {'name': 'enable_gyro', 'default': 'false', 'description': "'enable gyro stream'"},
                           {'name': 'enable_accel', 'default': 'false', 'description': "'enable accel stream'"},
                           {'name': 'gyro_fps', 'default': '0', 'description': "''"},
                           {'name': 'enable_motion', 'default': 'false', 'description': "'enable motion stream (IMU) for DDS devices'"},
                           {'name': 'accel_fps', 'default': '0', 'description': "''"},
                           {'name': 'unite_imu_method', 'default': "0", 'description': '[0-None, 1-copy, 2-linear_interpolation]'},
                           {'name': 'clip_distance', 'default': '-2.', 'description': "''"},
                           {'name': 'angular_velocity_cov', 'default': '0.01', 'description': "''"},
                           {'name': 'linear_accel_cov', 'default': '0.01', 'description': "''"},
                           {'name': 'diagnostics_period', 'default': '0.0', 'description': 'Rate of publishing diagnostics. 0=Disabled'},
                           {'name': 'publish_tf', 'default': 'true', 'description': '[bool] enable/disable publishing static & dynamic TF'},
                           {'name': 'tf_publish_rate', 'default': '0.0', 'description': '[double] rate in Hz for publishing dynamic TF'},
                           {'name': 'pointcloud.enable', 'default': 'false', 'description': ''},
                           {'name': 'pointcloud.stream_filter', 'default': '2', 'description': 'texture stream for pointcloud'},
                           {'name': 'pointcloud.stream_index_filter','default': '0', 'description': 'texture stream index for pointcloud'},
                           {'name': 'pointcloud.ordered_pc', 'default': 'false', 'description': ''},
                           {'name': 'pointcloud.allow_no_texture_points', 'default': 'false', 'description': "''"},
                           {'name': 'align_depth.enable', 'default': 'true', 'description': 'enable align depth filter'},
                           {'name': 'colorizer.enable', 'default': 'false', 'description': 'enable colorizer filter'},
                           {'name': 'decimation_filter.enable', 'default': 'false', 'description': 'enable_decimation_filter'},
                           {'name': 'rotation_filter.enable', 'default': 'false', 'description': 'enable rotation_filter'},
                           {'name': 'rotation_filter.rotation', 'default': '0.0', 'description': 'rotation value: 0.0, 90.0, -90.0, 180.0'},
                           {'name': 'spatial_filter.enable', 'default': 'false', 'description': 'enable_spatial_filter'},
                           {'name': 'temporal_filter.enable', 'default': 'false', 'description': 'enable_temporal_filter'},
                           {'name': 'disparity_filter.enable', 'default': 'false', 'description': 'enable_disparity_filter'},
                           {'name': 'hole_filling_filter.enable', 'default': 'false', 'description': 'enable_hole_filling_filter'},
                           {'name': 'hdr_merge.enable', 'default': 'false', 'description': 'hdr_merge filter enablement flag'},
                           {'name': 'wait_for_device_timeout', 'default': '-1.', 'description': 'Timeout for waiting for device to connect (Seconds)'},
                           {'name': 'reconnect_timeout', 'default': '6.', 'description': 'Timeout(seconds) between consequtive reconnection attempts'},
                           {'name': 'base_frame_id', 'default': 'link', 'description': 'Root frame of the sensors transform tree'},
                          ]
def declare_configurable_parameters(parameters):
    """Turn each parameter description dict into a DeclareLaunchArgument.

    :param parameters: iterable of dicts with 'name', 'default' and
        'description' keys (see ``configurable_parameters``)
    :returns: list of DeclareLaunchArgument actions, one per entry
    """
    declarations = []
    for entry in parameters:
        declarations.append(
            DeclareLaunchArgument(
                entry['name'],
                default_value=entry['default'],
                description=entry['description'],
            )
        )
    return declarations
def set_configurable_parameters(parameters):
    """Map each declared parameter name to its LaunchConfiguration.

    The resulting dict is handed to the node action so every launch argument
    is forwarded as a ROS parameter of the same name.
    """
    return {entry['name']: LaunchConfiguration(entry['name']) for entry in parameters}
def yaml_to_dict(path_to_yaml):
    """Load a YAML file and return its parsed contents.

    :param path_to_yaml: filesystem path to the YAML file
    :returns: the deserialized document (typically a dict for config files)

    Reads the file as UTF-8 explicitly so the result does not depend on the
    platform's default locale encoding, and uses the safe loader
    (``yaml.safe_load`` is equivalent to ``yaml.load`` with ``SafeLoader``)
    so untrusted files cannot instantiate arbitrary Python objects.
    """
    with open(path_to_yaml, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)
def launch_setup(context, params, param_name_suffix=''):
    """OpaqueFunction callback that builds the realsense2_camera node action.

    :param context: launch context used to resolve substitutions at runtime
    :param params: dict mapping parameter names to LaunchConfiguration
        substitutions (produced by ``set_configurable_parameters``)
    :param param_name_suffix: optional suffix appended to every launch
        argument name, so multiple camera instances can coexist with
        distinct argument sets
    :returns: list of launch actions — a LogInfo banner plus the camera node
    """
    # Upstream reads `use_lifecycle_node` from a CMake-generated YAML file;
    # that lookup is disabled in this copy (kept below for reference) and a
    # regular ROS node is always used.
    # _config_file = LaunchConfiguration('config_file' + param_name_suffix).perform(context)
    # params_from_file = {} if _config_file == "''" else yaml_to_dict(_config_file)
    #
    # # Load lifecycle nodes setting from YAML dynamically generated by CMAKE instead of environment variable
    # lifecycle_param_file = os.path.join(
    #     os.path.dirname(__file__), '..', 'config', 'global_settings.yaml'
    # )
    # lifecycle_params = yaml_to_dict(lifecycle_param_file)
    # use_lifecycle_node = lifecycle_params.get("use_lifecycle_node", False)
    use_lifecycle_node = False
    _output = LaunchConfiguration('output' + param_name_suffix)
    # Dynamically choose Node or LifecycleNode (always Node while the flag
    # above is hard-coded False).
    node_action = launch_ros.actions.LifecycleNode if use_lifecycle_node else launch_ros.actions.Node
    log_message = "Launching as LifecycleNode" if use_lifecycle_node else "Launching as Normal ROS Node"
    if(os.getenv('ROS_DISTRO') == 'foxy'):
        # Foxy doesn't support output as substitution object (LaunchConfiguration object)
        # but supports it as string, so we fetch the string from this substitution object
        # see related PR that was merged for humble, iron, rolling: https://github.com/ros2/launch/pull/577
        _output = context.perform_substitution(_output)
    return [
        LogInfo(msg=f"🚀 {log_message}"),
        node_action(
            package='realsense2_camera',
            namespace=LaunchConfiguration('camera_namespace' + param_name_suffix),
            name=LaunchConfiguration('camera_name' + param_name_suffix),
            executable='realsense2_camera_node',
            parameters=[params],
            output=_output,
            arguments=['--ros-args', '--log-level', LaunchConfiguration('log_level' + param_name_suffix)],
            emulate_tty=True,
        )
    ]
def generate_launch_description():
    """Launch entry point: RealSense camera driver plus YOLO detection node.

    Declares the camera driver arguments (``configurable_parameters``) and
    the detection-node arguments, then defers the actual node creation to
    two OpaqueFunction callbacks so argument values can be resolved at
    launch time.
    """
    args_detect = [
        DeclareLaunchArgument('checkpoint_name', default_value='yolo11s-seg.pt'),
        DeclareLaunchArgument('output_boxes', default_value='True'),
        DeclareLaunchArgument('output_masks', default_value='False'),
        DeclareLaunchArgument('set_confidence', default_value='0.6'),
        DeclareLaunchArgument('color_image_topic', default_value='/camera/camera/color/image_raw'),
        DeclareLaunchArgument('depth_image_topic', default_value='/camera/camera/depth/image_rect_raw'),
        DeclareLaunchArgument('camera_info_topic', default_value='/camera/camera/color/camera_info'),
    ]

    def create_detect_node(context):
        """OpaqueFunction callback that instantiates the detection node."""
        def resolve(name):
            # Resolve a launch argument to its plain string value.
            return LaunchConfiguration(name).perform(context)

        node_params = {
            'checkpoint_name': resolve('checkpoint_name'),
            'output_boxes': resolve('output_boxes').lower() == 'true',
            'output_masks': resolve('output_masks').lower() == 'true',
            'set_confidence': float(resolve('set_confidence')),
            'color_image_topic': resolve('color_image_topic'),
            'depth_image_topic': resolve('depth_image_topic'),
            'camera_info_topic': resolve('camera_info_topic'),
        }
        return [
            Node(
                package='detect_part',
                executable='detect_node',
                parameters=[node_params],
            )
        ]

    camera_args = declare_configurable_parameters(configurable_parameters)
    return LaunchDescription(
        camera_args
        + args_detect
        + [
            OpaqueFunction(function=launch_setup,
                           kwargs={'params': set_configurable_parameters(configurable_parameters)}),
            OpaqueFunction(function=create_detect_node),
        ]
    )

View File

@@ -28,7 +28,9 @@ setup(
entry_points={
'console_scripts': [
'detect_node = detect_part.detect:main',
'test_node = detect_part.detect:test_node',
'sub_pose_node = detect_part.sub_pose:main',
'calibration_node = detect_part.hand_eye_calibration:main',
],
},
)

View File

@@ -0,0 +1 @@
CMakeLists.txt.user

View File

@@ -0,0 +1,299 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package realsense2_camera
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
4.56.4 (2025-07-22)
-------------------
* PR `#3329 <https://github.com/IntelRealSense/realsense-ros/issues/3329>`_ from Nir-Az: Update version to 4.56.4
* update version to 4.56.4
* PR `#3325 <https://github.com/IntelRealSense/realsense-ros/issues/3325>`_ from ashrafk93: use ykush to switch ports
* test ci
* disable all ports
* change port logic
* map ports dynamically
* use yakush to switch ports
* PR `#3319 <https://github.com/IntelRealSense/realsense-ros/issues/3319>`_ from ashrafk93: Add LifeCycle Node support at compile time
* Remove message on default normal node
* Fix typo
* Update config file name
* Update README
* Fix PR markups
* Add lifecycle node dependency
* Merge pull request `#3313 <https://github.com/IntelRealSense/realsense-ros/issues/3313>`_ from FarStryke21/ros2-development
Added base_frame_id param to the rs_launch.py file
* Added base_frame_id param to the rs_launch.py file
* Merge remote-tracking branch 'origin/ros2-development' into change_topics_in_readme
* PR `#3303 <https://github.com/IntelRealSense/realsense-ros/issues/3303>`_ from noacoohen: Enable rotation filter for color and depth sensors
* rotate infra sensor
* enable rotation filter for color and depth
* PR `#3293 <https://github.com/IntelRealSense/realsense-ros/issues/3293>`_ from remibettan: align_depth_to_infra2 enabled, pointcloud and align_depth filters to own files
* cr adjustments
* align_depth_to_infra2 enabled, pointcloud and align_depth filters to own files
* PR `#3284 <https://github.com/IntelRealSense/realsense-ros/issues/3284>`_ from noacoohen: Add color format to depth module in the launch file
* edit description
* add color format for depth module for d405
* PR `#3274 <https://github.com/IntelRealSense/realsense-ros/issues/3274>`_ from noacoohen: Enable rotation filter ROS2
* PR `#3276 <https://github.com/IntelRealSense/realsense-ros/issues/3276>`_ from remibettan: removing dead code in RosSensor class
runFirstFrameInitialization and _first_frame_functions_stack removed
* removing dead code
* init _is_first_frame to true
* Update rs_launch.py to include additional rotation filter support
* add rotation filter to ros2
* PR `#3214 <https://github.com/IntelRealSense/realsense-ros/issues/3214>`_ from acornaglia: Add ROS bag loop option
* Merge branch 'ros2-development' into change_topics_in_readme
* PR `#3239 <https://github.com/IntelRealSense/realsense-ros/issues/3239>`_ from SamerKhshiboun: Update CMakeLists.txt - remove find_package(fastrtps REQUIRED)
* Update CMakeLists.txt
* Update CMakeLists.txt
* Merge branch 'ros2-development' of github.com:IntelRealSense/realsense-ros into rosbag_loop
* Using variable for avoiding double function call
* Merge branch 'ros2-development' into rosbag_loop
* PR `#3225 <https://github.com/IntelRealSense/realsense-ros/issues/3225>`_ from SamerKhshiboun: Use new APIs for motion, accel and gryo streams
* use new APIs for motion, accel and gryo streams
* PR `#3222 <https://github.com/IntelRealSense/realsense-ros/issues/3222>`_ from SamerKhshiboun: Support D555 and its motion profiles
* support latched QoS for imu_info
* adjusments to DDS Support for ROS Wrapper
* Add D555 PID
* PR `#3221 <https://github.com/IntelRealSense/realsense-ros/issues/3221>`_ from patrickwasp: fix config typo
* fix config typo
* PR `#3216 <https://github.com/IntelRealSense/realsense-ros/issues/3216>`_ from PrasRsRos: hw_reset implementation
* Added rosbag_loop parameter to launch_from_rosbag example
* Added loop back argument description to launch_from_rosbag README
* Added hw_reset service implementation
Added a test in D455 to validate the hw_reset
* Added hw_reset service implementation
Added a test in D455 to validate the hw_reset
* Added hw_reset service implementation
Added a test in D455 to validate the hw_reset
* Added hw_reset service call to reset the device
Added a test in D455 to test the reset device
* Added hw_reset service call to reset the device
* Fixed bag looping mechanism
* Fixed compilation error
* Added ROS bag loop option
* PR `#3200 <https://github.com/IntelRealSense/realsense-ros/issues/3200>`_ from kadiredd: retry thrice finding devices with Ykush reset
* retry thrice finding devices with Ykush reset
* retry thrice finding devices with Ykush reset
* PR `#3178 <https://github.com/IntelRealSense/realsense-ros/issues/3178>`_ from kadiredd: disabling FPS & TF tests for ROS-CI
* disabling FPS & TF tests for ROS-CI
* PR `#3166 <https://github.com/IntelRealSense/realsense-ros/issues/3166>`_ from SamerKhshiboun: Update Calibration Config API
* update calib config usage to the new API, and update readme
* PR `#3159 <https://github.com/IntelRealSense/realsense-ros/issues/3159>`_ from noacoohen: Add D421 PID
* add D421 pid
* PR `#3153 <https://github.com/IntelRealSense/realsense-ros/issues/3153>`_ from SamerKhshiboun: TC | Fix feedback and update readme
* fix feedback and update readme for TC
* PR `#3147 <https://github.com/IntelRealSense/realsense-ros/issues/3147>`_ from SamerKhshiboun: Update READMEs and CONTRIBUTING files regarding ros2-master
* PR `#3148 <https://github.com/IntelRealSense/realsense-ros/issues/3148>`_ from SamerKhshiboun: update READMEs and CONTRIBUTING files regarding ros2-master
* update READMEs and CONTRIBUTING files regarding ros2-master
* update READMEs and CONTRIBUTING files regarding ros2-master
* PR `#3138 <https://github.com/IntelRealSense/realsense-ros/issues/3138>`_ from SamerKhshiboun: Support Triggered Calibration as ROS2 Action
* implement Triggered Calibration action
* Update CMakeLists.txt
* PR `#3135 <https://github.com/IntelRealSense/realsense-ros/issues/3135>`_ from kadiredd: Casefolding device name instead of strict case sensitive comparison
* Casefolding device name instead of strict case sensitive comparison
* PR `#3133 <https://github.com/IntelRealSense/realsense-ros/issues/3133>`_ from SamerKhshiboun: update librealsense2 version to 2.56.0
* update librealsense2 version to 2.56.0
since it includes new API that need for ros2-development
* PR `#3124 <https://github.com/IntelRealSense/realsense-ros/issues/3124>`_ from kadiredd: Support testing ROS2 service call device_info
* PR `#3125 <https://github.com/IntelRealSense/realsense-ros/issues/3125>`_ from SamerKhshiboun: Support calibration config read/write services
* support calib config read/write services
* [ROS][Test Infra] Support testing ROS2 service call device_info
* PR `#3114 <https://github.com/IntelRealSense/realsense-ros/issues/3114>`_ from Arun-Prasad-V: Ubuntu 24.04 support for Rolling and Jazzy distros
* Ubuntu 24.04 support for Rolling and Jazzy
* PR `#3102 <https://github.com/IntelRealSense/realsense-ros/issues/3102>`_ from fortizcuesta: Allow hw synchronization of several realsense using a synchonization cable
* PR `#3096 <https://github.com/IntelRealSense/realsense-ros/issues/3096>`_ from anisotropicity: Update rs_launch.py to add depth_module.color_profile
* Expose depth_module.inter_cam_sync_mode in launch files
* Revert changes
* Merge branch 'ros2-development' into feature/ros2-development-allow-hw-synchronization
* Allow hw synchronization of several realsense devices
* Update rs_launch.py to add depth_module.color_profile
Fix for not being able to set the color profile for D405 cameras.
See https://github.com/IntelRealSense/realsense-ros/issues/3090
* Contributors: Aman Chulawala, Arun-Prasad-V, Ashraf Kattoura, Cornaglia, Alessandro, Madhukar Reddy Kadireddy, Nir Azkiel, Ortiz Cuesta, Fernando, Patrick Wspanialy, PrasRsRos, Remi Bettan, Samer Khshiboun, SamerKhshiboun, acornaglia, administrator, anisotropicity, louislelay, noacoohen, remibettan
4.55.1 (2024-05-28)
-------------------
* PR `#3106 <https://github.com/IntelRealSense/realsense-ros/issues/3106>`_ from SamerKhshiboun: Remove unused parameter _is_profile_exist
* PR `#3098 <https://github.com/IntelRealSense/realsense-ros/issues/3098>`_ from kadiredd: ROS live cam test fixes
* PR `#3094 <https://github.com/IntelRealSense/realsense-ros/issues/3094>`_ from kadiredd: ROSCI infra for live camera testing
* PR `#3066 <https://github.com/IntelRealSense/realsense-ros/issues/3066>`_ from SamerKhshiboun: Revert Foxy Build Support (From Source)
* PR `#3052 <https://github.com/IntelRealSense/realsense-ros/issues/3052>`_ from Arun-Prasad-V: Support for selecting profile for each stream_type
* PR `#3056 <https://github.com/IntelRealSense/realsense-ros/issues/3056>`_ from SamerKhshiboun: Add documentation for RealSense ROS2 Wrapper Windows installation
* PR `#3049 <https://github.com/IntelRealSense/realsense-ros/issues/3049>`_ from Arun-Prasad-V: Applying Colorizer filter to Aligned-Depth image
* PR `#3053 <https://github.com/IntelRealSense/realsense-ros/issues/3053>`_ from Nir-Az: Fix Coverity issues + remove empty warning log
* PR `#3007 <https://github.com/IntelRealSense/realsense-ros/issues/3007>`_ from Arun-Prasad-V: Skip updating Exp 1,2 & Gain 1,2 when HDR is disabled
* PR `#3042 <https://github.com/IntelRealSense/realsense-ros/issues/3042>`_ from kadiredd: Assert Fail if camera not found
* PR `#3008 <https://github.com/IntelRealSense/realsense-ros/issues/3008>`_ from Arun-Prasad-V: Renamed GL GPU enable param
* PR `#2989 <https://github.com/IntelRealSense/realsense-ros/issues/2989>`_ from Arun-Prasad-V: Dynamically switching b/w CPU & GPU processing
* PR `#3001 <https://github.com/IntelRealSense/realsense-ros/issues/3001>`_ from deep0294: Update ReadMe to run ROS2 Unit Test
* PR `#2998 <https://github.com/IntelRealSense/realsense-ros/issues/2998>`_ from SamerKhshiboun: fix calibration intrinsic fail
* PR `#2987 <https://github.com/IntelRealSense/realsense-ros/issues/2987>`_ from SamerKhshiboun: Remove D465 SKU
* PR `#2984 <https://github.com/IntelRealSense/realsense-ros/issues/2984>`_ from deep0294: Fix All Profiles Test
* PR `#2956 <https://github.com/IntelRealSense/realsense-ros/issues/2956>`_ from Arun-Prasad-V: Extending LibRS's GL support to RS ROS2
* PR `#2953 <https://github.com/IntelRealSense/realsense-ros/issues/2953>`_ from Arun-Prasad-V: Added urdf & mesh files for D405 model
* PR `#2940 <https://github.com/IntelRealSense/realsense-ros/issues/2940>`_ from Arun-Prasad-V: Fixing the data_type of ROS Params exposure & gain
* PR `#2948 <https://github.com/IntelRealSense/realsense-ros/issues/2948>`_ from Arun-Prasad-V: Disabling HDR during INIT
* PR `#2934 <https://github.com/IntelRealSense/realsense-ros/issues/2934>`_ from Arun-Prasad-V: Disabling hdr while updating exposure & gain values
* PR `#2946 <https://github.com/IntelRealSense/realsense-ros/issues/2946>`_ from gwen2018: fix ros random crash with error hw monitor command for asic temperature failed
* PR `#2865 <https://github.com/IntelRealSense/realsense-ros/issues/2865>`_ from PrasRsRos: add live camera tests
* PR `#2891 <https://github.com/IntelRealSense/realsense-ros/issues/2891>`_ from Arun-Prasad-V: revert PR2872
* PR `#2853 <https://github.com/IntelRealSense/realsense-ros/issues/2853>`_ from Arun-Prasad-V: Frame latency for the '/topic' provided by user
* PR `#2872 <https://github.com/IntelRealSense/realsense-ros/issues/2872>`_ from Arun-Prasad-V: Updating _camera_name with RS node's name
* PR `#2878 <https://github.com/IntelRealSense/realsense-ros/issues/2878>`_ from Arun-Prasad-V: Updated ros2 examples and readme
* PR `#2841 <https://github.com/IntelRealSense/realsense-ros/issues/2841>`_ from SamerKhshiboun: Remove Dashing, Eloquent, Foxy, L500 and SR300 support
* PR `#2868 <https://github.com/IntelRealSense/realsense-ros/issues/2868>`_ from Arun-Prasad-V: Fix Pointcloud topic frame_id
* PR `#2849 <https://github.com/IntelRealSense/realsense-ros/issues/2849>`_ from Arun-Prasad-V: Create /imu topic only when motion streams enabled
* PR `#2847 <https://github.com/IntelRealSense/realsense-ros/issues/2847>`_ from Arun-Prasad-V: Updated rs_launch param names
* PR `#2839 <https://github.com/IntelRealSense/realsense-ros/issues/2839>`_ from Arun-Prasad: Added ros2 examples
* PR `#2861 <https://github.com/IntelRealSense/realsense-ros/issues/2861>`_ from SamerKhshiboun: fix readme and nodefactory for ros2 run
* PR `#2859 <https://github.com/IntelRealSense/realsense-ros/issues/2859>`_ from PrasRsRos: Fix tests (topic now has camera name)
* PR `#2857 <https://github.com/IntelRealSense/realsense-ros/issues/2857>`_ from lge-ros2: Apply camera name in topics
* PR `#2840 <https://github.com/IntelRealSense/realsense-ros/issues/2840>`_ from SamerKhshiboun: Support Depth, IR and Color formats in ROS2
* PR `#2764 <https://github.com/IntelRealSense/realsense-ros/issues/2764>`_ from lge-ros2 : support modifiable camera namespace
* PR `#2830 <https://github.com/IntelRealSense/realsense-ros/issues/2830>`_ from SamerKhshiboun: Add RGBD + reduce changes between hkr and development
* PR `#2811 <https://github.com/IntelRealSense/realsense-ros/issues/2811>`_ from Arun-Prasad-V: Exposing stream formats params to user
* PR `#2825 <https://github.com/IntelRealSense/realsense-ros/issues/2825>`_ from SamerKhshiboun: Fix align_depth + add test
* PR `#2822 <https://github.com/IntelRealSense/realsense-ros/issues/2822>`_ from Arun-Prasad-V: Updated rs_launch configurations
* PR `#2726 <https://github.com/IntelRealSense/realsense-ros/issues/2726>`_ from PrasRsRos: Integration test template
* PR `#2742 <https://github.com/IntelRealSense/realsense-ros/issues/2742>`_ from danielhonies:Update rs_launch.py
* PR `#2806 <https://github.com/IntelRealSense/realsense-ros/issues/2806>`_ from Arun-Prasad-V: Enabling RGB8 Infrared stream
* PR `#2799 <https://github.com/IntelRealSense/realsense-ros/issues/2799>`_ from SamerKhshiboun: Fix overriding frames on same topics/CV-images due to a bug in PR2759
* PR `#2759 <https://github.com/IntelRealSense/realsense-ros/issues/2759>`_ from SamerKhshiboun: Cleanups and name fixes
* Contributors: (=YG=) Hyunseok Yang, Arun Prasad, Arun-Prasad-V, Daniel Honies, Hyunseok, Madhukar Reddy Kadireddy, Nir, Nir Azkiel, PrasRsRos, Samer Khshiboun, SamerKhshiboun, deep0294, gwen2018, nairps
4.54.1 (2023-06-27)
-------------------
* Applying AlignDepth filter after Pointcloud
* Publish /aligned_depth_to_color topic only when color frame present
* Support Iron distro
* Protect empty string dereference
* Fix: /tf and /static_tf topics' inconsistencies
* Revamped the TF related code
* Fixing TF frame links b/w multi camera nodes when using custom names
* Updated TF descriptions in launch py and readme
* Fixing /tf topic has only TFs of last started sensor
* add D430i support
* Fix Swapped TFs Axes
* replace stereo module with depth module
* use rs2_to_ros to replace stereo module with depth moudle
* calculate extriniscs twice in two opposite ways to save inverting rotation matrix
* fix matrix rotation
* Merge branch 'ros2-development' into readme_fix
* invert translation
* Added 'publish_tf' param in rs launch files
* Indentation corrections
* Fix: Don't publish /tf when publish_tf is false
* use playback device for rosbags
* Avoid configuring dynamic_tf_broadcaster within tf_publish_rate param's callback
* Fix lower FPS in D405, D455
* update rs_launch.py to support enable_auto_exposure and manual exposure
* fix timestamp calculation metadata header to be aligned with metadata json timestamp
* Expose USB port in DeviceInfo service
* Use latched QoS for Extrinsic topic when intra-process is used
* add cppcheck to GHA
* Fix Apache License Header and Intel Copyrights
* apply copyrights and license on project
* Enable intra-process communication for point clouds
* Fix ros2 parameter descriptions and range values
* T265 clean up
* fix float_to_double method
* realsense2_camera/src/sensor_params.cpp
* remove T265 device from ROS Wrapper - step1
* Enable D457
* Fix hdr_merge filter initialization in ros2 launch
* if default profile is not defined, take the first available profile as default
* changed to static_cast and added descriptor name and type
* remove extra ';'
* remove unused variable format_str
* publish point cloud via unique shared pointer
* make source backward compatible to older versions of cv_bridge and rclcpp
* add hdr_merge.enable and depth_module.hdr_enabled to rs_launch.py
* fix compilation errors
* fix tabs
* if default profile is not defined, take the first available profile as default
* Fix ros2 sensor controls steps and add control default value to param description
* Publish static transforms when intra porocess communication is enabled
* Properly read camera config files in rs_launch.py
* fix deprecated API
* Add D457
* Windows bring-up
* publish actual IMU optical frame ID in IMU messages
* Publish static tf for IMU frames
* fix extrinsics calculation
* fix ordered_pc arg prefix
* publish IMU frames only if unite/sync imu method is not none
* Publish static tf for IMU frames
* add D430i support
* Contributors: Arun Prasad, Arun Prasad V, Arun-Prasad-V, Christian Rauch, Daniel Honies, Gilad Bretter, Nir Azkiel, NirAz, Pranav Dhulipala, Samer Khshiboun, SamerKhshiboun, Stephan Wirth, Xiangyu, Yadunund, nvidia
4.51.1 (2022-09-13)
-------------------
* Fix crash when activating IMU & aligned depth together
* Fix rosbag device loading by preventing set_option to HDR/Gain/Exposure
* Support ROS2 Humble
* Publish real frame rate of realsense camera node topics/publishers
* No need to start/stop sensors for align depth changes
* Fix colorizer filter which returns null reference ptr
* Fix align_depth enable/disable
* Add colorizer.enable to rs_launch.py
* Add copyright and license to all ROS2-beta source files
* Fix CUDA suffix for pointcloud and align_depth topics
* Add ROS build farm pre-release to ci
* Contributors: Eran, NirAz, SamerKhshiboun
4.0.4 (2022-03-20)
------------------
* fix required packages for building debians for ros2-beta branch
* Contributors: NirAz
4.0.3 (2022-03-16)
------------------
* Support intra-process zero-copy
* Update README
* Fix Galactic deprecated-declarations compilation warning
* Fix Eloquent compilation error
* Contributors: Eran, Nir-Az, SamerKhshiboun
4.0.2 (2022-02-24)
------------------
* version 4.4.0 changed to 4.0.0 in CHANGELOG
* add frequency monitoring to /diagnostics topic.
* fix topic_hz.py to recognize message type from topic name. (Naive)
* move diagnostic updater for stream frequencies into the RosSensor class.
* add frequency monitoring to /diagnostics topic.
* fix galactic issue with undeclaring parameters
* fix to support Rolling.
* fix dynamic_params syntax.
* fix issue with Galactic parameters set by default to static which prevents them from being undeclared.
* Contributors: Haowei Wen, doronhi, remibettan
4.0.1 (2022-02-01)
------------------
* fix reset issue when multiple devices are connected
* fix /rosout issue
* fix PID for D405 device
* fix bug: frame_id is based on camera_name
* unite_imu_method is now changeable in runtime.
* fix motion module default values.
* add missing extrinsics topics
* fix crash when camera disconnects.
* fix header timestamp for metadata messages.
* Contributors: nomumu, JamesChooWK, benlev, doronhi
4.0.0 (2021-11-17)
-------------------
* changed parameters:
- "stereo_module", "l500_depth_sensor" are replaced by "depth_module"
- for video streams: <module>.profile replaces <stream>_width, <stream>_height, <stream>_fps
- removed paramets <stream>_frame_id, <stream>_optical_frame_id. frame_ids are defined by camera_name
- "filters" is removed. All filters (or post-processing blocks) are enabled/disabled using "<filter>.enable"
- "align_depth" is replaced with "align_depth.enable"
- "allow_no_texture_points", "ordered_pc" replaced by "pointcloud.allow_no_texture_points", "pointcloud.ordered_pc"
- "pointcloud_texture_stream", "pointcloud_texture_index" are replaced by "pointcloud.stream_filter", "pointcloud.stream_index_filter"
* Allow enable/disable of sensors in runtime.
* Allow enable/disable of filters in runtime.

View File

@@ -0,0 +1,436 @@
# Copyright 2024 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.5)
project(realsense2_camera)
# Default to C99
if(NOT CMAKE_C_STANDARD)
set(CMAKE_C_STANDARD 99)
endif()
# Default to C++14
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14)
endif()
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic)
endif()
option(BUILD_WITH_OPENMP "Use OpenMP" OFF)
option(SET_USER_BREAK_AT_STARTUP "Set user wait point in startup (for debug)" OFF)
# Define an option to enable or disable lifecycle nodes
option(USE_LIFECYCLE_NODE "Enable lifecycle nodes (ON/OFF)" OFF)
# Compiler Defense Flags
# Hardening flags (non-executable stack, full RELRO, stack protector, PIE);
# availability differs per toolchain, hence the per-compiler branches below.
if(UNIX OR APPLE)
# Linker flags.
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel")
# GCC specific flags. ICC is compatible with them.
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now")
elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
# In Clang, -z flags are not compatible, they need to be passed to linker via -Wl.
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now")
endif()
# Compiler flags.
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
# GCC specific flags.
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector-strong")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector")
endif()
elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
# Clang is compatible with some of the flags.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector")
elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel")
# Same as above, with exception that ICC compilation crashes with -fPIE option, even
# though it uses -pie linker option that require -fPIE during compilation. Checksec
# shows that it generates correct PIE anyway if only -pie is provided.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector")
endif()
# Generic flags.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall")
# Do not forward the C++ flags to the GPU (CUDA) because they are not supported there.
set( CUDA_PROPAGATE_HOST_FLAGS OFF )
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie")
endif()
if(WIN32)
add_definitions(-D_USE_MATH_DEFINES)
endif()
# Default to an optimized Release build when the user did not pick a build type.
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if (${uppercase_CMAKE_BUILD_TYPE} STREQUAL "RELEASE")
message(STATUS "Create Release Build.")
set(CMAKE_CXX_FLAGS "-O2 ${CMAKE_CXX_FLAGS}")
else()
message(STATUS "Create Debug Build.")
endif()
if(BUILD_WITH_OPENMP)
find_package(OpenMP)
if(NOT OpenMP_FOUND)
message(FATAL_ERROR "\n\n OpenMP is missing!\n\n")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS} -fopenmp")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
endif()
endif()
if(SET_USER_BREAK_AT_STARTUP)
message("GOT FLAG IN CmakeLists.txt")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBPDEBUG")
endif()
# --- Dependencies -----------------------------------------------------------
find_package(ament_cmake REQUIRED)
find_package(builtin_interfaces REQUIRED)
find_package(cv_bridge REQUIRED)
find_package(image_transport REQUIRED)
find_package(rclcpp REQUIRED)
find_package(rclcpp_action REQUIRED)
find_package(rclcpp_components REQUIRED)
find_package(realsense2_camera_msgs REQUIRED)
find_package(std_srvs REQUIRED)
find_package(std_msgs REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(nav_msgs REQUIRED)
find_package(tf2_ros REQUIRED)
find_package(tf2 REQUIRED)
find_package(diagnostic_updater REQUIRED)
find_package(OpenCV REQUIRED COMPONENTS core)
# librealsense SDK, minimum 2.56; not REQUIRED so the friendlier error below can fire.
find_package(realsense2 2.56)
if (BUILD_ACCELERATE_GPU_WITH_GLSL)
find_package(realsense2-gl 2.56)
endif()
if(NOT realsense2_FOUND)
message(FATAL_ERROR "\n\n Intel RealSense SDK 2.0 is missing, please install it from https://github.com/IntelRealSense/librealsense/releases\n\n")
endif()
#set(CMAKE_NO_SYSTEM_FROM_IMPORTED true)
include_directories(include)
include_directories(${OpenCV_INCLUDE_DIRS}) # add OpenCV includes to the included dirs
set(node_plugins "")
set(SOURCES
src/realsense_node_factory.cpp
src/base_realsense_node.cpp
src/parameters.cpp
src/rs_node_setup.cpp
src/ros_sensor.cpp
src/ros_utils.cpp
src/dynamic_params.cpp
src/sensor_params.cpp
src/named_filter.cpp
src/pointcloud_filter.cpp
src/align_depth_filter.cpp
src/profile_manager.cpp
src/image_publisher.cpp
src/tfs.cpp
src/actions.cpp
)
if (BUILD_ACCELERATE_GPU_WITH_GLSL)
list(APPEND SOURCES src/gl_gpu_processing.cpp)
endif()
# Per-distro compile definition (-DFOXY/-DHUMBLE/...) and the matching parameter backend.
if(NOT DEFINED ENV{ROS_DISTRO})
message(FATAL_ERROR "ROS_DISTRO is not defined." )
endif()
if("$ENV{ROS_DISTRO}" STREQUAL "foxy")
message(STATUS "Build for ROS2 Foxy")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DFOXY")
set(SOURCES "${SOURCES}" src/ros_param_backend.cpp)
elseif("$ENV{ROS_DISTRO}" STREQUAL "humble")
message(STATUS "Build for ROS2 Humble")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DHUMBLE")
set(SOURCES "${SOURCES}" src/ros_param_backend.cpp)
elseif("$ENV{ROS_DISTRO}" STREQUAL "iron")
message(STATUS "Build for ROS2 Iron")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DIRON")
set(SOURCES "${SOURCES}" src/ros_param_backend.cpp)
elseif("$ENV{ROS_DISTRO}" STREQUAL "rolling")
message(STATUS "Build for ROS2 Rolling")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DROLLING")
set(SOURCES "${SOURCES}" src/ros_param_backend.cpp)
elseif("$ENV{ROS_DISTRO}" STREQUAL "jazzy")
message(STATUS "Build for ROS2 Jazzy")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DJAZZY")
set(SOURCES "${SOURCES}" src/ros_param_backend.cpp)
elseif("$ENV{ROS_DISTRO}" STREQUAL "kilted")
message(STATUS "Build for ROS2 Kilted")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DKILTED")
set(SOURCES "${SOURCES}" src/ros_param_backend.cpp)
else()
message(FATAL_ERROR "Unsupported ROS Distribution: " "$ENV{ROS_DISTRO}")
endif()
# The header 'cv_bridge/cv_bridge.hpp' was added in version 3.3.0. For older
# cv_bridge versions, we have to use the header 'cv_bridge/cv_bridge.h'.
if(${cv_bridge_VERSION} VERSION_GREATER_EQUAL "3.3.0")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCV_BRDIGE_HAS_HPP")
endif()
# 'OnSetParametersCallbackType' is only defined for rclcpp 17 and onward.
if(${rclcpp_VERSION} VERSION_GREATER_EQUAL "17.0")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DRCLCPP_HAS_OnSetParametersCallbackType")
endif()
if (BUILD_ACCELERATE_GPU_WITH_GLSL)
add_definitions(-DACCELERATE_GPU_WITH_GLSL)
endif()
# --- Library target, link dependencies and component registration ------------
set(INCLUDES
include/constants.h
include/realsense_node_factory.h
include/base_realsense_node.h
include/ros_sensor.h
include/ros_utils.h
include/dynamic_params.h
include/sensor_params.h
include/named_filter.h
include/pointcloud_filter.h
include/align_depth_filter.h
include/ros_param_backend.h
include/profile_manager.h
include/image_publisher.h)
if (BUILD_ACCELERATE_GPU_WITH_GLSL)
list(APPEND INCLUDES include/gl_window.h)
endif()
if (BUILD_TOOLS)
include_directories(tools)
set(INCLUDES ${INCLUDES}
tools/frame_latency/frame_latency.h)
set(SOURCES ${SOURCES}
tools/frame_latency/frame_latency.cpp)
endif()
add_library(${PROJECT_NAME} SHARED
${INCLUDES}
${SOURCES}
)
if (BUILD_ACCELERATE_GPU_WITH_GLSL)
set(link_libraries ${realsense2-gl_LIBRARY})
else()
set(link_libraries ${realsense2_LIBRARY})
endif()
list(APPEND link_libraries ${OpenCV_LIBS}) # add OpenCV libs to link_libraries
target_link_libraries(${PROJECT_NAME}
${link_libraries}
)
set(dependencies
cv_bridge
image_transport
rclcpp
rclcpp_components
realsense2_camera_msgs
std_srvs
std_msgs
sensor_msgs
nav_msgs
tf2
tf2_ros
diagnostic_updater
)
set (targets
cv_bridge::cv_bridge
image_transport::image_transport
rclcpp::rclcpp
rclcpp_components::component
${realsense2_camera_msgs_TARGETS}
${std_srvs_TARGETS}
${std_msgs_TARGETS}
sensor_msgs::sensor_msgs_library
${nav_msgs_TARGETS}
tf2::tf2
tf2_ros::tf2_ros
diagnostic_updater::diagnostic_updater
)
if (BUILD_ACCELERATE_GPU_WITH_GLSL)
list(APPEND dependencies realsense2-gl)
list(APPEND targets realsense2-gl::realsense2-gl)
else()
list(APPEND dependencies realsense2)
list(APPEND targets realsense2::realsense2)
endif()
# If the flag is enabled, define the macro
if(USE_LIFECYCLE_NODE)
find_package(rclcpp_lifecycle REQUIRED)
find_package(lifecycle_msgs REQUIRED)
list(APPEND dependencies rclcpp_lifecycle lifecycle_msgs)
list(APPEND targets
rclcpp_lifecycle::rclcpp_lifecycle
lifecycle_msgs::lifecycle_msgs__rosidl_typesupport_cpp)
add_definitions(-DUSE_LIFECYCLE_NODE)
message("🚀 USE_LIFECYCLE_NODE is ENABLED")
# Create a configuration file for the launch file
file(WRITE "${CMAKE_BINARY_DIR}/global_settings.yaml"
"use_lifecycle_node: true\n")
else()
file(WRITE "${CMAKE_BINARY_DIR}/global_settings.yaml"
"use_lifecycle_node: false\n")
endif()
install(FILES "${CMAKE_BINARY_DIR}/global_settings.yaml"
DESTINATION share/${PROJECT_NAME}/config)
target_link_libraries(${PROJECT_NAME}
${targets}
)
# Register the node as an rclcpp component so it can run standalone or composed.
rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "realsense2_camera::RealSenseNodeFactory"
EXECUTABLE realsense2_camera_node
)
if(BUILD_TOOLS)
rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "rs2_ros::tools::frame_latency::FrameLatencyNode"
EXECUTABLE realsense2_frame_latency_node
)
endif()
# Install binaries
install(TARGETS ${PROJECT_NAME}
ARCHIVE DESTINATION lib
LIBRARY DESTINATION lib
RUNTIME DESTINATION bin
)
# Install headers
install(
DIRECTORY include/
DESTINATION include
)
# Install launch files
install(DIRECTORY
launch
DESTINATION share/${PROJECT_NAME}
)
# Install example files
install(DIRECTORY
examples
DESTINATION share/${PROJECT_NAME}
)
# Test
if(BUILD_TESTING)
# gtest-based C++ tests: one test executable per test/gtest_*.cpp file.
find_package(ament_cmake_gtest REQUIRED)
set(_gtest_folders
test
)
foreach(test_folder ${_gtest_folders})
file(GLOB files "${test_folder}/gtest_*.cpp")
foreach(file ${files})
get_filename_component(_test_name ${file} NAME_WE)
ament_add_gtest(${_test_name} ${file})
target_include_directories(${_test_name} PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
target_link_libraries(${_test_name}
std_srvs::std_srvs__rosidl_typesupport_cpp
std_msgs::std_msgs__rosidl_typesupport_cpp
)
#target_link_libraries(${_test_name} name_of_local_library)
endforeach()
endforeach()
# pytest-based tests: one test per test_*.py file found in the listed folders.
find_package(ament_cmake_pytest REQUIRED)
set(_pytest_folders
test
test/templates
test/rosbag
test/post_processing_filters
)
foreach(test_folder ${_pytest_folders})
file(GLOB files "${test_folder}/test_*.py")
foreach(file ${files})
get_filename_component(_test_name ${file} NAME_WE)
ament_add_pytest_test(${_test_name} ${file}
APPEND_ENV PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}:${CMAKE_SOURCE_DIR}/test/utils:${CMAKE_SOURCE_DIR}/launch:${CMAKE_SOURCE_DIR}/scripts
TIMEOUT 60
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
)
endforeach()
endforeach()
unset(_pytest_folders)
# Live-camera tests are added only when rs-enumerate-devices reports a supported device.
set(rs_query_cmd "rs-enumerate-devices -s")
execute_process(COMMAND bash -c ${rs_query_cmd}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
RESULT_VARIABLE rs_result
OUTPUT_VARIABLE RS_DEVICE_INFO)
message(STATUS "rs_device_info:")
message(STATUS "${RS_DEVICE_INFO}")
if((RS_DEVICE_INFO MATCHES "D455") OR (RS_DEVICE_INFO MATCHES "D415") OR (RS_DEVICE_INFO MATCHES "D435"))
message(STATUS "D455 device found")
set(_pytest_live_folders
test/live_camera
)
endif()
foreach(test_folder ${_pytest_live_folders})
file(GLOB files "${test_folder}/test_*.py")
foreach(file ${files})
get_filename_component(_test_name ${file} NAME_WE)
ament_add_pytest_test(${_test_name} ${file}
APPEND_ENV PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}:${CMAKE_SOURCE_DIR}/test/utils:${CMAKE_SOURCE_DIR}/launch:${CMAKE_SOURCE_DIR}/scripts
TIMEOUT 500
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
)
endforeach()
endforeach()
endif()
# Ament exports
ament_export_include_directories(include)
ament_export_libraries(${PROJECT_NAME})
ament_export_dependencies(${dependencies})
ament_package()

View File

@@ -0,0 +1,12 @@
# Align Depth to Color
This example shows how to start the camera node and align depth stream to color stream.
```
ros2 launch realsense2_camera rs_align_depth_launch.py
```
The aligned image will be published to the topic "/aligned_depth_to_color/image_raw"
Also, align depth to color can be enabled with the following command:
```
ros2 launch realsense2_camera rs_launch.py align_depth.enable:=true
```

View File

@@ -0,0 +1,56 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device and align depth to color.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_align_depth_launch.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import os
from ament_index_python.packages import get_package_share_directory
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
# Local overrides of rs_launch's defaults for this example: enable the color and
# depth streams, the align_depth filter, and sync mode.
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
{'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'align_depth.enable', 'default': 'true', 'description': 'enable align depth filter'},
{'name': 'enable_sync', 'default': 'true', 'description': 'enable sync mode'},
]
def set_configurable_parameters(local_params):
    """Map each declared parameter name to its LaunchConfiguration substitution.

    The result is passed as the ``params`` kwarg to ``rs_launch.launch_setup``.
    """
    # A dict comprehension replaces the redundant dict([...]) wrapper around a
    # list comprehension (flake8-comprehensions C404) — same mapping, built directly.
    return {param['name']: LaunchConfiguration(param['name']) for param in local_params}
def generate_launch_description():
    """Declare all launch arguments, then set up the camera node via rs_launch."""
    shared_params = rs_launch.configurable_parameters
    # Argument declarations: local overrides first, then the shared rs_launch set.
    entries = rs_launch.declare_configurable_parameters(local_parameters)
    entries += rs_launch.declare_configurable_parameters(shared_params)
    # Defer node construction so launch-time configuration values are resolved.
    setup = OpaqueFunction(function=rs_launch.launch_setup,
                           kwargs={'params': set_configurable_parameters(shared_params)})
    return LaunchDescription(entries + [setup])

View File

@@ -0,0 +1,45 @@
{
"calibration_config":
{
"roi_num_of_segments": 2,
"roi_0":
{
"vertex_0": [ 0, 36 ],
"vertex_1": [ 640, 144 ],
"vertex_2": [ 640, 576 ],
"vertex_3": [ 0, 684 ]
},
"roi_1":
{
"vertex_0": [ 640, 144 ],
"vertex_1": [ 1280, 35 ],
"vertex_2": [ 1280, 684 ],
"vertex_3": [ 640, 576 ]
},
"roi_2":
{
"vertex_0": [ 0, 0 ],
"vertex_1": [ 0, 0 ],
"vertex_2": [ 0, 0 ],
"vertex_3": [ 0, 0 ]
},
"roi_3":
{
"vertex_0": [ 0, 0 ],
"vertex_1": [ 0, 0 ],
"vertex_2": [ 0, 0 ],
"vertex_3": [ 0, 0 ]
},
"camera_position":
{
"rotation":
[
[ 0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[ 0.0, -1.0, 0.0]
],
"translation": [0.0, 0.0, 0.27]
},
"crypto_signature": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
}
}

View File

@@ -0,0 +1,123 @@
# Launching Dual RS ROS2 nodes
The following example launches two RS ROS2 nodes.
```
ros2 launch realsense2_camera rs_dual_camera_launch.py serial_no1:=<serial number of the first camera> serial_no2:=<serial number of the second camera>
```
## Example:
Let's say the serial numbers of two RS cameras are 207322251310 and 234422060144.
```
ros2 launch realsense2_camera rs_dual_camera_launch.py serial_no1:="'207322251310'" serial_no2:="'234422060144'"
```
or
```
ros2 launch realsense2_camera rs_dual_camera_launch.py serial_no1:=_207322251310 serial_no2:=_234422060144
```
## How to know the serial number?
Method 1: Using the rs-enumerate-devices tool
```
rs-enumerate-devices | grep "Serial Number"
```
Method 2: Connect single camera and run
```
ros2 launch realsense2_camera rs_launch.py
```
and look for the serial number in the log printed to screen under "[INFO][...] Device Serial No:".
# Using Multiple RS cameras by launching each in different terminals
Make sure you set a different name and namespace for each camera.
Terminal 1:
```
ros2 launch realsense2_camera rs_launch.py serial_no:="'207322251310'" camera_name:='camera1' camera_namespace:='camera1'
```
Terminal 2:
```
ros2 launch realsense2_camera rs_launch.py serial_no:="'234422060144'" camera_name:='camera2' camera_namespace:='camera2'
```
# Multiple cameras showing a semi-unified pointcloud
The D430 series of RealSense cameras use a stereo-based algorithm to calculate depth. This means a couple of cameras can operate on the same scene. For the purpose of this demonstration, let's say 2 cameras can be coupled to look at the same scene from 2 different points of view. See image:
![multi_cameras](https://user-images.githubusercontent.com/127019120/268692789-1b3d5d8b-a41f-4a97-995d-81d44b4bcacb.jpg)
The schematic settings could be described as:
X--------------------------------->cam_2
|&emsp;&emsp;&emsp;&emsp;(70 cm)
|
|
|&ensp;(60 cm)
|
|
/
cam_1
The cameras have no data regarding their relative position. That’s up to a third party program to set. To simplify things, the coordinate system of cam_1 can be considered as the reference coordinate system for the whole scene.
The estimated translation of cam_2 from cam_1 is 70(cm) on X-axis and 60(cm) on Y-axis. Also, the estimated yaw angle of cam_2 relative to cam_1 as 90(degrees) clockwise. These are the initial parameters to be set for setting the transformation between the 2 cameras as follows:
```
ros2 launch realsense2_camera rs_dual_camera_launch.py serial_no1:=_207322251310 serial_no2:=_234422060144 tf.translation.x:=0.7 tf.translation.y:=0.6 tf.translation.z:=0.0 tf.rotation.yaw:=-90.0 tf.rotation.pitch:=0.0 tf.rotation.roll:=0.0
```
If the unified pointcloud result is not good, follow the below steps to fine-tune the calibration.
## Visualizing the pointclouds and fine-tune the camera calibration
Launch 2 cameras in separate terminals:
**Terminal 1:**
```
ros2 launch realsense2_camera rs_launch.py serial_no:="'207322251310'" camera_name:='camera1' camera_namespace:='camera1'
```
**Terminal 2:**
```
ros2 launch realsense2_camera rs_launch.py serial_no:="'234422060144'" camera_name:='camera2' camera_namespace:='camera2'
```
**Terminal 3:**
```
rviz2
```
Open rviz and set "Fixed Frame" to "camera1_link"
Add Pointcloud2-> By topic -> /camera1/camera1/depth/color/points
Add Pointcloud2 -> By topic -> /camera2/camera2/depth/color/points
**Terminal 4:**
Run the 'set_cams_transforms.py' tool. It can be used to fine-tune the calibration.
```
python src/realsense-ros/realsense2_camera/scripts/set_cams_transforms.py camera1_link camera2_link 0.7 0.6 0 -90 0 0
```
**Instructions printed by the tool:**
```
Using default file /home/user_name/ros2_ws/src/realsense-ros/realsense2_camera/scripts/_set_cams_info_file.txt
Use given initial values.
Press the following keys to change mode: x, y, z, (a)zimuth, (p)itch, (r)oll
For each mode, press 6 to increase by step and 4 to decrease
Press + to multiply step by 2 or - to divide
Press Q to quit
```
Note that the tool prints the path of the current configuration file. It saves its last configuration automatically, all the time, to be used on the next run.
After a lot of fiddling around, the unified pointcloud looked better with the following calibration:
```
x = 0.75
y = 0.575
z = 0
azimuth = -91.25
pitch = 0.75
roll = 0
```
Now, use the above results in the launch file:
```
ros2 launch realsense2_camera rs_dual_camera_launch.py serial_no1:=_207322251310 serial_no2:=_234422060144 tf.translation.x:=0.75 tf.translation.y:=0.575 tf.translation.z:=0.0 tf.rotation.yaw:=-91.25 tf.rotation.pitch:=0.75 tf.rotation.roll:=0.0
```

View File

@@ -0,0 +1,109 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch 2 devices.
# The Parameters available for definition in the command line for each camera are described in rs_launch.configurable_parameters
# For each device, the parameter name was changed to include an index.
# For example: to set camera_name for device1 set parameter camera_name1.
# command line example:
# ros2 launch realsense2_camera rs_dual_camera_launch.py serial_no1:=<serial number of 1st camera> serial_no2:=<serial number of 2nd camera>
"""Launch realsense2_camera node."""
import copy
from launch import LaunchDescription, LaunchContext
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import os
from ament_index_python.packages import get_package_share_directory
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
# Per-camera overrides (suffix 1/2 selects the camera) plus the static transform
# (translation + yaw/pitch/roll) from camera1 to camera2.
local_parameters = [{'name': 'camera_name1', 'default': 'camera1', 'description': 'camera unique name'},
{'name': 'camera_name2', 'default': 'camera2', 'description': 'camera unique name'},
{'name': 'camera_namespace1', 'default': 'camera1', 'description': 'camera1 namespace'},
{'name': 'camera_namespace2', 'default': 'camera2', 'description': 'camera2 namespace'},
{'name': 'enable_color1', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_color2', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_depth1', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'enable_depth2', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'pointcloud.enable1', 'default': 'true', 'description': 'enable pointcloud'},
{'name': 'pointcloud.enable2', 'default': 'true', 'description': 'enable pointcloud'},
{'name': 'spatial_filter.enable1', 'default': 'true', 'description': 'enable_spatial_filter'},
{'name': 'spatial_filter.enable2', 'default': 'true', 'description': 'enable_spatial_filter'},
{'name': 'temporal_filter.enable1', 'default': 'true', 'description': 'enable_temporal_filter'},
{'name': 'temporal_filter.enable2', 'default': 'true', 'description': 'enable_temporal_filter'},
{'name': 'tf.translation.x', 'default': '0.0', 'description': 'x'},
{'name': 'tf.translation.y', 'default': '0.0', 'description': 'y'},
{'name': 'tf.translation.z', 'default': '0.0', 'description': 'z'},
{'name': 'tf.rotation.yaw', 'default': '0.0', 'description': 'yaw'},
{'name': 'tf.rotation.pitch', 'default': '0.0', 'description': 'pitch'},
{'name': 'tf.rotation.roll', 'default': '0.0', 'description': 'roll'},
]
def set_configurable_parameters(local_params):
    """Map each parameter's original (un-suffixed) name to the LaunchConfiguration
    of its suffixed launch argument (see ``duplicate_params``)."""
    # A dict comprehension replaces the redundant dict([...]) wrapper around a
    # list comprehension (flake8-comprehensions C404) — same mapping, built directly.
    return {param['original_name']: LaunchConfiguration(param['name']) for param in local_params}
def duplicate_params(general_params, posix):
    """Deep-copy the parameter list for one camera: each entry keeps its original
    name under 'original_name' and gets *posix* (e.g. '1' or '2') appended to 'name'."""
    suffixed = copy.deepcopy(general_params)
    for entry in suffixed:
        original = entry['name']
        entry['original_name'] = original
        entry['name'] = original + posix
    return suffixed
def launch_static_transform_publisher_node(context : LaunchContext):
    """Create the tf2_ros static_transform_publisher node that publishes the
    fixed transform from camera1's link frame to camera2's link frame."""
    cfg = context.launch_configurations
    # Argument order expected by static_transform_publisher: x y z yaw pitch roll parent child.
    tf_args = [cfg[key] for key in ('tf.translation.x',
                                    'tf.translation.y',
                                    'tf.translation.z',
                                    'tf.rotation.yaw',
                                    'tf.rotation.pitch',
                                    'tf.rotation.roll')]
    tf_args.append(cfg['camera_name1'] + "_link")
    tf_args.append(cfg['camera_name2'] + "_link")
    publisher = launch_ros.actions.Node(
        name = "my_static_transform_publisher",
        package = "tf2_ros",
        executable = "static_transform_publisher",
        arguments = tf_args
    )
    return [publisher]
def generate_launch_description():
    """Declare arguments for both cameras, then launch camera 1, camera 2,
    the static transform between them, and an rviz2 window."""
    params1 = duplicate_params(rs_launch.configurable_parameters, '1')
    params2 = duplicate_params(rs_launch.configurable_parameters, '2')
    # Argument declarations: local overrides, then the suffixed set for each camera.
    entries = rs_launch.declare_configurable_parameters(local_parameters)
    entries += rs_launch.declare_configurable_parameters(params1)
    entries += rs_launch.declare_configurable_parameters(params2)
    # One deferred launch_setup per camera, distinguished by its suffix.
    entries.append(OpaqueFunction(function=rs_launch.launch_setup,
                                  kwargs = {'params' : set_configurable_parameters(params1),
                                            'param_name_suffix': '1'}))
    entries.append(OpaqueFunction(function=rs_launch.launch_setup,
                                  kwargs = {'params' : set_configurable_parameters(params2),
                                            'param_name_suffix': '2'}))
    entries.append(OpaqueFunction(function=launch_static_transform_publisher_node))
    entries.append(launch_ros.actions.Node(
        package='rviz2',
        namespace='',
        executable='rviz2',
        name='rviz2',
        arguments=['-d', [ThisLaunchFileDir(), '/rviz/dual_camera_pointcloud.rviz']]
    ))
    return LaunchDescription(entries)

View File

@@ -0,0 +1,194 @@
Panels:
- Class: rviz_common/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /Grid1
- /PointCloud21
- /PointCloud22
Splitter Ratio: 0.5
Tree Height: 865
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
Expanded:
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz_common/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz_default_plugins/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic:
Depth: 5
Durability Policy: Volatile
Filter size: 10
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera1/camera1/depth/color/points
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic:
Depth: 5
Durability Policy: Volatile
Filter size: 10
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera2/camera2/depth/color/points
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: camera1_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz_default_plugins/MoveCamera
- Class: rviz_default_plugins/Select
- Class: rviz_default_plugins/FocusCamera
- Class: rviz_default_plugins/Measure
Line color: 128; 128; 0
- Class: rviz_default_plugins/SetInitialPose
Covariance x: 0.25
Covariance y: 0.25
Covariance yaw: 0.06853891909122467
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /initialpose
- Class: rviz_default_plugins/SetGoal
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /move_base_simple/goal
- Class: rviz_default_plugins/PublishPoint
Single click: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /clicked_point
Transformation:
Current:
Class: rviz_default_plugins/TF
Value: true
Views:
Current:
Class: rviz_default_plugins/Orbit
Distance: 8.93685531616211
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.18814913928508759
Y: -0.17941315472126007
Z: 0.14549313485622406
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: -1.5697963237762451
Target Frame: <Fixed Frame>
Value: Orbit (rviz_default_plugins)
Yaw: 4.730405330657959
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1016
Hide Left Dock: false
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000001560000039efc0200000010fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000039e000000c900fffffffb0000000a0049006d006100670065000000015b0000009a0000000000000000fb0000000a0049006d0061006700650000000197000000d60000000000000000fb0000000a0049006d0061006700650000000203000000f20000000000000000fb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006500000002a8000001330000000000000000fb0000000a0049006d00610067006501000001940000005d0000000000000000fb0000000a0049006d00610067006501000001f70000007a0000000000000000fb0000000a0049006d00610067006501000002770000009d0000000000000000fb0000000a0049006d006100670065010000031a000000ca0000000000000000000000010000010f000001effc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000001ef000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b20000000000000000000000020000073d000000a9fc0100000002fb0000000a0049006d00610067006503000001c5000000bb000001f8000001b0fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000005da0000039e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f0
06c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1846
X: 74
Y: 27

View File

@@ -0,0 +1,22 @@
# Launching RS ROS2 node from rosbag File
The following example allows streaming a rosbag file, saved by Intel RealSense Viewer, instead of streaming live with a camera. It can be used for testing and repetition of the same sequence.
```
ros2 launch realsense2_camera rs_launch_from_rosbag.py
```
By default, the 'rs_launch_from_rosbag.py' launch file uses the "/rosbag/D435i_Depth_and_IMU_Stands_still.bag" rosbag file.
User can also provide a different rosbag file through cmd line as follows:
```
ros2 launch realsense2_camera rs_launch_from_rosbag.py rosbag_filename:="/full/path/to/rosbag/file"
```
or
```
ros2 launch realsense2_camera rs_launch.py rosbag_filename:="/full/path/to/rosbag/file"
```
Additionally, the 'rosbag_loop' cmd line argument enables the looped playback of the rosbag file:
```
ros2 launch realsense2_camera rs_launch_from_rosbag.py rosbag_filename:="/full/path/to/rosbag/file" rosbag_loop:="true"
```
Check-out [sample-recordings](https://github.com/IntelRealSense/librealsense/blob/master/doc/sample-data.md) for a few recorded samples.

View File

@@ -0,0 +1,57 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device from rosbag file.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_launch_from_rosbag.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import os
from ament_index_python.packages import get_package_share_directory
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'enable_gyro', 'default': 'true', 'description': "'enable gyro stream'"},
{'name': 'enable_accel', 'default': 'true', 'description': "'enable accel stream'"},
{'name': 'rosbag_filename', 'default': [ThisLaunchFileDir(), "/rosbag/D435i_Depth_and_IMU_Stands_still.bag"], 'description': 'A realsense bagfile to run from as a device'},
{'name': 'rosbag_loop', 'default': 'false', 'description': 'enable realsense bagfile loop playback'},
]
def set_configurable_parameters(local_params):
    """Map each declared parameter name to its LaunchConfiguration substitution."""
    return {param['name']: LaunchConfiguration(param['name']) for param in local_params}
def generate_launch_description():
    """Declare all configurable parameters and launch the realsense2_camera node from a rosbag."""
    shared_params = rs_launch.configurable_parameters
    # Declarations for this file's local overrides plus the common rs_launch set.
    declarations = rs_launch.declare_configurable_parameters(local_parameters)
    declarations += rs_launch.declare_configurable_parameters(shared_params)
    node_action = OpaqueFunction(
        function=rs_launch.launch_setup,
        kwargs={'params': set_configurable_parameters(shared_params)},
    )
    return LaunchDescription(declarations + [node_action])

View File

@@ -0,0 +1,33 @@
# Get RS ROS2 node params from YAML file
The following example gets the RS ROS2 node params from YAML file.
```
ros2 launch realsense2_camera rs_launch_get_params_from_yaml.py
```
By default, 'rs_launch_get_params_from_yaml.py' launch file uses the "/config/config.yaml" YAML file.
User can provide a different YAML file through cmd line as follows:
```
ros2 launch realsense2_camera rs_launch_get_params_from_yaml.py config_file:="/full/path/to/config/file"
```
or
```
ros2 launch realsense2_camera rs_launch.py config_file:="/full/path/to/config/file"
```
## Syntax for defining params in YAML file
```
param1: value
param2: value
```
Example:
```
enable_color: true
rgb_camera.color_profile: 1280x720x15
enable_depth: true
align_depth.enable: true
enable_sync: true
publish_tf: true
tf_publish_rate: 1.0
```

View File

@@ -0,0 +1,7 @@
enable_color: true
rgb_camera.color_profile: 1280x720x15
enable_depth: true
align_depth.enable: true
enable_sync: true
publish_tf: true
tf_publish_rate: 1.0

View File

@@ -0,0 +1,53 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device and get the params from a YAML file.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_launch_get_params_from_yaml.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import os
from ament_index_python.packages import get_package_share_directory
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
{'name': 'config_file', 'default': [ThisLaunchFileDir(), "/config/config.yaml"], 'description': 'yaml config file'},
]
def set_configurable_parameters(local_params):
    """Build a name -> LaunchConfiguration mapping for the given parameter declarations."""
    return {entry['name']: LaunchConfiguration(entry['name']) for entry in local_params}
def generate_launch_description():
    """Declare all configurable parameters and launch the realsense2_camera node (YAML-config variant)."""
    shared_params = rs_launch.configurable_parameters
    # Local overrides first, then the common rs_launch parameter set.
    declarations = rs_launch.declare_configurable_parameters(local_parameters)
    declarations += rs_launch.declare_configurable_parameters(shared_params)
    node_action = OpaqueFunction(
        function=rs_launch.launch_setup,
        kwargs={'params': set_configurable_parameters(shared_params)},
    )
    return LaunchDescription(declarations + [node_action])

View File

@@ -0,0 +1,17 @@
# PointCloud Visualization
The following example starts the camera and simultaneously opens RViz GUI to visualize the published pointcloud.
```
ros2 launch realsense2_camera rs_pointcloud_launch.py
```
Alternatively, start the camera terminal 1:
```
ros2 launch realsense2_camera rs_launch.py pointcloud.enable:=true
```
and in terminal 2 open rviz to visualize pointcloud.
# PointCloud with different coordinate systems
This example opens rviz and shows the camera model with different coordinate systems and the pointcloud, so it presents the pointcloud and the camera together.
```
ros2 launch realsense2_camera rs_d455_pointcloud_launch.py
```

View File

@@ -0,0 +1,92 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device and visualize pointcloud along with the camera model D405.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_d405_pointcloud_launch.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import os
import sys
import pathlib
import xacro
import tempfile
from ament_index_python.packages import get_package_share_directory
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
{'name': 'device_type', 'default': "d405", 'description': 'choose device by type'},
{'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'pointcloud.enable', 'default': 'true', 'description': 'enable pointcloud'},
]
def to_urdf(xacro_path, parameters=None):
    """Convert the given xacro file to a URDF file and return its path.

    * xacro_path -- the path to the xacro file
    * parameters -- mappings to be used when the xacro file is parsed

    Returns the path of the generated temporary URDF file.
    """
    # tempfile.mkstemp instead of the deprecated tempfile.mktemp: mktemp only
    # returns a name (race-prone), while mkstemp atomically creates the file.
    fd, urdf_path = tempfile.mkstemp(prefix="%s_" % os.path.basename(xacro_path))
    os.close(fd)  # xacro.open_output reopens the file by path below
    # parse the xacro file, applying the given mappings
    doc = xacro.process_file(xacro_path, mappings=parameters)
    # write the pretty-printed URDF to the temporary file
    out = xacro.open_output(urdf_path)
    out.write(doc.toprettyxml(indent=' '))
    return urdf_path
def set_configurable_parameters(local_params):
    """Return a dict mapping each parameter name to its LaunchConfiguration."""
    return {p['name']: LaunchConfiguration(p['name']) for p in local_params}
def generate_launch_description():
    """Declare parameters, start the camera, and visualize pointcloud plus the D405 model in RViz."""
    shared_params = rs_launch.configurable_parameters
    # Generate a URDF from the D405 xacro so robot_state_publisher can publish the model.
    xacro_path = os.path.join(get_package_share_directory('realsense2_description'),
                              'urdf', 'test_d405_camera.urdf.xacro')
    urdf = to_urdf(xacro_path, {'use_nominal_extrinsics': 'true', 'add_plug': 'true'})
    declarations = rs_launch.declare_configurable_parameters(local_parameters)
    declarations += rs_launch.declare_configurable_parameters(shared_params)
    camera_action = OpaqueFunction(
        function=rs_launch.launch_setup,
        kwargs={'params': set_configurable_parameters(shared_params)},
    )
    rviz_node = launch_ros.actions.Node(
        package='rviz2',
        namespace='',
        executable='rviz2',
        name='rviz2',
        arguments=['-d', [ThisLaunchFileDir(), '/rviz/urdf_pointcloud.rviz']],
        output='screen',
        parameters=[{'use_sim_time': False}],
    )
    model_node = launch_ros.actions.Node(
        name='model_node',
        package='robot_state_publisher',
        executable='robot_state_publisher',
        namespace='',
        output='screen',
        arguments=[urdf],
    )
    return LaunchDescription(declarations + [camera_action, rviz_node, model_node])

View File

@@ -0,0 +1,92 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device and visualize pointcloud along with the camera model D455.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_d455_pointcloud_launch.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import os
import sys
import pathlib
import xacro
import tempfile
from ament_index_python.packages import get_package_share_directory
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
{'name': 'device_type', 'default': "d455", 'description': 'choose device by type'},
{'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'pointcloud.enable', 'default': 'true', 'description': 'enable pointcloud'},
]
def to_urdf(xacro_path, parameters=None):
    """Convert the given xacro file to a URDF file and return its path.

    * xacro_path -- the path to the xacro file
    * parameters -- mappings to be used when the xacro file is parsed

    Returns the path of the generated temporary URDF file.
    """
    # tempfile.mkstemp instead of the deprecated tempfile.mktemp: mktemp only
    # returns a name (race-prone), while mkstemp atomically creates the file.
    fd, urdf_path = tempfile.mkstemp(prefix="%s_" % os.path.basename(xacro_path))
    os.close(fd)  # xacro.open_output reopens the file by path below
    # parse the xacro file, applying the given mappings
    doc = xacro.process_file(xacro_path, mappings=parameters)
    # write the pretty-printed URDF to the temporary file
    out = xacro.open_output(urdf_path)
    out.write(doc.toprettyxml(indent=' '))
    return urdf_path
def set_configurable_parameters(local_params):
    """Return a dict mapping each parameter name to its LaunchConfiguration."""
    return {p['name']: LaunchConfiguration(p['name']) for p in local_params}
def generate_launch_description():
    """Declare parameters, start the camera, and visualize pointcloud plus the D455 model in RViz."""
    shared_params = rs_launch.configurable_parameters
    # Generate a URDF from the D455 xacro so robot_state_publisher can publish the model.
    xacro_path = os.path.join(get_package_share_directory('realsense2_description'),
                              'urdf', 'test_d455_camera.urdf.xacro')
    urdf = to_urdf(xacro_path, {'use_nominal_extrinsics': 'true', 'add_plug': 'true'})
    declarations = rs_launch.declare_configurable_parameters(local_parameters)
    declarations += rs_launch.declare_configurable_parameters(shared_params)
    camera_action = OpaqueFunction(
        function=rs_launch.launch_setup,
        kwargs={'params': set_configurable_parameters(shared_params)},
    )
    rviz_node = launch_ros.actions.Node(
        package='rviz2',
        namespace='',
        executable='rviz2',
        name='rviz2',
        arguments=['-d', [ThisLaunchFileDir(), '/rviz/urdf_pointcloud.rviz']],
        output='screen',
        parameters=[{'use_sim_time': False}],
    )
    model_node = launch_ros.actions.Node(
        name='model_node',
        package='robot_state_publisher',
        executable='robot_state_publisher',
        namespace='',
        output='screen',
        arguments=[urdf],
    )
    return LaunchDescription(declarations + [camera_action, rviz_node, model_node])

View File

@@ -0,0 +1,58 @@
# Copyright 2025 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device and align depth to infrared 2.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_align_depth_to_infra_launch.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import os
from ament_index_python.packages import get_package_share_directory
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
# Launch-file-specific parameter declarations; these override the rs_launch defaults.
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
                    {'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
                    {'name': 'enable_color', 'default': 'false', 'description': 'enable color stream'},
                    {'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
                    # Fixed copy-paste error: description previously read 'enable depth stream'.
                    {'name': 'enable_infra2', 'default': 'true', 'description': 'enable infra2 stream'},
                    {'name': 'enable_sync', 'default': 'true', 'description': 'enable sync mode'},
                    {'name': 'pointcloud.enable', 'default': 'true', 'description': 'enable pointcloud'},
                    {'name': 'pointcloud.stream_filter', 'default': '3', 'description': 'pointcloud with infrared'},
                   ]
def set_configurable_parameters(local_params):
    """Map each declared parameter name to its LaunchConfiguration substitution."""
    return {param['name']: LaunchConfiguration(param['name']) for param in local_params}
def generate_launch_description():
    """Declare all configurable parameters and launch the camera with depth aligned to infra2."""
    shared_params = rs_launch.configurable_parameters
    # Declarations for this file's local overrides plus the common rs_launch set.
    declarations = rs_launch.declare_configurable_parameters(local_parameters)
    declarations += rs_launch.declare_configurable_parameters(shared_params)
    node_action = OpaqueFunction(
        function=rs_launch.launch_setup,
        kwargs={'params': set_configurable_parameters(shared_params)},
    )
    return LaunchDescription(declarations + [node_action])

View File

@@ -0,0 +1,62 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch a device and visualize pointcloud.
# The Parameters available for definition in the command line for the camera are described in rs_launch.configurable_parameters
# command line example:
# ros2 launch realsense2_camera rs_pointcloud_launch.py
"""Launch realsense2_camera node."""
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import OpaqueFunction
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import os
from ament_index_python.packages import get_package_share_directory
sys.path.append(os.path.join(get_package_share_directory('realsense2_camera'), 'launch'))
import rs_launch
local_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'camera namespace'},
{'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'pointcloud.enable', 'default': 'true', 'description': 'enable pointcloud'},
]
def set_configurable_parameters(local_params):
    """Build a name -> LaunchConfiguration mapping for the given parameter declarations."""
    return {entry['name']: LaunchConfiguration(entry['name']) for entry in local_params}
def generate_launch_description():
    """Declare parameters, start the RS camera node, and open RViz on the pointcloud view."""
    shared_params = rs_launch.configurable_parameters
    declarations = rs_launch.declare_configurable_parameters(local_parameters)
    declarations += rs_launch.declare_configurable_parameters(shared_params)
    camera_action = OpaqueFunction(
        function=rs_launch.launch_setup,
        kwargs={'params': set_configurable_parameters(shared_params)},
    )
    rviz_node = launch_ros.actions.Node(
        package='rviz2',
        namespace='',
        executable='rviz2',
        name='rviz2',
        arguments=['-d', [ThisLaunchFileDir(), '/rviz/pointcloud.rviz']],
    )
    return LaunchDescription(declarations + [camera_action, rviz_node])

View File

@@ -0,0 +1,189 @@
Panels:
- Class: rviz_common/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /Grid1
- /PointCloud21
Splitter Ratio: 0.5
Tree Height: 382
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
Expanded:
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz_common/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz_default_plugins/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic:
Depth: 5
Durability Policy: Volatile
Filter size: 10
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/depth/color/points
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/color/image_raw
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/depth/image_rect_raw
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: camera_depth_optical_frame
Frame Rate: 30
Name: root
Tools:
- Class: rviz_default_plugins/MoveCamera
- Class: rviz_default_plugins/Select
- Class: rviz_default_plugins/FocusCamera
- Class: rviz_default_plugins/Measure
Line color: 128; 128; 0
- Class: rviz_default_plugins/SetInitialPose
Covariance x: 0.25
Covariance y: 0.25
Covariance yaw: 0.06853891909122467
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /initialpose
- Class: rviz_default_plugins/SetGoal
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /move_base_simple/goal
- Class: rviz_default_plugins/PublishPoint
Single click: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /clicked_point
Transformation:
Current:
Class: rviz_default_plugins/TF
Value: true
Views:
Current:
Class: rviz_default_plugins/Orbit
Distance: 1.0510121583938599
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.18814913928508759
Y: -0.17941315472126007
Z: 0.14549313485622406
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: -1.5697963237762451
Target Frame: <Fixed Frame>
Value: Orbit (rviz_default_plugins)
Yaw: 4.730405330657959
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1016
Hide Left Dock: false
Hide Right Dock: true
Image:
collapsed: false
QMainWindow State: 000000ff00000000fd0000000400000000000001560000039efc0200000010fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000001bb000000c900fffffffb0000000a0049006d006100670065010000010c0000001c0000000000000000fb0000000a0049006d006100670065010000012e0000001f0000000000000000fb0000000a0049006d00610067006501000001530000001c0000000000000000fb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650100000175000000190000000000000000fb0000000a0049006d00610067006501000001fe000000cd0000002800fffffffb0000000a0049006d00610067006501000002d10000010a0000002800fffffffb0000000a0049006d0061006700650000000233000000b70000000000000000fb0000000a0049006d00610067006501000002b2000001290000000000000000000000010000010f000001effc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000001ef000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b20000000000000000000000020000073d000000a9fc0100000002fb0000000a0049006d00610067006503000001c5000000bb000001f8000001b0fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000005da0000039e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f0
06c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1846
X: 74
Y: 27

View File

@@ -0,0 +1,363 @@
Panels:
- Class: rviz_common/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /RobotModel1
- /RobotModel1/Description Topic1
- /TF1/Frames1
- /PointCloud21
- /Image1
- /Image2
Splitter Ratio: 0.36195287108421326
Tree Height: 308
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
Expanded:
- /2D Pose Estimate1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz_common/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz_default_plugins/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Class: rviz_default_plugins/RobotModel
Collision Enabled: false
Description File: ""
Description Source: Topic
Description Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /robot_description
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
camera_accel_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_accel_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_gyro_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_gyro_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_imu_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_usb_plug_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Mass Properties:
Inertia: false
Mass: false
Name: RobotModel
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Class: rviz_default_plugins/TF
Enabled: true
Frame Timeout: 15
Frames:
All Enabled: true
base_link:
Value: true
camera_accel_frame:
Value: true
camera_accel_optical_frame:
Value: true
camera_bottom_screw_frame:
Value: true
camera_color_frame:
Value: true
camera_color_optical_frame:
Value: true
camera_depth_frame:
Value: true
camera_depth_optical_frame:
Value: true
camera_gyro_frame:
Value: true
camera_gyro_optical_frame:
Value: true
camera_imu_optical_frame:
Value: true
camera_infra1_frame:
Value: true
camera_infra1_optical_frame:
Value: true
camera_infra2_frame:
Value: true
camera_infra2_optical_frame:
Value: true
camera_link:
Value: true
camera_usb_plug_link:
Value: true
Marker Scale: 0.10000000149011612
Name: TF
Show Arrows: true
Show Axes: true
Show Names: false
Tree:
base_link:
camera_bottom_screw_frame:
camera_link:
camera_accel_frame:
camera_accel_optical_frame:
{}
camera_color_frame:
camera_color_optical_frame:
{}
camera_depth_frame:
camera_depth_optical_frame:
{}
camera_gyro_frame:
camera_gyro_optical_frame:
camera_imu_optical_frame:
{}
camera_infra1_frame:
camera_infra1_optical_frame:
{}
camera_infra2_frame:
camera_infra2_optical_frame:
{}
camera_usb_plug_link:
{}
Update Interval: 0
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic:
Depth: 5
Durability Policy: Volatile
Filter size: 10
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/depth/color/points
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/color/image_raw
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/depth/image_rect_raw
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz_default_plugins/Interact
Hide Inactive Objects: true
- Class: rviz_default_plugins/MoveCamera
- Class: rviz_default_plugins/Select
- Class: rviz_default_plugins/FocusCamera
- Class: rviz_default_plugins/Measure
Line color: 128; 128; 0
- Class: rviz_default_plugins/SetInitialPose
Covariance x: 0.25
Covariance y: 0.25
Covariance yaw: 0.06853891909122467
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /initialpose
- Class: rviz_default_plugins/SetGoal
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /goal_pose
- Class: rviz_default_plugins/PublishPoint
Single click: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /clicked_point
Transformation:
Current:
Class: rviz_default_plugins/TF
Value: true
Views:
Current:
Class: rviz_default_plugins/Orbit
Distance: 4.639365196228027
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0.0008243769989348948
Y: -0.00015415334200952202
Z: 0.0003343875869177282
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 1.2247962951660156
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 3.343583583831787
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1016
Hide Left Dock: false
Hide Right Dock: false
Image:
collapsed: false
QMainWindow State: 000000ff00000000fd0000000400000000000002540000039efc020000000cfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000001bf000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650100000132000000b00000000000000000fb0000000a0049006d00610067006501000001e8000000ed0000000000000000fb0000000a0049006d0061006700650100000202000000e90000002800fffffffb0000000a0049006d00610067006501000002f1000000ea0000002800ffffff000000010000010f0000039efc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000039e000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000003c70000039e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: false
Width: 1846
X: 74
Y: 27

View File

@@ -0,0 +1,33 @@
// Copyright 2025 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <memory>
#include <librealsense2/rs.hpp>
#include <sensor_params.h>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <ros_sensor.h>
#include "named_filter.h"
namespace realsense2_camera
{
// Wraps the librealsense align-to-depth processing block as a NamedFilter.
// The extra callback lets the owning node react when the ROS parameter that
// enables/disables depth alignment changes at runtime.
class AlignDepthFilter : public NamedFilter
{
public:
// update_align_depth_func: invoked when the align-depth parameter is updated.
// is_enabled defaults to false, i.e. alignment is off unless requested.
AlignDepthFilter(std::shared_ptr<rs2::filter> filter, std::function<void(const rclcpp::Parameter&)> update_align_depth_func,
std::shared_ptr<Parameters> parameters, rclcpp::Logger logger, bool is_enabled = false);
};
}

View File

@@ -0,0 +1,398 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
#include "constants.h"
// cv_bridge.h is the last supported include name up to ROS 2 Humble; newer distros ship cv_bridge.hpp
// (the CV_BRDIGE_HAS_HPP spelling below intentionally matches the macro defined by the build system)
#if defined(CV_BRDIGE_HAS_HPP)
#include <cv_bridge/cv_bridge.hpp>
#else
#include <cv_bridge/cv_bridge.h>
#endif
#include <diagnostic_updater/diagnostic_updater.hpp>
#include <diagnostic_updater/publisher.hpp>
#include <std_srvs/srv/empty.hpp>
#include "realsense2_camera_msgs/msg/imu_info.hpp"
#include "realsense2_camera_msgs/msg/extrinsics.hpp"
#include "realsense2_camera_msgs/msg/metadata.hpp"
#include "realsense2_camera_msgs/msg/rgbd.hpp"
#include "realsense2_camera_msgs/srv/device_info.hpp"
#include "realsense2_camera_msgs/srv/calib_config_read.hpp"
#include "realsense2_camera_msgs/srv/calib_config_write.hpp"
#include "rclcpp_action/rclcpp_action.hpp"
#include "realsense2_camera_msgs/action/triggered_calibration.hpp"
#include <librealsense2/hpp/rs_processing.hpp>
#include <librealsense2/rs_advanced_mode.hpp>
#include <sensor_msgs/image_encodings.hpp>
#include <sensor_msgs/msg/camera_info.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <sensor_msgs/msg/imu.hpp>
#if defined(HUMBLE) || defined(IRON) || defined(JAZZY)
#include <tf2/LinearMath/Quaternion.h>
#else
#include <tf2/LinearMath/Quaternion.hpp>
#endif
#include <tf2_ros/transform_broadcaster.h>
#include <tf2_ros/static_transform_broadcaster.h>
#include <eigen3/Eigen/Geometry>
#include <condition_variable>
#include <ros_sensor.h>
#include <named_filter.h>
#if defined (ACCELERATE_GPU_WITH_GLSL)
#include <gl_window.h>
#endif
#include <queue>
#include <mutex>
#include <atomic>
#include <thread>
using realsense2_camera_msgs::msg::Extrinsics;
using realsense2_camera_msgs::msg::IMUInfo;
using realsense2_camera_msgs::msg::RGBD;
#define FRAME_ID(sip) (static_cast<std::ostringstream&&>(std::ostringstream() << _camera_name << "_" << STREAM_NAME(sip) << "_frame")).str()
#define IMU_FRAME_ID (static_cast<std::ostringstream&&>(std::ostringstream() << _camera_name << "_imu_frame")).str()
#define IMU_OPTICAL_FRAME_ID (static_cast<std::ostringstream&&>(std::ostringstream() << _camera_name << "_imu_optical_frame")).str()
#define OPTICAL_FRAME_ID(sip) (static_cast<std::ostringstream&&>(std::ostringstream() << _camera_name << "_" << STREAM_NAME(sip) << "_optical_frame")).str()
#define ALIGNED_DEPTH_TO_FRAME_ID(sip) (static_cast<std::ostringstream&&>(std::ostringstream() << _camera_name << "_" << "aligned_depth_to_" << STREAM_NAME(sip) << "_frame")).str()
namespace realsense2_camera
{
typedef std::pair<rs2_stream, int> stream_index_pair;
class image_publisher; // forward declaration
// Thin callable adapter over rs2::asynchronous_syncer so the syncer itself can
// be passed wherever a frame callback is expected; each incoming frame is
// forwarded to the syncer via invoke().
class PipelineSyncer : public rs2::asynchronous_syncer
{
public:
void operator()(rs2::frame f) const
{
invoke(std::move(f));
}
};
// Publisher wrapper for IMU messages that can be paused: while paused,
// messages accumulate in a bounded queue and are flushed in order on Resume().
// Used to hold back IMU data until matching image frames have been published.
class SyncedImuPublisher
{
public:
// waiting_list_size bounds the queue of messages held while paused.
SyncedImuPublisher(rclcpp::Publisher<sensor_msgs::msg::Imu>::SharedPtr imu_publisher,
std::size_t waiting_list_size=1000);
~SyncedImuPublisher();
void Pause(); // Pause sending messages. All messages from now on are saved in queue.
void Resume(); // Send all pending messages and allow sending future messages.
void Publish(sensor_msgs::msg::Imu msg); //either send or hold message.
size_t getNumSubscribers();
// Toggle the publisher entirely; disabled publishers drop messages.
void Enable(bool is_enabled) {_is_enabled=is_enabled;};
private:
// Flush the pending-message queue to the underlying publisher.
void PublishPendingMessages();
private:
std::mutex _mutex; // guards the pending queue across Pause/Resume/Publish
rclcpp::Publisher<sensor_msgs::msg::Imu>::SharedPtr _publisher;
bool _pause_mode;
std::queue<sensor_msgs::msg::Imu> _pending_messages;
std::size_t _waiting_list_size;
bool _is_enabled;
};
class AlignDepthFilter;
class PointcloudFilter;
// Core implementation class of the RealSense ROS 2 node. Owns the rs2::device,
// discovers its sensors and profiles, exposes them as ROS parameters, and
// publishes images, IMU, point clouds, TF, diagnostics, services and the
// triggered-calibration action. Declaration only; definitions live in the
// corresponding .cpp files.
class BaseRealSenseNode
{
public:
BaseRealSenseNode(RosNodeBase& node,
rs2::device dev,
std::shared_ptr<Parameters> parameters,
bool use_intra_process = false);
~BaseRealSenseNode();
// Entry point: sets up the device, sensors and all publishers.
void publishTopics();
public:
using TriggeredCalibration = realsense2_camera_msgs::action::TriggeredCalibration;
using GoalHandleTriggeredCalibration = rclcpp_action::ServerGoalHandle<TriggeredCalibration>;
// How accel and gyro samples are combined into one Imu message.
enum class imu_sync_method{NONE, COPY, LINEAR_INTERPOLATION};
protected:
// Minimal 3-component float vector used for TF translations.
class float3
{
public:
float x, y, z;
public:
float3& operator*=(const float& factor)
{
x*=factor;
y*=factor;
z*=factor;
return (*this);
}
float3& operator+=(const float3& other)
{
x+=other.x;
y+=other.y;
z+=other.z;
return (*this);
}
};
std::string _base_frame_id;
bool _is_running;
RosNodeBase& _node;
std::string _camera_name;
std::vector<rs2_option> _monitor_options;
rclcpp::Logger _logger;
// ROS services: HW reset, device info, calibration-config read/write.
rclcpp::Service<std_srvs::srv::Empty>::SharedPtr _reset_srv;
rclcpp::Service<realsense2_camera_msgs::srv::DeviceInfo>::SharedPtr _device_info_srv;
rclcpp::Service<realsense2_camera_msgs::srv::CalibConfigRead>::SharedPtr _calib_config_read_srv;
rclcpp::Service<realsense2_camera_msgs::srv::CalibConfigWrite>::SharedPtr _calib_config_write_srv;
rclcpp_action::Server<TriggeredCalibration>::SharedPtr _triggered_calibration_action_server;
std::shared_ptr<Parameters> _parameters;
std::list<std::string> _parameters_names;
// --- TF and extrinsics helpers ---
void restartStaticTransformBroadcaster();
void publishExtrinsicsTopic(const stream_index_pair& sip, const rs2_extrinsics& ex);
void calcAndAppendTransformMsgs(const rs2::stream_profile& profile, const rs2::stream_profile& base_profile);
// --- service / action handlers ---
void handleHWReset(const std_srvs::srv::Empty::Request::SharedPtr req,
const std_srvs::srv::Empty::Response::SharedPtr res);
void getDeviceInfo(const realsense2_camera_msgs::srv::DeviceInfo::Request::SharedPtr req,
realsense2_camera_msgs::srv::DeviceInfo::Response::SharedPtr res);
void CalibConfigReadService(const realsense2_camera_msgs::srv::CalibConfigRead::Request::SharedPtr req,
realsense2_camera_msgs::srv::CalibConfigRead::Response::SharedPtr res);
void CalibConfigWriteService(const realsense2_camera_msgs::srv::CalibConfigWrite::Request::SharedPtr req,
realsense2_camera_msgs::srv::CalibConfigWrite::Response::SharedPtr res);
rclcpp_action::GoalResponse TriggeredCalibrationHandleGoal(const rclcpp_action::GoalUUID & uuid, std::shared_ptr<const TriggeredCalibration::Goal> goal);
rclcpp_action::CancelResponse TriggeredCalibrationHandleCancel(const std::shared_ptr<GoalHandleTriggeredCalibration> goal_handle);
void TriggeredCalibrationHandleAccepted(const std::shared_ptr<GoalHandleTriggeredCalibration> goal_handle);
void TriggeredCalibrationExecute(const std::shared_ptr<GoalHandleTriggeredCalibration> goal_handle);
// rotation is a row-major 3x3 matrix packed into 9 floats.
tf2::Quaternion rotationMatrixToQuaternion(const float rotation[9]) const;
void append_static_tf_msg(const rclcpp::Time& t,
const float3& trans,
const tf2::Quaternion& q,
const std::string& from,
const std::string& to);
void erase_static_tf_msg(const std::string& frame_id,
const std::string& child_frame_id);
void eraseTransformMsgs(const stream_index_pair& sip, const rs2::stream_profile& profile);
void setup();
private:
// One buffered IMU sample (accel or gyro) with its stream type and timestamp.
class CimuData
{
public:
CimuData() : m_data({0,0,0}), m_time_ns(-1) {};
CimuData(const stream_index_pair type, Eigen::Vector3d data, double time):
m_type(type),
m_data(data),
m_time_ns(time){};
// A default-constructed sample has m_time_ns == -1, i.e. "not set".
bool is_set() {return m_time_ns > 0;};
public:
stream_index_pair m_type;
Eigen::Vector3d m_data;
double m_time_ns;
};
// --- setup / parameter plumbing ---
void getParameters();
void setDynamicParams();
void clearParameters();
void setupDevice();
void hardwareResetRequest();
void setupPublishers();
void enable_devices();
void setupFilters();
// --- time base and depth-image helpers ---
bool setBaseTime(double frame_time, rs2_timestamp_domain time_domain);
uint64_t millisecondsToNanoseconds(double timestamp_ms);
rclcpp::Time frameSystemTimeSec(rs2::frame frame);
cv::Mat& fix_depth_scale(const cv::Mat& from_image, cv::Mat& to_image);
void clip_depth(rs2::depth_frame depth_frame, float clipping_dist);
// --- calibration / camera-info maintenance ---
void updateProfilesStreamCalibData(const std::vector<rs2::stream_profile>& profiles);
void updateExtrinsicsCalibData(const rs2::video_stream_profile& left_video_profile, const rs2::video_stream_profile& right_video_profile);
void updateStreamCalibData(const rs2::video_stream_profile& video_profile);
void SetBaseStream();
// --- TF publishing ---
void publishStaticTransforms();
void startDynamicTf();
void publishDynamicTransforms();
void publishPointCloud(rs2::points f, const rclcpp::Time& t, const rs2::frameset& frameset);
Extrinsics rsExtrinsicsToMsg(const rs2_extrinsics& extrinsics) const;
IMUInfo getImuInfo(const rs2::stream_profile& profile);
void initializeFormatsMaps();
// --- image conversion and publishing ---
bool fillROSImageMsgAndReturnStatus(
const cv::Mat& cv_matrix_image,
const stream_index_pair& stream,
unsigned int width,
unsigned int height,
const rs2_format& stream_format,
const rclcpp::Time& t,
sensor_msgs::msg::Image* img_msg_ptr);
bool fillCVMatImageAndReturnStatus(
rs2::frame& frame,
std::map<stream_index_pair, cv::Mat>& images,
unsigned int width,
unsigned int height,
const stream_index_pair& stream);
void publishFrame(
rs2::frame f,
const rclcpp::Time& t,
const stream_index_pair& stream,
std::map<stream_index_pair, cv::Mat>& images,
const std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr>& info_publishers,
const std::map<stream_index_pair, std::shared_ptr<image_publisher>>& image_publishers,
const bool is_publishMetadata = true);
void publishRGBD(
const cv::Mat& rgb_cv_matrix,
const rs2_format& color_format,
const cv::Mat& depth_cv_matrix,
const rs2_format& depth_format,
const rclcpp::Time& t);
void publishMetadata(rs2::frame f, const rclcpp::Time& header_time, const std::string& frame_id);
// --- IMU handling ---
sensor_msgs::msg::Imu CreateUnitedMessage(const CimuData accel_data, const CimuData gyro_data);
void FillImuData_Copy(const CimuData imu_data, std::deque<sensor_msgs::msg::Imu>& imu_msgs);
void ImuMessage_AddDefaultValues(sensor_msgs::msg::Imu& imu_msg);
void FillImuData_LinearInterpolation(const CimuData imu_data, std::deque<sensor_msgs::msg::Imu>& imu_msgs);
void imu_callback(rs2::frame frame);
void imu_callback_sync(rs2::frame frame, imu_sync_method sync_method=imu_sync_method::COPY);
void multiple_message_callback(rs2::frame frame, imu_sync_method sync_method);
void frame_callback(rs2::frame frame);
// --- diagnostics and sensor lifecycle ---
void startDiagnosticsUpdater();
void monitoringProfileChanges();
void publish_temperature();
void setAvailableSensors();
void setCallbackFunctions();
void updateSensors();
void startUpdatedSensors();
void stopRequiredSensors();
void publishServices();
void publishActions();
void startPublishers(const std::vector<rs2::stream_profile>& profiles, const RosSensor& sensor);
void startRGBDPublisherIfNeeded();
void stopPublishers(const std::vector<rs2::stream_profile>& profiles);
#if defined (ACCELERATE_GPU_WITH_GLSL)
void initOpenGLProcessing(bool use_gpu_processing);
void shutdownOpenGLProcessing();
void glfwPollEventCallback();
#endif
rs2::device _dev;
std::map<stream_index_pair, rs2::sensor> _sensors;
std::map<std::string, std::function<void(rs2::frame)>> _sensors_callback;
std::string _json_file_path;
float _depth_scale_meters;
float _clipping_distance;
double _linear_accel_cov;
double _angular_velocity_cov;
bool _hold_back_imu_for_frames;
std::map<stream_index_pair, bool> _enable;
bool _publish_tf;
double _tf_publish_rate, _diagnostics_period;
// Mutexes guarding TF publishing, sensor updates and profile changes.
std::mutex _publish_tf_mutex;
std::mutex _update_sensor_mutex;
std::mutex _profile_changes_mutex;
std::mutex _publish_dynamic_tf_mutex;
std::shared_ptr<tf2_ros::StaticTransformBroadcaster> _static_tf_broadcaster;
std::shared_ptr<tf2_ros::TransformBroadcaster> _dynamic_tf_broadcaster;
std::vector<geometry_msgs::msg::TransformStamped> _static_tf_msgs;
std::shared_ptr<std::thread> _tf_t; // dynamic-TF publishing thread
bool _use_intra_process;
// Per-stream publishers, keyed by (rs2_stream, index) pairs.
std::map<stream_index_pair, std::shared_ptr<image_publisher>> _image_publishers;
std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::Imu>::SharedPtr> _imu_publishers;
std::shared_ptr<SyncedImuPublisher> _synced_imu_publisher;
std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr> _info_publishers;
std::map<stream_index_pair, rclcpp::Publisher<realsense2_camera_msgs::msg::Metadata>::SharedPtr> _metadata_publishers;
std::map<stream_index_pair, rclcpp::Publisher<IMUInfo>::SharedPtr> _imu_info_publishers;
std::map<stream_index_pair, rclcpp::Publisher<Extrinsics>::SharedPtr> _extrinsics_publishers;
rclcpp::Publisher<realsense2_camera_msgs::msg::RGBD>::SharedPtr _rgbd_publisher;
std::map<stream_index_pair, cv::Mat> _images;
// librealsense format -> ROS encoding string / OpenCV matrix type.
std::map<rs2_format, std::string> _rs_format_to_ros_format;
std::map<rs2_format, int> _rs_format_to_cv_format;
std::map<stream_index_pair, sensor_msgs::msg::CameraInfo> _camera_info;
std::atomic_bool _is_initialized_time_base;
double _camera_time_base;
rclcpp::Time _ros_time_base;
bool _sync_frames;
bool _enable_rgbd;
bool _is_color_enabled;
bool _is_depth_enabled;
bool _is_accel_enabled;
bool _is_gyro_enabled;
bool _pointcloud;
imu_sync_method _imu_sync_method;
stream_index_pair _pointcloud_texture;
PipelineSyncer _syncer;
rs2::asynchronous_syncer _asyncer;
// Processing-block filters applied to frames before publishing.
std::shared_ptr<NamedFilter> _colorizer_filter;
std::shared_ptr<AlignDepthFilter> _align_depth_filter;
std::shared_ptr<PointcloudFilter> _pc_filter;
std::vector<std::shared_ptr<NamedFilter>> _filters;
std::vector<rs2::sensor> _dev_sensors;
std::vector<std::unique_ptr<RosSensor>> _available_ros_sensors;
std::map<rs2_stream, std::shared_ptr<rs2::align>> _align;
std::map<stream_index_pair, cv::Mat> _depth_aligned_image;
std::map<stream_index_pair, cv::Mat> _depth_scaled_image;
std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr> _depth_aligned_info_publisher;
std::map<stream_index_pair, std::shared_ptr<image_publisher>> _depth_aligned_image_publishers;
std::map<std::string, rs2::region_of_interest> _auto_exposure_roi;
std::map<rs2_stream, bool> _is_first_frame;
// Background threads: temperature/diagnostics monitor and profile-change monitor.
std::shared_ptr<std::thread> _monitoring_t;
std::shared_ptr<std::thread> _monitoring_pc; //pc = profile changes
mutable std::condition_variable _cv_temp, _cv_mpc, _cv_tf;
bool _is_profile_changed;
bool _is_align_depth_changed;
std::shared_ptr<diagnostic_updater::Updater> _diagnostics_updater;
rs2::stream_profile _base_profile;
#if defined (ACCELERATE_GPU_WITH_GLSL)
GLwindow _app;
rclcpp::TimerBase::SharedPtr _timer;
bool _accelerate_gpu_with_glsl;
bool _is_accelerate_gpu_with_glsl_changed;
#endif
};//end class
}

View File

@@ -0,0 +1,103 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "ros_node_base.h"
#define REALSENSE_ROS_MAJOR_VERSION 4
#define REALSENSE_ROS_MINOR_VERSION 56
#define REALSENSE_ROS_PATCH_VERSION 4
#define STRINGIFY(arg) #arg
#define VAR_ARG_STRING(arg) STRINGIFY(arg)
/* Return version in "X.Y.Z" format */
#define REALSENSE_ROS_VERSION_STR (VAR_ARG_STRING(REALSENSE_ROS_MAJOR_VERSION.REALSENSE_ROS_MINOR_VERSION.REALSENSE_ROS_PATCH_VERSION))
#define ROS_DEBUG(...) RCLCPP_DEBUG(_logger, __VA_ARGS__)
#define ROS_INFO(...) RCLCPP_INFO(_logger, __VA_ARGS__)
#define ROS_WARN(...) RCLCPP_WARN(_logger, __VA_ARGS__)
#define ROS_ERROR(...) RCLCPP_ERROR(_logger, __VA_ARGS__)
// Based on: https://docs.ros2.org/latest/api/rclcpp/logging_8hpp.html
#define ROS_DEBUG_STREAM(msg) RCLCPP_DEBUG_STREAM(_logger, msg)
#define ROS_INFO_STREAM(msg) RCLCPP_INFO_STREAM(_logger, msg)
#define ROS_WARN_STREAM(msg) RCLCPP_WARN_STREAM(_logger, msg)
#define ROS_ERROR_STREAM(msg) RCLCPP_ERROR_STREAM(_logger, msg)
#define ROS_FATAL_STREAM(msg) RCLCPP_FATAL_STREAM(_logger, msg)
#define ROS_DEBUG_STREAM_ONCE(msg) RCLCPP_DEBUG_STREAM_ONCE(_logger, msg)
#define ROS_INFO_STREAM_ONCE(msg) RCLCPP_INFO_STREAM_ONCE(_logger, msg)
#define ROS_WARN_STREAM_COND(cond, msg) RCLCPP_WARN_STREAM_EXPRESSION(_logger, cond, msg)
#define ROS_WARN_ONCE(msg) RCLCPP_WARN_ONCE(_logger, msg)
#define ROS_WARN_COND(cond, ...) RCLCPP_WARN_EXPRESSION(_logger, cond, __VA_ARGS__)
namespace realsense2_camera
{
// USB product IDs for the supported RealSense device families.
const uint16_t RS400_PID = 0x0ad1; // PSR
const uint16_t RS410_PID = 0x0ad2; // ASR
const uint16_t RS415_PID = 0x0ad3; // ASRC
const uint16_t RS430_PID = 0x0ad4; // AWG
const uint16_t RS430_MM_PID = 0x0ad5; // AWGT
const uint16_t RS_USB2_PID = 0x0ad6; // USB2
const uint16_t RS420_PID = 0x0af6; // PWG
const uint16_t RS421_PID = 0x1155; // D421
const uint16_t RS420_MM_PID = 0x0afe; // PWGT
const uint16_t RS410_MM_PID = 0x0aff; // ASR
const uint16_t RS400_MM_PID = 0x0b00; // PSR
const uint16_t RS430_MM_RGB_PID = 0x0b01; // AWGCT
const uint16_t RS460_PID = 0x0b03; // DS5U
const uint16_t RS435_RGB_PID = 0x0b07; // AWGC
const uint16_t RS435i_RGB_PID = 0x0B3A; // AWGC_MM
const uint16_t RS416_RGB_PID = 0x0B52; // F416 RGB
const uint16_t RS430i_PID = 0x0b4b; // D430i
const uint16_t RS405_PID = 0x0B5B; // DS5U
const uint16_t RS455_PID = 0x0B5C; // D455
const uint16_t RS457_PID = 0xABCD; // D457
const uint16_t RS555_PID = 0x0B56; // D555
// Default values for the node's ROS parameters.
const bool ALLOW_NO_TEXTURE_POINTS = false;
const bool ORDERED_PC = false;
const bool SYNC_FRAMES = false;
const bool ENABLE_RGBD = false;
const bool PUBLISH_TF = true;
const double TF_PUBLISH_RATE = 0; // 0 Hz means TF is published once as a static transform
const double DIAGNOSTICS_PERIOD = 0.0;
// Default QoS profile names, resolved by name at runtime.
const std::string IMAGE_QOS = "SYSTEM_DEFAULT";
const std::string DEFAULT_QOS = "DEFAULT";
const std::string HID_QOS = "SENSOR_DATA";
const bool HOLD_BACK_IMU_FOR_FRAMES = false;
const std::string DEFAULT_BASE_FRAME_ID = "link";
const std::string DEFAULT_IMU_OPTICAL_FRAME_ID = "camera_imu_optical_frame";
// Depth units conversion: raw depth values are in millimeters (0.001 m).
const float ROS_DEPTH_SCALE = 0.001;
// Latched-style QoS: keep-last-1, reliable, transient-local — late joiners
// still receive the last published message (used for extrinsics etc.).
static const rmw_qos_profile_t rmw_qos_profile_latched =
{
RMW_QOS_POLICY_HISTORY_KEEP_LAST,
1,
RMW_QOS_POLICY_RELIABILITY_RELIABLE,
RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL,
RMW_QOS_DEADLINE_DEFAULT,
RMW_QOS_LIFESPAN_DEFAULT,
RMW_QOS_POLICY_LIVELINESS_SYSTEM_DEFAULT,
RMW_QOS_LIVELINESS_LEASE_DURATION_DEFAULT,
false
};
} // namespace realsense2_camera

View File

@@ -0,0 +1,73 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ros_utils.h>
#include "constants.h"
#include <deque>
#include "ros_param_backend.h"
namespace realsense2_camera
{
// Central ROS-parameter manager: declares parameters, keeps per-parameter
// update callbacks, and runs a worker thread that applies queued update
// functions. Template definitions live in the corresponding .cpp / inline file.
class Parameters
{
public:
Parameters(RosNodeBase& node);
~Parameters();
// Declare a parameter (or fetch it if already declared) and register an
// optional change callback; returns the effective value.
template <class T>
T setParam(std::string param_name, const T& initial_value,
std::function<void(const rclcpp::Parameter&)> func = std::function<void(const rclcpp::Parameter&)>(),
rcl_interfaces::msg::ParameterDescriptor descriptor=rcl_interfaces::msg::ParameterDescriptor());
// One-shot parameter: read its value, then remove it from the server.
template <class T>
T readAndDeleteParam(std::string param_name, const T& initial_value);
// Like setParam, but binds the parameter to a caller-owned variable that is
// kept in sync on updates.
template <class T>
void setParamT(std::string param_name, T& param,
std::function<void(const rclcpp::Parameter&)> func = std::function<void(const rclcpp::Parameter&)>(),
rcl_interfaces::msg::ParameterDescriptor descriptor=rcl_interfaces::msg::ParameterDescriptor());
template <class T>
void setParamValue(T& param, const T& value); // function updates the parameter value both locally and in the parameters server
void setRosParamValue(const std::string param_name, void const* const value); // function updates the parameters server
void removeParam(std::string param_name);
// Queue update functions to be executed on the worker thread.
void pushUpdateFunctions(std::vector<std::function<void()> > funcs);
template <class T>
void queueSetRosValue(const std::string& param_name, const T value);
template<typename T>
T getOrDeclareParameter(const std::string param_name, const T& initial_value);
template <class T>
T getParam(std::string param_name);
private:
// Worker loop: waits on _update_functions_cv and drains _update_functions_v.
void monitor_update_functions();
private:
RosNodeBase& _node;
rclcpp::Logger _logger;
std::map<std::string, std::function<void(const rclcpp::Parameter&)> > _param_functions;
// Maps a bound variable's address back to its parameter name (for setParamT).
std::map<void*, std::string> _param_names;
ParametersBackend _params_backend;
std::condition_variable _update_functions_cv;
bool _is_running;
std::shared_ptr<std::thread> _update_functions_t;
std::deque<std::function<void()> > _update_functions_v;
// Names of parameters this object set itself (to ignore self-triggered callbacks).
std::list<std::string> self_set_parameters;
std::mutex _mu;
};
}

View File

@@ -0,0 +1,84 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#if defined (ACCELERATE_GPU_WITH_GLSL)
#define GL_SILENCE_DEPRECATION
#define GLFW_INCLUDE_GLU
#include <GLFW/glfw3.h>
#include <GL/gl.h>
#include <iostream>
#include <librealsense2-gl/rs_processing_gl.hpp> // Include GPU-Processing API
#ifndef PI
#define PI 3.14159265358979323846
#define PI_FL 3.141592f
#endif
// RAII wrapper around a hidden GLFW window used as the OpenGL context for
// GPU-accelerated (GLSL) frame processing. The window is created invisible
// (GLFW_VISIBLE=0) since only its GL context is needed, not on-screen output.
class GLwindow
{
public:
GLwindow(int width, int height, const char* title)
: _width(width), _height(height)
{
// NOTE(review): glfwInit() return value is not checked, and if window
// creation throws below, glfwTerminate() is never called — confirm this
// is acceptable for the single-window lifetime of this node.
glfwInit();
glfwWindowHint(GLFW_VISIBLE, 0);
win = glfwCreateWindow(width, height, title, nullptr, nullptr);
if (!win)
throw std::runtime_error("Could not open OpenGL window, please check your graphic drivers or use the textual SDK tools");
glfwMakeContextCurrent(win);
glfwSetWindowUserPointer(win, this);
}
~GLwindow()
{
glfwDestroyWindow(win);
glfwTerminate();
}
// Request the window (and thus the processing loop) to shut down.
void close()
{
glfwSetWindowShouldClose(win, 1);
}
float width() const { return float(_width); }
float height() const { return float(_height); }
// True while the window is alive; also pumps the GLFW event queue.
operator bool()
{
auto res = !glfwWindowShouldClose(win);
glfwPollEvents();
return res;
}
// Implicit conversion so the wrapper can be passed to raw GLFW APIs.
operator GLFWwindow* () { return win; }
private:
GLFWwindow* win;
int _width, _height;
};
#endif

View File

@@ -0,0 +1,59 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "ros_node_base.h"
#include <sensor_msgs/msg/image.hpp>
#include <image_transport/image_transport.hpp>
namespace realsense2_camera {
// Abstract interface for publishing images, hiding whether the concrete
// publisher is a native rclcpp publisher (intra-process capable) or an
// image_transport publisher (adds compressed topics).
class image_publisher
{
public:
// Takes ownership of the message (UniquePtr) to allow zero-copy intra-process.
virtual void publish( sensor_msgs::msg::Image::UniquePtr image_ptr ) = 0;
virtual size_t get_subscription_count() const = 0;
virtual ~image_publisher() = default;
};
// Native RCL implementation of an image publisher (needed for intra-process communication)
// Native RCL implementation of an image publisher (needed for intra-process communication)
class image_rcl_publisher : public image_publisher
{
public:
image_rcl_publisher( RosNodeBase & node,
const std::string & topic_name,
const rmw_qos_profile_t & qos );
// Forwards the message to the underlying rclcpp publisher.
void publish( sensor_msgs::msg::Image::UniquePtr image_ptr ) override;
size_t get_subscription_count() const override;
private:
rclcpp::Publisher< sensor_msgs::msg::Image >::SharedPtr image_publisher_impl;
};
// image_transport implementation of an image publisher (adds a compressed image topic)
// image_transport implementation of an image publisher (adds a compressed image topic)
// Note: takes rclcpp::Node& (not RosNodeBase&) because image_transport requires it.
class image_transport_publisher : public image_publisher
{
public:
image_transport_publisher( rclcpp::Node & node,
const std::string & topic_name,
const rmw_qos_profile_t & qos );
void publish( sensor_msgs::msg::Image::UniquePtr image_ptr ) override;
size_t get_subscription_count() const override;
private:
std::shared_ptr< image_transport::Publisher > image_publisher_impl;
};
} // namespace realsense2_camera

View File

@@ -0,0 +1,50 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <memory>
#include <librealsense2/rs.hpp>
#include <sensor_params.h>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <ros_sensor.h>
namespace realsense2_camera
{
// Base wrapper for a librealsense processing block (rs2::filter) that exposes
// the filter's options as ROS parameters and gates processing on an
// enable/disable parameter.
class NamedFilter
{
public:
// is_set_parameters controls whether the filter's options are registered as
// ROS parameters at construction time.
NamedFilter(std::shared_ptr<rs2::filter> filter, std::shared_ptr<Parameters> parameters, rclcpp::Logger logger, bool is_enabled=false, bool is_set_parameters=true);
bool is_enabled() {return _is_enabled;};
// Run the wrapped filter on a full frameset or a single frame.
rs2::frameset Process(rs2::frameset frameset);
rs2::frame Process(rs2::frame frame);
protected:
// Register the enable parameter (with optional extra callback) plus the
// filter's own option parameters.
void setParameters(std::function<void(const rclcpp::Parameter&)> enable_param_func = std::function<void(const rclcpp::Parameter&)>());
private:
void clearParameters();
public:
std::shared_ptr<rs2::filter> _filter;
protected:
bool _is_enabled;
SensorParams _params;
std::vector<std::string> _parameters_names; // names registered, for cleanup
rclcpp::Logger _logger;
};
}

View File

@@ -0,0 +1,47 @@
// Copyright 2025 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <memory>
#include <librealsense2/rs.hpp>
#include <sensor_params.h>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <ros_sensor.h>
#include "named_filter.h"
namespace realsense2_camera
{
// NamedFilter specialization for the pointcloud processing block; in addition
// to running the filter it owns and publishes the PointCloud2 topic.
class PointcloudFilter : public NamedFilter
{
public:
PointcloudFilter(std::shared_ptr<rs2::filter> filter, RosNodeBase& node, std::shared_ptr<Parameters> parameters, rclcpp::Logger logger, bool is_enabled=false);
// (Re)create the PointCloud2 publisher according to the current QoS setting.
void setPublisher();
// Convert rs2::points (textured from frameset) into PointCloud2 and publish.
void Publish(rs2::points pc, const rclcpp::Time& t, const rs2::frameset& frameset, const std::string& frame_id);
private:
void setParameters();
private:
bool _is_enabled_pc;
RosNodeBase& _node;
bool _allow_no_texture_points; // keep points that have no color texture
bool _ordered_pc;              // publish organized (height>1) cloud
std::mutex _mutex_publisher;   // guards publisher re-creation vs. Publish
rclcpp::Publisher<sensor_msgs::msg::PointCloud2>::SharedPtr _pointcloud_publisher;
std::string _pointcloud_qos;
};
}

View File

@@ -0,0 +1,105 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <librealsense2/rs.hpp>
#include <sensor_params.h>
#define STREAM_NAME(sip) (static_cast<std::ostringstream&&>(std::ostringstream() << create_graph_resource_name(ros_stream_to_string(sip.first)) << ((sip.second>0) ? std::to_string(sip.second) : ""))).str()
using namespace rs2;
namespace realsense2_camera
{
typedef std::pair<rs2_stream, int> stream_index_pair;
// Abstract base for managing a sensor's stream profiles: which streams are
// enabled, which concrete profile is wanted, and the per-stream QoS settings,
// all exposed as ROS parameters.
class ProfilesManager
{
public:
ProfilesManager(std::shared_ptr<Parameters> parameters, rclcpp::Logger logger);
// True if the given profile matches the currently requested settings.
virtual bool isWantedProfile(const rs2::stream_profile& profile) = 0;
// Declare the ROS parameters for all_profiles; update_sensor_func is invoked
// when a parameter change requires restarting the sensor.
virtual void registerProfileParameters(std::vector<stream_profile> all_profiles, std::function<void()> update_sensor_func) = 0;
// True if at least one managed stream is enabled.
bool isTypeExist();
static std::string profile_string(const rs2::stream_profile& profile);
void registerSensorQOSParam(std::string template_name,
std::set<stream_index_pair> unique_sips,
std::map<stream_index_pair, std::shared_ptr<std::string> >& params,
std::string value);
// Declare one parameter per stream from a name template and bind it to the
// per-stream value; changes trigger update_sensor_func.
template<class T>
void registerSensorUpdateParam(std::string template_name,
std::set<stream_index_pair> unique_sips,
std::map<stream_index_pair, std::shared_ptr<T> >& params,
T value,
std::function<void()> update_sensor_func);
// Append the profiles matching the current settings to wanted_profiles.
void addWantedProfiles(std::vector<rs2::stream_profile>& wanted_profiles);
void clearParameters();
bool hasSIP(const stream_index_pair& sip) const;
// QoS for the image topic / the camera-info topic of a given stream.
rmw_qos_profile_t getQOS(const stream_index_pair& sip) const;
rmw_qos_profile_t getInfoQOS(const stream_index_pair& sip) const;
protected:
std::map<stream_index_pair, rs2::stream_profile> getDefaultProfiles();
protected:
rclcpp::Logger _logger;
SensorParams _params;
std::map<stream_index_pair, std::shared_ptr<bool>> _enabled_profiles;
std::map<stream_index_pair, std::shared_ptr<std::string>> _profiles_image_qos_str, _profiles_info_qos_str;
std::vector<rs2::stream_profile> _all_profiles;
std::vector<std::string> _parameters_names; // declared parameter names, for cleanup
};
// ProfilesManager for video streams: matches profiles on width/height/fps and
// per-stream pixel format, exposed as ROS parameters per module.
class VideoProfilesManager : public ProfilesManager
{
public:
VideoProfilesManager(std::shared_ptr<Parameters> parameters, const std::string& module_name, rclcpp::Logger logger, bool force_image_default_qos = false);
bool isWantedProfile(const rs2::stream_profile& profile) override;
void registerProfileParameters(std::vector<stream_profile> all_profiles, std::function<void()> update_sensor_func) override;
// Currently selected resolution / frame rate per stream type.
int getHeight(rs2_stream stream_type) {return _height[stream_type];};
int getWidth(rs2_stream stream_type) {return _width[stream_type];};
int getFPS(rs2_stream stream_type) {return _fps[stream_type];};
private:
bool isSameProfileValues(const rs2::stream_profile& profile, const int width, const int height, const int fps, const rs2_format format);
// Validates a requested profile against the device and falls back to a
// suitable one if unavailable.
rs2::stream_profile validateAndGetSuitableProfile(rs2_stream stream_type, rs2::stream_profile given_profile);
void registerVideoSensorProfileFormat(stream_index_pair sip);
void registerVideoSensorParams(std::set<stream_index_pair> sips);
// Human-readable description of available profiles, used in parameter descriptors.
std::string get_profiles_descriptions(rs2_stream stream_type);
std::string getProfileFormatsDescriptions(stream_index_pair sip);
private:
std::string _module_name;
std::map<stream_index_pair, rs2_format> _formats;
std::map<rs2_stream, int> _fps, _width, _height;
bool _force_image_default_qos;
};
// ProfilesManager specialization for motion streams (gyro/accel/motion):
// profiles are selected by stream type and FPS only.
class MotionProfilesManager : public ProfilesManager
{
    public:
        using ProfilesManager::ProfilesManager;  // inherit base constructor
        bool isWantedProfile(const rs2::stream_profile& profile) override;
        void registerProfileParameters(std::vector<stream_profile> all_profiles, std::function<void()> update_sensor_func) override;

    private:
        void registerFPSParams();
        bool isSameProfileValues(const rs2::stream_profile& profile, const rs2_stream stype, const int fps);
        // FPS values offered by the device, per stream/index pair.
        std::map<stream_index_pair, std::vector<int>> getAvailableFPSValues();

    protected:
        // Selected FPS per stream/index pair (ROS parameter backed).
        std::map<stream_index_pair, std::shared_ptr<int> > _fps;
};
}

View File

@@ -0,0 +1,80 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
// cpplint: c system headers
#include "constants.h"
#include "base_realsense_node.h"
#include <builtin_interfaces/msg/time.hpp>
#include <console_bridge/console.h>
#include "rclcpp_components/register_node_macro.hpp"
#include "ros_node_base.h"
#include <algorithm>
#include <csignal>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <thread>
namespace realsense2_camera
{
// ROS 2 node (optionally a managed lifecycle node) that discovers a
// RealSense device matching the configured serial/port/type filters,
// instantiates BaseRealSenseNode for it, and reacts to device
// connect/disconnect events.
class RealSenseNodeFactory : public RosNodeBase
{
public:
    explicit RealSenseNodeFactory(const rclcpp::NodeOptions & node_options = rclcpp::NodeOptions());
    RealSenseNodeFactory(
        const std::string & node_name, const std::string & ns,
        const rclcpp::NodeOptions & node_options = rclcpp::NodeOptions());
    virtual ~RealSenseNodeFactory();

#ifdef USE_LIFECYCLE_NODE
    // Lifecycle transition callbacks (only when built as a lifecycle node).
    using CallbackReturn = rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn;
    CallbackReturn on_configure(const rclcpp_lifecycle::State & state) override;
    CallbackReturn on_activate(const rclcpp_lifecycle::State & state) override;
    CallbackReturn on_deactivate(const rclcpp_lifecycle::State & state) override;
    CallbackReturn on_cleanup(const rclcpp_lifecycle::State & state) override;
    CallbackReturn on_shutdown(const rclcpp_lifecycle::State & state) override;
#endif

private:
    void init();
    void closeDevice();
    void startDevice();
    void stopDevice();
    // librealsense callback fired when devices are added/removed.
    void changeDeviceCallback(rs2::event_information& info);
    // Presumably selects from `list` the device matching the filters below.
    void getDevice(rs2::device_list list);
    void tryGetLogSeverity(rs2_log_severity& severity) const;
    static std::string parseUsbPort(std::string line);

    RosNodeBase::SharedPtr _node;
    rs2::device _device;
    std::unique_ptr<BaseRealSenseNode> _realSenseNode;
    rs2::context _ctx;
    // Device-selection filters (empty string presumably means "any").
    std::string _serial_no;
    std::string _usb_port_id;
    std::string _device_type;
    // Timeouts in seconds; exact semantics of negative values defined
    // by the implementation — TODO confirm.
    double _wait_for_device_timeout;
    double _reconnect_timeout;
    bool _initial_reset;
    // Background device-discovery thread and its keep-running flag.
    std::thread _query_thread;
    bool _is_alive;
    rclcpp::Logger _logger;
    std::shared_ptr<Parameters> _parameters;
};
}//end namespace

View File

@@ -0,0 +1,25 @@
// Copyright 2025 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <rclcpp/rclcpp.hpp>
// Define RosNodeBase Alias
// When USE_LIFECYCLE_NODE is defined the driver nodes derive from the
// managed lifecycle node; otherwise they are plain rclcpp nodes.
#ifdef USE_LIFECYCLE_NODE
#include <rclcpp_lifecycle/lifecycle_node.hpp>
using RosNodeBase = rclcpp_lifecycle::LifecycleNode;
#else
using RosNodeBase = rclcpp::Node;
#endif

View File

@@ -0,0 +1,45 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "ros_node_base.h"
namespace realsense2_camera
{
// Thin wrapper around the node's parameters interface that hides the
// rclcpp naming difference between ROS 2 distributions when registering
// an on-set-parameters callback.
class ParametersBackend
{
    public:
        ParametersBackend(RosNodeBase& node) :
            _node(node),
            _logger(node.get_logger())
            {}
        ~ParametersBackend();
#if defined( RCLCPP_HAS_OnSetParametersCallbackType )
        // Newer rclcpp callback type name.
        using ros2_param_callback_type = rclcpp::node_interfaces::NodeParametersInterface::OnSetParametersCallbackType;
#else
        // Older rclcpp callback type name (pre-rename).
        using ros2_param_callback_type = rclcpp::node_interfaces::NodeParametersInterface::OnParametersSetCallbackType;
#endif
        // Register `callback` to be invoked on parameter set requests; the
        // handle is presumably stored in _ros_callback to keep it alive.
        void add_on_set_parameters_callback(ros2_param_callback_type callback);

    private:
        RosNodeBase& _node;
        rclcpp::Logger _logger;
        std::shared_ptr<void> _ros_callback;  // type-erased callback handle
};
}

View File

@@ -0,0 +1,123 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
#include "constants.h"
#include <map>
#include "ros_node_base.h"
#include <ros_utils.h>
#include <sensor_params.h>
#include <profile_manager.h>
#include <diagnostic_updater/diagnostic_updater.hpp>
#include <diagnostic_updater/update_functions.hpp>
namespace realsense2_camera
{
typedef std::pair<rs2_stream, int> stream_index_pair;
// Publishes frequency (Hz) diagnostics for one stream through a shared
// diagnostic_updater::Updater: call Tick() once per received frame and
// the updater reports whether the measured rate stays near the expected
// frequency.
class FrequencyDiagnostics
{
    public:
        FrequencyDiagnostics(std::string name, int expected_frequency, std::shared_ptr<diagnostic_updater::Updater> updater):
            _name(name),
            _min_freq(expected_frequency), _max_freq(expected_frequency),
            // 0.1 tolerance, window size 10 for the rate estimate.
            _freq_status_param(&_min_freq, &_max_freq, 0.1, 10),
            _freq_status(_freq_status_param, _name),
            _p_updater(updater)
        {
            _p_updater->add(_freq_status);
        };
        // Copy constructor re-registers the copy under the same task name.
        // NOTE(review): the destructor removes by name, so destroying either
        // the copy or the original unregisters that name from the updater —
        // confirm copies are only made when the source is being discarded
        // (e.g. map insertion).
        FrequencyDiagnostics (const FrequencyDiagnostics& other):
            _name(other._name),
            _min_freq(other._min_freq),
            _max_freq(other._max_freq),
            _freq_status_param(&_min_freq, &_max_freq, 0.1, 10),
            _freq_status(_freq_status_param, _name),
            _p_updater(other._p_updater)
        {
            _p_updater->add(_freq_status);
        };
        ~FrequencyDiagnostics()
        {
            _p_updater->removeByName(_name);
        }
        // Record one event (frame arrival) for the frequency estimate.
        void Tick()
        {
            _freq_status.tick();
        }

    private:
        std::string _name;
        double _min_freq, _max_freq;
        diagnostic_updater::FrequencyStatusParam _freq_status_param;
        diagnostic_updater::FrequencyStatus _freq_status;
        std::shared_ptr<diagnostic_updater::Updater> _p_updater;
};
// Wraps an rs2::sensor and connects it to ROS: declares the sensor's
// option parameters, owns the profile managers that select stream
// profiles, starts/stops streaming, and feeds per-stream frequency
// diagnostics.
class RosSensor : public rs2::sensor
{
    public:
        RosSensor(rs2::sensor sensor,
                  std::shared_ptr<Parameters> parameters,
                  std::function<void(rs2::frame)> frame_callback,
                  std::function<void()> update_sensor_func,
                  std::function<void()> hardware_reset_func,
                  std::shared_ptr<diagnostic_updater::Updater> diagnostics_updater,
                  rclcpp::Logger logger,
                  bool force_image_default_qos = false,
                  bool is_rosbag_file = false);
        ~RosSensor();
        void registerSensorParameters();
        // Fill `wanted_profiles` with the currently selected profiles; the
        // bool presumably reports whether they differ from the active ones
        // (implementation not visible here) — TODO confirm.
        bool getUpdatedProfiles(std::vector<rs2::stream_profile>& wanted_profiles);
        // Open the given profiles and start streaming frames into the callback.
        virtual bool start(const std::vector<rs2::stream_profile>& profiles);
        void stop();
        // QoS profiles for the image/data topic and the camera-info topic.
        rmw_qos_profile_t getQOS(const stream_index_pair& sip) const;
        rmw_qos_profile_t getInfoQOS(const stream_index_pair& sip) const;

        // Forward type queries to the underlying rs2::sensor.
        template<class T>
        bool is() const
        {
            return rs2::sensor::is<T>();
        }

    private:
        void setupErrorCallback();
        void setParameters(bool is_rosbag_file = false);
        void clearParameters();
        void set_sensor_auto_exposure_roi();
        void registerAutoExposureROIOptions();
        void UpdateSequenceIdCallback();
        template<class T>
        void set_sensor_parameter_to_ros(rs2_option option);

    private:
        rclcpp::Logger _logger;
        // Callback as supplied by the caller, and the callback actually
        // registered with the sensor (presumably a wrapper around the first).
        std::function<void(rs2::frame)> _origin_frame_callback;
        std::function<void(rs2::frame)> _frame_callback;
        SensorParams _params;
        std::function<void()> _update_sensor_func, _hardware_reset_func;
        std::vector<std::shared_ptr<ProfilesManager> > _profile_managers;
        rs2::region_of_interest _auto_exposure_roi;
        std::vector<std::string> _parameters_names;
        std::shared_ptr<diagnostic_updater::Updater> _diagnostics_updater;
        // Per-stream frame-rate diagnostics (see FrequencyDiagnostics above).
        std::map<stream_index_pair, FrequencyDiagnostics> _frequency_diagnostics;
        bool _force_image_default_qos;
};
}

View File

@@ -0,0 +1,48 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <vector>
#include "ros_node_base.h"
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
namespace realsense2_camera
{
    typedef std::pair<rs2_stream, int> stream_index_pair;

    // Canonical (stream type, stream index) identifiers for every stream
    // the driver can publish.
    const stream_index_pair COLOR{RS2_STREAM_COLOR, 0};
    const stream_index_pair DEPTH{RS2_STREAM_DEPTH, 0};
    const stream_index_pair INFRA0{RS2_STREAM_INFRARED, 0};
    const stream_index_pair INFRA1{RS2_STREAM_INFRARED, 1};
    const stream_index_pair INFRA2{RS2_STREAM_INFRARED, 2};
    const stream_index_pair GYRO{RS2_STREAM_GYRO, 0};
    const stream_index_pair ACCEL{RS2_STREAM_ACCEL, 0};
    const stream_index_pair MOTION{RS2_STREAM_MOTION, 0};

    // Name helpers: map librealsense names/enums to valid ROS graph names.
    bool isValidCharInName(char c);
    std::string rs2_to_ros(std::string rs2_name);
    std::string ros_stream_to_string(rs2_stream stream);
    std::string create_graph_resource_name(const std::string &original_name);
    // QoS helpers: parse a QoS name string / list the supported names.
    const rmw_qos_profile_t qos_string_to_qos(std::string str);
    const std::string list_available_qos_strings();
    rs2_format string_to_rs2_format(std::string str);
    // Serialize a byte vector as a JSON string (presumably for metadata
    // publishing — implementation not visible here).
    std::string vectorToJsonString(const std::vector<uint8_t>& vec);
}

View File

@@ -0,0 +1,48 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <librealsense2/rs.hpp>
#include "ros_node_base.h"
#include <dynamic_params.h>
namespace realsense2_camera
{
// Declares a ROS parameter for every supported rs2 option of a sensor and
// keeps them in sync (a ROS parameter change sets the sensor option).
class SensorParams
{
    public:
        SensorParams(std::shared_ptr<Parameters> parameters, rclcpp::Logger logger):
            _logger(logger),
            _parameters(parameters) {};
        ~SensorParams();
        // Declare dynamic ROS parameters for the options of `sensor`,
        // namespaced under `module_name`.
        void registerDynamicOptions(rs2::options sensor, const std::string& module_name);
        // Undeclare all parameters registered by this instance.
        void clearParameters();
        std::shared_ptr<Parameters> getParameters() {return _parameters;};

    public:
        rclcpp::Logger _logger;

    private:
        // rs2 option ranges are float; convert for ROS double parameters.
        double float_to_double(float val, rs2::option_range option_range);
        template<class T>
        rcl_interfaces::msg::ParameterDescriptor get_parameter_descriptor(const std::string& option_name, rs2::option_range option_range,
                                                                          T option_value, const std::string& option_description, const std::string& description_addition);
        template<class T>
        void set_parameter(rs2::options sensor, rs2_option option, const std::string& module_name, const std::string& description_addition="");

    private:
        std::shared_ptr<Parameters> _parameters;
        // Names of declared parameters, presumably removed by clearParameters().
        std::vector<std::string> _parameters_names;
};
}

View File

@@ -0,0 +1,221 @@
Panels:
- Class: rviz_common/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /Grid1
- /PointCloud21
- /Image1
- /Image2
- /Image3
- /Image4
Splitter Ratio: 0.5
Tree Height: 190
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
Expanded:
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz_common/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz_default_plugins/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic:
Depth: 5
Durability Policy: Volatile
Filter size: 10
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/depth/color/points
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/color/image_raw
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/depth/image_rect_raw
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/infra1/image_rect_raw
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /camera/camera/infra2/image_rect_raw
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: camera_depth_optical_frame
Frame Rate: 30
Name: root
Tools:
- Class: rviz_default_plugins/MoveCamera
- Class: rviz_default_plugins/Select
- Class: rviz_default_plugins/FocusCamera
- Class: rviz_default_plugins/Measure
Line color: 128; 128; 0
- Class: rviz_default_plugins/SetInitialPose
Covariance x: 0.25
Covariance y: 0.25
Covariance yaw: 0.06853891909122467
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /initialpose
- Class: rviz_default_plugins/SetGoal
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /move_base_simple/goal
- Class: rviz_default_plugins/PublishPoint
Single click: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /clicked_point
Transformation:
Current:
Class: rviz_default_plugins/TF
Value: true
Views:
Current:
Class: rviz_default_plugins/Orbit
Distance: 3.677529811859131
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.18814913928508759
Y: -0.17941315472126007
Z: 0.14549313485622406
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: -1.5697963237762451
Target Frame: <Fixed Frame>
Value: Orbit (rviz_default_plugins)
Yaw: 4.730405330657959
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1016
Hide Left Dock: false
Hide Right Dock: true
Image:
collapsed: false
QMainWindow State: 000000ff00000000fd0000000400000000000001560000039efc0200000010fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000000fb000000c900fffffffb0000000a0049006d006100670065010000011e000000780000000000000000fb0000000a0049006d006100670065010000013e000000cd0000002800fffffffb0000000a0049006d0061006700650100000211000000b10000002800fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000002c8000000a20000002800fffffffb0000000a0049006d00610067006501000003700000006b0000002800fffffffb0000000a0049006d00610067006501000001f70000007a0000000000000000fb0000000a0049006d00610067006501000002770000009d0000000000000000fb0000000a0049006d006100670065010000031a000000ca0000000000000000000000010000010f000001effc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000001ef000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b20000000000000000000000020000073d000000a9fc0100000002fb0000000a0049006d00610067006503000001c5000000bb000001f8000001b0fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000005da0000039e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f0
06c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1846
X: 74
Y: 27

View File

@@ -0,0 +1,109 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Launch realsense2_camera node & a frame latency printer node,
This tool allow the user the evaluate the reduction of the frame latency when intra-process communication is used.
Run syntax: ros2 launch realsense2_camera rs_intra_process_demo_launch.py intra_process_comms:=true
Note:
* Running this tool require building with build tools flag on (colcon build --cmake-args '-DBUILD_TOOLS=ON')
* Currently default for color stream only
'''
import sys
import subprocess
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
# Make sure required packages can be found
# Verify at import time that both required composable-node components are
# registered with the local ROS 2 installation; abort the launch with an
# actionable message otherwise (the latency tool is only built with
# -DBUILD_TOOLS=ON).
process = subprocess.run(['ros2','component', 'types'],
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
rs_node_class= 'RealSenseNodeFactory'
rs_latency_tool_class = 'FrameLatencyNode'
# `find` returns -1 when the class name is absent from the component list.
if process.stdout.find(rs_node_class) == -1 or process.stdout.find(rs_latency_tool_class) == -1 :
    sys.exit('Cannot locate all required node components (' + rs_node_class + ',' + rs_latency_tool_class + ') on the available component list\n' + process.stdout + \
        '\nplease make sure you run "colcon build --cmake-args \'-DBUILD_TOOLS=ON\'" command before running this launch file')
realsense_node_params = [{'name': 'serial_no', 'default': "''", 'description': 'choose device by serial number'},
{'name': 'usb_port_id', 'default': "''", 'description': 'choose device by usb port id'},
{'name': 'device_type', 'default': "''", 'description': 'choose device by type'},
{'name': 'log_level', 'default': 'info', 'description': 'debug log level [DEBUG|INFO|WARN|ERROR|FATAL]'},
{'name': 'rgb_camera.profile', 'default': '0,0,0', 'description': 'color image width'},
{'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'enable_infra', 'default': 'false', 'description': 'enable infra stream'},
{'name': 'enable_infra1', 'default': 'true', 'description': 'enable infra1 stream'},
{'name': 'enable_infra2', 'default': 'true', 'description': 'enable infra2 stream'},
{'name': 'enable_gyro', 'default': 'true', 'description': "enable gyro stream"},
{'name': 'enable_accel', 'default': 'true', 'description': "enable accel stream"},
{'name': 'unite_imu_method', 'default': "1", 'description': '[0-None, 1-copy, 2-linear_interpolation]'},
{'name': 'intra_process_comms', 'default': 'true', 'description': "enable intra-process communication"},
{'name': 'enable_sync', 'default': 'true', 'description': "'enable sync mode'"},
{'name': 'pointcloud.enable', 'default': 'true', 'description': ''},
{'name': 'enable_rgbd', 'default': 'true', 'description': "'enable rgbd topic'"},
{'name': 'align_depth.enable', 'default': 'true', 'description': "'enable align depth filter'"},
{'name': 'publish_tf', 'default': 'true', 'description': '[bool] enable/disable publishing static & dynamic TF'},
{'name': 'tf_publish_rate', 'default': '1.0', 'description': '[double] rate in HZ for publishing dynamic TF'},
]
frame_latency_node_params = [{'name': 'topic_name', 'default': '/camera/color/image_raw', 'description': 'topic to which latency calculated'},
{'name': 'topic_type', 'default': 'image', 'description': 'topic type [image|points|imu|metadata|camera_info|rgbd|imu_info|tf]'},
]
def declare_configurable_parameters(parameters):
    """Build a DeclareLaunchArgument action for each parameter description.

    Each entry of *parameters* is a dict with 'name', 'default' and
    'description' keys.
    """
    declared = []
    for param in parameters:
        declared.append(DeclareLaunchArgument(param['name'],
                                              default_value=param['default'],
                                              description=param['description']))
    return declared
def set_configurable_parameters(parameters):
    """Map each parameter name to its LaunchConfiguration substitution.

    Returns a dict suitable for a ROS 2 node's ``parameters`` list entry.
    """
    # Dict comprehension instead of dict([...]) — same result, one less
    # intermediate list (flake8-comprehensions C404).
    return {param['name']: LaunchConfiguration(param['name']) for param in parameters}
def generate_launch_description():
    """Assemble the launch description: all declared arguments plus one
    component container holding the RealSense driver and the frame-latency
    measurement node.

    Loading both components into a single container is what allows
    ``intra_process_comms:=true`` to bypass message serialization.
    """
    return LaunchDescription(declare_configurable_parameters(realsense_node_params) +
                             declare_configurable_parameters(frame_latency_node_params) + [
        ComposableNodeContainer(
            name='my_container',
            namespace='',
            package='rclcpp_components',
            executable='component_container',
            composable_node_descriptions=[
                # RealSense driver component.
                ComposableNode(
                    package='realsense2_camera',
                    namespace='',
                    plugin='realsense2_camera::' + rs_node_class,
                    name="camera",
                    parameters=[set_configurable_parameters(realsense_node_params)],
                    extra_arguments=[{'use_intra_process_comms': LaunchConfiguration("intra_process_comms")}]) ,
                # Frame-latency measurement component (built with -DBUILD_TOOLS=ON).
                ComposableNode(
                    package='realsense2_camera',
                    namespace='',
                    plugin='rs2_ros::tools::frame_latency::' + rs_latency_tool_class,
                    name='frame_latency',
                    parameters=[set_configurable_parameters(frame_latency_node_params)],
                    extra_arguments=[{'use_intra_process_comms': LaunchConfiguration("intra_process_comms")}]) ,
                ],
            output='screen',
            emulate_tty=True,  # needed for display of logs
            arguments=['--ros-args', '--log-level', LaunchConfiguration('log_level')],
        )])

View File

@@ -0,0 +1,145 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch realsense2_camera node."""
import os
import yaml
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import DeclareLaunchArgument, OpaqueFunction, LogInfo
from launch.substitutions import LaunchConfiguration
configurable_parameters = [{'name': 'camera_name', 'default': 'camera', 'description': 'camera unique name'},
{'name': 'camera_namespace', 'default': 'camera', 'description': 'namespace for camera'},
{'name': 'serial_no', 'default': "''", 'description': 'choose device by serial number'},
{'name': 'usb_port_id', 'default': "''", 'description': 'choose device by usb port id'},
{'name': 'device_type', 'default': "''", 'description': 'choose device by type'},
{'name': 'config_file', 'default': "''", 'description': 'yaml config file'},
{'name': 'json_file_path', 'default': "''", 'description': 'allows advanced configuration'},
{'name': 'initial_reset', 'default': 'false', 'description': "''"},
{'name': 'accelerate_gpu_with_glsl', 'default': "false", 'description': 'enable GPU acceleration with GLSL'},
{'name': 'rosbag_filename', 'default': "''", 'description': 'A realsense bagfile to run from as a device'},
{'name': 'rosbag_loop', 'default': 'false', 'description': 'Enable loop playback when playing a bagfile'},
{'name': 'log_level', 'default': 'info', 'description': 'debug log level [DEBUG|INFO|WARN|ERROR|FATAL]'},
{'name': 'output', 'default': 'screen', 'description': 'pipe node output [screen|log]'},
{'name': 'enable_color', 'default': 'true', 'description': 'enable color stream'},
{'name': 'rgb_camera.color_profile', 'default': '0,0,0', 'description': 'color stream profile'},
{'name': 'rgb_camera.color_format', 'default': 'RGB8', 'description': 'color stream format'},
{'name': 'rgb_camera.enable_auto_exposure', 'default': 'true', 'description': 'enable/disable auto exposure for color image'},
{'name': 'enable_depth', 'default': 'true', 'description': 'enable depth stream'},
{'name': 'enable_infra', 'default': 'false', 'description': 'enable infra0 stream'},
{'name': 'enable_infra1', 'default': 'false', 'description': 'enable infra1 stream'},
{'name': 'enable_infra2', 'default': 'false', 'description': 'enable infra2 stream'},
{'name': 'depth_module.depth_profile', 'default': '0,0,0', 'description': 'depth stream profile'},
{'name': 'depth_module.depth_format', 'default': 'Z16', 'description': 'depth stream format'},
{'name': 'depth_module.infra_profile', 'default': '0,0,0', 'description': 'infra streams (0/1/2) profile'},
{'name': 'depth_module.infra_format', 'default': 'RGB8', 'description': 'infra0 stream format'},
{'name': 'depth_module.infra1_format', 'default': 'Y8', 'description': 'infra1 stream format'},
{'name': 'depth_module.infra2_format', 'default': 'Y8', 'description': 'infra2 stream format'},
{'name': 'depth_module.color_profile', 'default': '0,0,0', 'description': 'Depth module color stream profile for d405'},
{'name': 'depth_module.color_format', 'default': 'RGB8', 'description': 'color stream format for d405'},
{'name': 'depth_module.exposure', 'default': '8500', 'description': 'Depth module manual exposure value'},
{'name': 'depth_module.gain', 'default': '16', 'description': 'Depth module manual gain value'},
{'name': 'depth_module.hdr_enabled', 'default': 'false', 'description': 'Depth module hdr enablement flag. Used for hdr_merge filter'},
{'name': 'depth_module.enable_auto_exposure', 'default': 'true', 'description': 'enable/disable auto exposure for depth image'},
{'name': 'depth_module.exposure.1', 'default': '7500', 'description': 'Depth module first exposure value. Used for hdr_merge filter'},
{'name': 'depth_module.gain.1', 'default': '16', 'description': 'Depth module first gain value. Used for hdr_merge filter'},
{'name': 'depth_module.exposure.2', 'default': '1', 'description': 'Depth module second exposure value. Used for hdr_merge filter'},
{'name': 'depth_module.gain.2', 'default': '16', 'description': 'Depth module second gain value. Used for hdr_merge filter'},
{'name': 'enable_sync', 'default': 'false', 'description': "'enable sync mode'"},
{'name': 'depth_module.inter_cam_sync_mode', 'default': "0", 'description': '[0-Default, 1-Master, 2-Slave]'},
{'name': 'enable_rgbd', 'default': 'false', 'description': "'enable rgbd topic'"},
{'name': 'enable_gyro', 'default': 'false', 'description': "'enable gyro stream'"},
{'name': 'enable_accel', 'default': 'false', 'description': "'enable accel stream'"},
{'name': 'gyro_fps', 'default': '0', 'description': "''"},
{'name': 'enable_motion', 'default': 'false', 'description': "'enable motion stream (IMU) for DDS devices'"},
{'name': 'accel_fps', 'default': '0', 'description': "''"},
{'name': 'unite_imu_method', 'default': "0", 'description': '[0-None, 1-copy, 2-linear_interpolation]'},
{'name': 'clip_distance', 'default': '-2.', 'description': "''"},
{'name': 'angular_velocity_cov', 'default': '0.01', 'description': "''"},
{'name': 'linear_accel_cov', 'default': '0.01', 'description': "''"},
{'name': 'diagnostics_period', 'default': '0.0', 'description': 'Rate of publishing diagnostics. 0=Disabled'},
{'name': 'publish_tf', 'default': 'true', 'description': '[bool] enable/disable publishing static & dynamic TF'},
{'name': 'tf_publish_rate', 'default': '0.0', 'description': '[double] rate in Hz for publishing dynamic TF'},
{'name': 'pointcloud.enable', 'default': 'false', 'description': ''},
{'name': 'pointcloud.stream_filter', 'default': '2', 'description': 'texture stream for pointcloud'},
{'name': 'pointcloud.stream_index_filter','default': '0', 'description': 'texture stream index for pointcloud'},
{'name': 'pointcloud.ordered_pc', 'default': 'false', 'description': ''},
{'name': 'pointcloud.allow_no_texture_points', 'default': 'false', 'description': "''"},
{'name': 'align_depth.enable', 'default': 'false', 'description': 'enable align depth filter'},
{'name': 'colorizer.enable', 'default': 'false', 'description': 'enable colorizer filter'},
{'name': 'decimation_filter.enable', 'default': 'false', 'description': 'enable_decimation_filter'},
{'name': 'rotation_filter.enable', 'default': 'false', 'description': 'enable rotation_filter'},
{'name': 'rotation_filter.rotation', 'default': '0.0', 'description': 'rotation value: 0.0, 90.0, -90.0, 180.0'},
{'name': 'spatial_filter.enable', 'default': 'false', 'description': 'enable_spatial_filter'},
{'name': 'temporal_filter.enable', 'default': 'false', 'description': 'enable_temporal_filter'},
{'name': 'disparity_filter.enable', 'default': 'false', 'description': 'enable_disparity_filter'},
{'name': 'hole_filling_filter.enable', 'default': 'false', 'description': 'enable_hole_filling_filter'},
{'name': 'hdr_merge.enable', 'default': 'false', 'description': 'hdr_merge filter enablement flag'},
{'name': 'wait_for_device_timeout', 'default': '-1.', 'description': 'Timeout for waiting for device to connect (Seconds)'},
{'name': 'reconnect_timeout', 'default': '6.', 'description': 'Timeout(seconds) between consequtive reconnection attempts'},
{'name': 'base_frame_id', 'default': 'link', 'description': 'Root frame of the sensors transform tree'},
]
def declare_configurable_parameters(parameters):
    """Turn each parameter-description dict into a DeclareLaunchArgument."""
    declared = []
    for param in parameters:
        declared.append(DeclareLaunchArgument(param['name'],
                                              default_value=param['default'],
                                              description=param['description']))
    return declared
def set_configurable_parameters(parameters):
    """Map every parameter name to its LaunchConfiguration substitution."""
    return {param['name']: LaunchConfiguration(param['name']) for param in parameters}
def yaml_to_dict(path_to_yaml):
    """Parse a YAML file and return its contents.

    Uses yaml.safe_load, the documented shorthand for
    yaml.load(..., Loader=yaml.SafeLoader); it refuses to construct
    arbitrary Python objects, which matters for user-supplied config files.
    """
    with open(path_to_yaml, "r") as f:
        return yaml.safe_load(f)
def launch_setup(context, params, param_name_suffix=''):
    """Create the realsense2_camera node action for one device.

    Args:
        context: LaunchContext used to resolve substitutions.
        params: dict mapping parameter names to LaunchConfiguration objects.
        param_name_suffix: index suffix ('1', '2', ...) distinguishing the
            launch arguments of multiple devices; empty for a single camera.

    Returns:
        A list of launch actions: a LogInfo banner plus the camera node.
    """
    # Parameters loaded from an optional YAML config file are passed after
    # `params`, so file values take precedence in the node's parameter list.
    _config_file = LaunchConfiguration('config_file' + param_name_suffix).perform(context)
    params_from_file = {} if _config_file == "''" else yaml_to_dict(_config_file)
    # Load lifecycle nodes setting from YAML dynamically generated by CMAKE instead of environment variable
    lifecycle_param_file = os.path.join(
        os.path.dirname(__file__), '..', 'config', 'global_settings.yaml'
    )
    lifecycle_params = yaml_to_dict(lifecycle_param_file)
    use_lifecycle_node = lifecycle_params.get("use_lifecycle_node", False)
    _output = LaunchConfiguration('output' + param_name_suffix)
    # Dynamically choose Node or LifecycleNode
    node_action = launch_ros.actions.LifecycleNode if use_lifecycle_node else launch_ros.actions.Node
    log_message = "Launching as LifecycleNode" if use_lifecycle_node else "Launching as Normal ROS Node"
    if(os.getenv('ROS_DISTRO') == 'foxy'):
        # Foxy doesn't support output as substitution object (LaunchConfiguration object)
        # but supports it as string, so we fetch the string from this substitution object
        # see related PR that was merged for humble, iron, rolling: https://github.com/ros2/launch/pull/577
        _output = context.perform_substitution(_output)
    return [
        LogInfo(msg=f"🚀 {log_message}"),
        node_action(
            package='realsense2_camera',
            namespace=LaunchConfiguration('camera_namespace' + param_name_suffix),
            name=LaunchConfiguration('camera_name' + param_name_suffix),
            executable='realsense2_camera_node',
            parameters=[params, params_from_file],
            output=_output,
            arguments=['--ros-args', '--log-level', LaunchConfiguration('log_level' + param_name_suffix)],
            emulate_tty=True,
        )
    ]
def generate_launch_description():
    """Declare every configurable launch argument, then set up the camera node."""
    declared_args = declare_configurable_parameters(configurable_parameters)
    node_setup = OpaqueFunction(
        function=launch_setup,
        kwargs={'params': set_configurable_parameters(configurable_parameters)})
    return LaunchDescription(declared_args + [node_setup])

View File

@@ -0,0 +1,96 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch 2 devices.
# The Parameters available for definition in the command line for each camera are described in rs_launch.configurable_parameters
# For each device, the parameter name was changed to include an index.
# For example: to set camera_name for device1 set parameter camera_name1.
# command line example:
# ros2 launch realsense2_camera rs_multi_camera_launch.py camera_name1:=D400 device_type2:=l5. device_type1:=d4..
"""Launch realsense2_camera node."""
import copy
from launch import LaunchDescription, LaunchContext
import launch_ros.actions
from launch.actions import IncludeLaunchDescription, OpaqueFunction, LogInfo
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
from launch.launch_description_sources import PythonLaunchDescriptionSource
import sys
import os
import yaml
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import rs_launch
# Launch arguments specific to this two-camera launch file: each rs_launch
# argument gains a numeric suffix (1/2) so the devices can be configured
# independently on the command line.
local_parameters = [{'name': 'camera_name1', 'default': 'camera1', 'description': 'camera1 unique name'},
                    {'name': 'camera_name2', 'default': 'camera2', 'description': 'camera2 unique name'},
                    {'name': 'camera_namespace1', 'default': 'camera1', 'description': 'camera1 namespace'},
                    {'name': 'camera_namespace2', 'default': 'camera2', 'description': 'camera2 namespace'},
                   ]
def yaml_to_dict(path_to_yaml):
    """Parse a YAML file and return its contents.

    Uses yaml.safe_load, the documented shorthand for
    yaml.load(..., Loader=yaml.SafeLoader); it refuses to construct
    arbitrary Python objects, which matters for user-supplied config files.
    """
    with open(path_to_yaml, "r") as f:
        return yaml.safe_load(f)
def set_configurable_parameters(local_params):
    """Map each parameter's un-suffixed name to the LaunchConfiguration of its suffixed name."""
    return {param['original_name']: LaunchConfiguration(param['name']) for param in local_params}
def duplicate_params(general_params, posix):
    """Deep-copy *general_params*, appending *posix* to every 'name'.

    The pre-suffix name is preserved under 'original_name' so the node can
    still be configured with the canonical parameter name.  The input list
    is left untouched.
    """
    duplicated = copy.deepcopy(general_params)
    for entry in duplicated:
        entry['original_name'] = entry['name']
        entry['name'] = entry['name'] + posix
    return duplicated
def launch_static_transform_publisher_node(context : LaunchContext):
    """Publish an identity static transform between the two cameras' link frames.

    NOTE(review): the transform is all zeros (a placeholder); replace the
    arguments with real extrinsics if the cameras are rigidly mounted.
    """
    # Load lifecycle nodes setting from YAML dynamically generated by CMAKE instead of environment variable
    lifecycle_param_file = os.path.join(
        os.path.dirname(__file__), '..', 'config', 'global_settings.yaml'
    )
    lifecycle_params = yaml_to_dict(lifecycle_param_file)
    use_lifecycle_node = lifecycle_params.get("use_lifecycle_node", False)
    node_action = launch_ros.actions.LifecycleNode if use_lifecycle_node else launch_ros.actions.Node
    log_message = "Launching as LifecycleNode" if use_lifecycle_node else "Launching as Normal ROS Node"
    # dummy static transformation from camera1 to camera2
    node = node_action(
        namespace="",
        name="tf2_static_transform_publisher",
        package = "tf2_ros",
        executable = "static_transform_publisher",
        arguments = ["0", "0", "0", "0", "0", "0",
                     context.launch_configurations['camera_name1'] + "_link",
                     context.launch_configurations['camera_name2'] + "_link"]
    )
    return [LogInfo(msg=f"🚀 {log_message}"), node]
def generate_launch_description():
    """Declare arguments for two cameras, launch both nodes and a static TF."""
    params1 = duplicate_params(rs_launch.configurable_parameters, '1')
    params2 = duplicate_params(rs_launch.configurable_parameters, '2')
    declared = (rs_launch.declare_configurable_parameters(local_parameters)
                + rs_launch.declare_configurable_parameters(params1)
                + rs_launch.declare_configurable_parameters(params2))
    actions = [
        OpaqueFunction(function=rs_launch.launch_setup,
                       kwargs={'params': set_configurable_parameters(params1),
                               'param_name_suffix': '1'}),
        OpaqueFunction(function=rs_launch.launch_setup,
                       kwargs={'params': set_configurable_parameters(params2),
                               'param_name_suffix': '2'}),
        OpaqueFunction(function=launch_static_transform_publisher_node),
    ]
    return LaunchDescription(declared + actions)

View File

@@ -0,0 +1,100 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DESCRIPTION #
# ----------- #
# Use this launch file to launch 2 devices and enable the hardware synchronization.
# As describe in https://dev.intelrealsense.com/docs/multiple-depth-cameras-configuration both devices
# have to be connected using a sync cable. The devices will by default stream asynchronously.
# Using this launch file one device will operate as master and the other as slave. As a result they will
# capture at exactly the same time and rate.
# command line example: (to be adapted with the serial numbers or the used devices)
# ros2 launch realsense2_camera rs_multi_camera_launch_sync.py camera_name1:=cam_1 camera_name2:=cam_2 camera_namespace1:=camera camera_namespace2:=camera serial_no1:="'_031422250097'" serial_no2:="'_336222301921'"
# The value of the param can be checked using ros2 param get /camera/cam_1 depth_module.inter_cam_sync_mode and ros2 param get /camera/cam_2 depth_module.inter_cam_sync_mode
"""Launch realsense2_camera node."""
import copy
from launch import LaunchDescription, LaunchContext
import launch_ros.actions
from launch.actions import IncludeLaunchDescription, OpaqueFunction, LogInfo
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
from launch.launch_description_sources import PythonLaunchDescriptionSource
import sys
import os
import yaml
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import rs_launch
# Launch arguments for the hardware-synchronized pair: names/namespaces per
# device, plus inter_cam_sync_mode making camera1 the master and camera2 the
# slave over the sync cable.
local_parameters = [{'name': 'camera_name1', 'default': 'camera1', 'description': 'camera1 unique name'},
                    {'name': 'camera_name2', 'default': 'camera2', 'description': 'camera2 unique name'},
                    {'name': 'camera_namespace1', 'default': 'camera1', 'description': 'camera1 namespace'},
                    {'name': 'camera_namespace2', 'default': 'camera2', 'description': 'camera2 namespace'},
                    {'name': 'depth_module.inter_cam_sync_mode1', 'default': '1', 'description': 'master'},
                    {'name': 'depth_module.inter_cam_sync_mode2', 'default': '2', 'description': 'slave'},
                   ]
def yaml_to_dict(path_to_yaml):
    """Parse a YAML file and return its contents.

    Uses yaml.safe_load, the documented shorthand for
    yaml.load(..., Loader=yaml.SafeLoader); it refuses to construct
    arbitrary Python objects, which matters for user-supplied config files.
    """
    with open(path_to_yaml, "r") as f:
        return yaml.safe_load(f)
def set_configurable_parameters(local_params):
    """Map each parameter's un-suffixed name to the LaunchConfiguration of its suffixed name."""
    return {param['original_name']: LaunchConfiguration(param['name']) for param in local_params}
def duplicate_params(general_params, posix):
    """Deep-copy *general_params*, appending *posix* to every 'name'.

    The pre-suffix name is preserved under 'original_name' so the node can
    still be configured with the canonical parameter name.  The input list
    is left untouched.
    """
    duplicated = copy.deepcopy(general_params)
    for entry in duplicated:
        entry['original_name'] = entry['name']
        entry['name'] = entry['name'] + posix
    return duplicated
def launch_static_transform_publisher_node(context : LaunchContext):
    """Publish an identity static transform between the two cameras' link frames.

    NOTE(review): the transform is all zeros (a placeholder); replace the
    arguments with real extrinsics if the cameras are rigidly mounted.
    """
    # Load lifecycle nodes setting from YAML dynamically generated by CMAKE instead of environment variable
    lifecycle_param_file = os.path.join(
        os.path.dirname(__file__), '..', 'config', 'global_settings.yaml'
    )
    lifecycle_params = yaml_to_dict(lifecycle_param_file)
    use_lifecycle_node = lifecycle_params.get("use_lifecycle_node", False)
    node_action = launch_ros.actions.LifecycleNode if use_lifecycle_node else launch_ros.actions.Node
    log_message = "Launching as LifecycleNode" if use_lifecycle_node else "Launching as Normal ROS Node"
    # dummy static transformation from camera1 to camera2
    node = node_action(
        namespace="",
        name="tf2_static_transform_publisher",
        package = "tf2_ros",
        executable = "static_transform_publisher",
        arguments = ["0", "0", "0", "0", "0", "0",
                     context.launch_configurations['camera_name1'] + "_link",
                     context.launch_configurations['camera_name2'] + "_link"]
    )
    return [LogInfo(msg=f"🚀 {log_message}"), node]
def generate_launch_description():
    """Declare arguments for the synchronized pair, launch both nodes and a static TF."""
    params1 = duplicate_params(rs_launch.configurable_parameters, '1')
    params2 = duplicate_params(rs_launch.configurable_parameters, '2')
    declared = (rs_launch.declare_configurable_parameters(local_parameters)
                + rs_launch.declare_configurable_parameters(params1)
                + rs_launch.declare_configurable_parameters(params2))
    actions = [
        OpaqueFunction(function=rs_launch.launch_setup,
                       kwargs={'params': set_configurable_parameters(params1),
                               'param_name_suffix': '1'}),
        OpaqueFunction(function=rs_launch.launch_setup,
                       kwargs={'params': set_configurable_parameters(params2),
                               'param_name_suffix': '2'}),
        OpaqueFunction(function=launch_static_transform_publisher_node),
    ]
    return LaunchDescription(declared + actions)

View File

@@ -0,0 +1,37 @@
uri: 'https://raw.githubusercontent.com/magazino/pylon_camera/indigo-devel/rosdep/empty.tar'
check-presence-script: |
#!/bin/bash
if [ $(dpkg-query -W -f='${Status}' librealsense2-dkms 2>/dev/null | grep -c "ok installed") -eq 0 ];
then
exit 1
else
exit 0
fi
install-script: |
#!/bin/bash
# Install
if [[ "$EUID" -ne 0 ]]; then
# Install add-apt-repository
sudo apt-get install -y software-properties-common
# Register the server's public key
sudo apt-key adv --keyserver keys.gnupg.net --recv-key C8B3A55A6F3EFCDE || sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE
# Add the server to the list of repositories
sudo add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo bionic main" -u
# Install the libraries
sudo apt-get install -y librealsense2-dkms librealsense2-dev
else
# Install add-apt-repository
apt-get install -y software-properties-common
# Register the server's public key
apt-key adv --keyserver keys.gnupg.net --recv-key C8B3A55A6F3EFCDE || apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE
# Add the server to the list of repositories
add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo bionic main" -u
# Install the libraries
apt-get install -y librealsense2-dkms librealsense2-dev
fi
exit $?

View File

@@ -0,0 +1,37 @@
uri: 'https://raw.githubusercontent.com/magazino/pylon_camera/indigo-devel/rosdep/empty.tar'
check-presence-script: |
#!/bin/bash
if [ $(dpkg-query -W -f='${Status}' librealsense2-dkms 2>/dev/null | grep -c "ok installed") -eq 0 ];
then
exit 1
else
exit 0
fi
install-script: |
#!/bin/bash
# Install
if [[ "$EUID" -ne 0 ]]; then
# Install add-apt-repository
sudo apt-get install -y software-properties-common
# Register the server's public key
sudo apt-key adv --keyserver keys.gnupg.net --recv-key C8B3A55A6F3EFCDE || sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE
# Add the server to the list of repositories
sudo add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo xenial main" -u
# Install the libraries
sudo apt-get install -y librealsense2-dkms librealsense2-dev
else
# Install add-apt-repository
apt-get install -y software-properties-common
# Register the server's public key
apt-key adv --keyserver keys.gnupg.net --recv-key C8B3A55A6F3EFCDE || apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE
# Add the server to the list of repositories
add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo xenial main" -u
# Install the libraries
apt-get install -y librealsense2-dkms librealsense2-dev
fi
exit $?

View File

@@ -0,0 +1,55 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>realsense2_camera</name>
<version>4.56.4</version>
<description>RealSense camera package allowing access to Intel D400 3D cameras</description>
<maintainer email="librs.ros@intel.com">LibRealSense ROS Team</maintainer>
<license>Apache License 2.0</license>
<url type="website">http://www.ros.org/wiki/RealSense</url>
<url type="bugtracker">https://github.com/intel-ros/realsense/issues</url>
<author email="librs.ros@intel.com">LibRealSense ROS Team</author>
<buildtool_depend>ament_cmake</buildtool_depend>
<depend>eigen</depend>
<depend>builtin_interfaces</depend>
<depend>cv_bridge</depend>
<depend>image_transport</depend>
<depend>librealsense2</depend>
<depend>rclcpp</depend>
<!-- Lifecycle dependencies are optional -DUSE_LIFECYCLE_NODE -->
<depend>rclcpp_lifecycle</depend>
<depend>lifecycle_msgs</depend>
<depend>rclcpp_components</depend>
<depend>realsense2_camera_msgs</depend>
<depend>sensor_msgs</depend>
<depend>geometry_msgs</depend>
<depend>std_srvs</depend>
<depend>std_msgs</depend>
<depend>nav_msgs</depend>
<depend>tf2</depend>
<depend>tf2_ros</depend>
<depend>diagnostic_updater</depend>
<test_depend condition="$ROS_DISTRO != foxy">ament_cmake_gtest</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">launch_testing</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">ament_cmake_pytest</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">launch_pytest</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">sensor_msgs_py</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">python3-numpy</test_depend>
  <test_depend condition="$ROS_DISTRO != foxy">python3-tqdm</test_depend>
  <test_depend condition="$ROS_DISTRO != foxy">python3-requests</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">tf2_ros_py</test_depend>
<test_depend condition="$ROS_DISTRO != foxy">ros2topic</test_depend>
<exec_depend>launch_ros</exec_depend>
<build_depend>ros_environment</build_depend>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>

View File

@@ -0,0 +1,52 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import sys
import rclpy
from rclpy.node import Node
from rclpy import qos
from realsense2_camera_msgs.msg import Metadata
import json
def metadata_cb(msg):
    """Print one realsense Metadata message: the header stamp followed by
    every key/value pair decoded from the message's JSON payload."""
    aa = json.loads(msg.json_data)
    os.system('clear')  # wipe the terminal so each message replaces the last
    print('header:\nstamp:\n secs:', msg.header.stamp.sec, '\n nsecs:', msg.header.stamp.nanosec)
    print('\n'.join(['%10s:%-10s' % (key, str(value)) for key, value in aa.items()]))
def main():
    """Subscribe to the metadata topic given on the command line and print
    every message received until interrupted."""
    if len(sys.argv) < 2 or '--help' in sys.argv or '/?' in sys.argv:
        print('USAGE:')
        print('echo_metadata.py <topic>')
        print('Demo for listening on given metadata topic.')
        print('App subscribes on given topic')
        print('App then prints metadata from messages')
        print('')
        print('Example: echo_metadata.py /camera/depth/metadata')
        print('')
        # sys.exit is always available; the `exit` builtin is injected by the
        # site module and may be absent when run with `python -S`.
        sys.exit(-1)
    topic = sys.argv[1]
    rclpy.init()
    node = Node('metadata_tester')
    depth_sub = node.create_subscription(Metadata, topic, metadata_cb, qos.qos_profile_sensor_data)
    rclpy.spin(node)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,253 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Unpacks a rosbag into its topics and messages
Uses the topic types to interpret the messages from each topic,
yielding dicts for each topic containing iterables for each field.
By default unpacks all topics, but you can use any of the following keyword
params to limit which topics are intepretted:
- 'listTopics' = True - no unpacking - just returns a list of the topics contained in
the file and their associated types
- 'importTopics' = <list of strings> - only imports the listed topics
- 'importTypes' = <list of strings> - only imports the listed types
Message types supported are strictly those listed in the initial imports section
of this file. There are a selection of standard message types and a couple
related to event-based sensors.
The method of importation is honed to the particular needs of
the author, sometimes ignoring certain fields, grouping data in particular ways
etc. However it should serve as a model for anyone who wishes to import rosbags.
Although it's possible to import messages programmatically given only the
message definition files, we have chosen not to do this, because if we did it
we would anyway need to take the resulting data and pick out the bits we wanted.
"""
#%%
from struct import unpack
from struct import error as structError
from tqdm import tqdm
# Local imports
from .messageTypes.common import unpackHeader
from .messageTypes.dvs_msgs_EventArray import importTopic as import_dvs_msgs_EventArray
from .messageTypes.esim_msgs_OpticFlow import importTopic as import_esim_msgs_OpticFlow
from .messageTypes.geometry_msgs_PoseStamped import importTopic as import_geometry_msgs_PoseStamped
from .messageTypes.geometry_msgs_Transform import importTopic as import_geometry_msgs_Transform
from .messageTypes.geometry_msgs_TransformStamped import importTopic as import_geometry_msgs_TransformStamped
from .messageTypes.geometry_msgs_TwistStamped import importTopic as import_geometry_msgs_TwistStamped
from .messageTypes.sensor_msgs_CameraInfo import importTopic as import_sensor_msgs_CameraInfo
from .messageTypes.sensor_msgs_Image import importTopic as import_sensor_msgs_Image
from .messageTypes.sensor_msgs_Imu import importTopic as import_sensor_msgs_Imu
from .messageTypes.sensor_msgs_PointCloud2 import importTopic as import_sensor_msgs_PointCloud2
from .messageTypes.tf_tfMessage import importTopic as import_tf_tfMessage
import logging
def importTopic(topic, **kwargs):
    """Interpret a topic's raw messages according to its ros message type.

    Returns a dict of parsed data tagged with 'rosbagType', or None when the
    type has no importer.
    """
    importers = {
        'dvs_msgs_EventArray': import_dvs_msgs_EventArray,
        'esim_msgs_OpticFlow': import_esim_msgs_OpticFlow,
        'geometry_msgs_PoseStamped': import_geometry_msgs_PoseStamped,
        'geometry_msgs_Transform': import_geometry_msgs_Transform,
        'geometry_msgs_TransformStamped': import_geometry_msgs_TransformStamped,
        'geometry_msgs_TwistStamped': import_geometry_msgs_TwistStamped,
        'sensor_msgs_CameraInfo': import_sensor_msgs_CameraInfo,
        'sensor_msgs_Image': import_sensor_msgs_Image,
        'sensor_msgs_Imu': import_sensor_msgs_Imu,
        'sensor_msgs_PointCloud2': import_sensor_msgs_PointCloud2,
        'tf_tfMessage': import_tf_tfMessage,
    }
    importer = importers.get(topic['type'].replace('/', '_'))
    if importer is None:
        return None
    topicDict = importer(topic['msgs'], **kwargs)
    if topicDict:
        topicDict['rosbagType'] = topic['type']
    return topicDict
def readFile(filePathOrName):
    """Parse a ROSBAG V2.0 file into connection records and raw chunks.

    Walks the record stream once, collecting:
      - conns: list of connection-header dicts (topic, type, conn id, ...)
      - chunks: list of chunk records, each with its raw 'data' plus the
        (conn, time, offset) triples appended by the index records (op 4)
        that follow it.

    Returns:
        (conns, chunks)
    """
    global disable_bar
    logging.info('Attempting to import ' + filePathOrName + ' as a rosbag 2.0 file.')
    with open(filePathOrName, 'rb') as file:
        # File format string
        fileFormatString = file.readline().decode("utf-8")
        logging.info('ROSBAG file format: ' + fileFormatString)
        if fileFormatString != '#ROSBAG V2.0\n':
            logging.error('This file format (%s) might not be supported' % fileFormatString)
        eof = False
        conns = []
        chunks = []
        # Progress bar is created when the bag header (op 3) announces the
        # chunk count; guard against a malformed file where a chunk precedes it.
        pbar = None
        while not eof:
            # Read a record header
            try:
                headerLen = unpack('=l', file.read(4))[0]
            except structError:
                if len(file.read(1)) == 0: # Distinguish EOF from other struct errors
                    # a struct error could also occur if the data is downloaded by one os and read by another.
                    eof = True
                continue
            # unpack the header into fields
            headerBytes = file.read(headerLen)
            fields = unpackHeader(headerLen, headerBytes)
            # Read the record data
            dataLen = unpack('=l', file.read(4))[0]
            data = file.read(dataLen)
            # The op code tells us what to do with the record
            op = unpack('=b', fields['op'])[0]
            fields['op'] = op
            if op == 2:
                # It's a message
                # AFAIK these are not found unpacked in the file
                pass
            elif op == 3:
                # It's a bag header - use this to do progress bar for the read
                chunkCount = unpack('=l', fields['chunk_count'])[0]
                pbar = tqdm(total=chunkCount, position=0, leave=True, disable=disable_bar)
            elif op == 4:
                # It's an index - this is used to index the previous chunk
                conn = unpack('=l', fields['conn'])[0]
                count = unpack('=l', fields['count'])[0]
                for idx in range(count):
                    time, offset = unpack('=ql', data[idx*12:idx*12+12])
                    chunks[-1]['ids'].append((conn, time, offset))
            elif op == 5:
                # It's a chunk
                fields['data'] = data
                fields['ids'] = []
                chunks.append(fields)
                if pbar is not None:
                    # BUGFIX: advance by ONE chunk. The previous
                    # pbar.update(len(chunks)) added the cumulative count
                    # (1+2+3+...) every iteration and overshot total=chunkCount.
                    pbar.update(1)
            elif op == 6:
                # It's a chunk-info - seems to be redundant
                pass
            elif op == 7:
                # It's a conn
                # interpret data as a string containing the connection header
                connFields = unpackHeader(dataLen, data)
                connFields.update(fields)
                connFields['conn'] = unpack('=l', connFields['conn'])[0]
                connFields['topic'] = connFields['topic'].decode("utf-8")
                connFields['type'] = connFields['type'].decode("utf-8").replace('/', '_')
                conns.append(connFields)
    return conns, chunks
#%% Break chunks into msgs
def breakChunksIntoMsgs(chunks):
    """Split each chunk's raw bytes into individual message records.

    Each chunk carries 'ids' triples (conn, time, offset) added by the index
    records; the offset points at a message record inside chunk['data'].
    Every record is unpacked into its header fields plus raw 'data' bytes,
    with 'conn' decoded from bytes to an int.
    """
    global disable_bar
    msgs = []
    logging.debug('Breaking chunks into msgs ...')
    for chunk in tqdm(chunks, position=0, leave=True, disable=disable_bar):
        for idx in chunk['ids']:
            # idx = (conn, time, offset); offset locates the record in the chunk
            ptr = idx[2]
            headerLen = unpack('=l', chunk['data'][ptr:ptr+4])[0]
            ptr += 4
            # unpack the header into fields
            headerBytes = chunk['data'][ptr:ptr+headerLen]
            ptr += headerLen
            fields = unpackHeader(headerLen, headerBytes)
            # Read the record data
            dataLen = unpack('=l', chunk['data'][ptr:ptr+4])[0]
            ptr += 4
            fields['data'] = chunk['data'][ptr:ptr+dataLen]
            fields['conn'] = unpack('=l', fields['conn'])[0]
            msgs.append(fields)
    return msgs
return msgs
def rekeyConnsByTopic(connDict):
    """Re-key a {conn_id: conn_record} dict by each connection's topic name."""
    return {record['topic']: record for record in connDict.values()}
def importRosbag(filePathOrName, **kwargs):
    """Import a rosbag 2.0 file, returning {topicName: topicDict}.

    Keyword args:
        listTopics (bool): if True, only list topics and types; import nothing.
        importTopics (list[str]): import only these topic names.
        importTypes (list[str]): import only these message types.
        log (str): logging level name; defaults to 'INFO'.
        disable_bar: disable the tqdm progress bars.

    Raises:
        ValueError: if 'log' does not name a valid logging level.
    """
    global disable_bar
    disable_bar = kwargs.get('disable_bar')
    # BUGFIX: default to 'INFO' — kwargs.get('log') returned None when the
    # caller omitted it, and None.upper() raised AttributeError.
    loglevel = kwargs.get('log', 'INFO')
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    logging.getLogger().setLevel(numeric_level)
    logging.info('Importing file: ' + filePathOrName)
    conns, chunks = readFile(filePathOrName)
    # Restructure conns as a dictionary keyed by conn number
    connDict = {}
    for conn in conns:
        connDict[conn['conn']] = conn
        conn['msgs'] = []
    if kwargs.get('listTopics', False):
        topics = rekeyConnsByTopic(connDict)
        logging.info('Topics in the file are (with types):')
        for topicKey, topic in topics.items():
            # Strip bookkeeping fields so only descriptive metadata remains
            del topic['conn']
            del topic['md5sum']
            del topic['msgs']
            del topic['op']
            del topic['topic']
            topic['message_definition'] = topic['message_definition'].decode("utf-8")
            logging.info(' ' + topicKey + ' --- ' + topic['type'])
        return topics
    msgs = breakChunksIntoMsgs(chunks)
    for msg in msgs:
        connDict[msg['conn']]['msgs'].append(msg)
    topics = rekeyConnsByTopic(connDict)
    importedTopics = {}
    importTopics = kwargs.get('importTopics')
    importTypes = kwargs.get('importTypes')
    if importTopics is not None:
        for topicToImport in importTopics:
            for topicInFile in list(topics.keys()):
                if topicInFile == topicToImport:
                    importedTopic = importTopic(topics[topicInFile], **kwargs)
                    if importedTopic is not None:
                        importedTopics[topicToImport] = importedTopic
                        del topics[topicInFile]
    elif importTypes is not None:
        for typeToImport in importTypes:
            typeToImport = typeToImport.replace('/', '_')
            for topicInFile in list(topics.keys()):
                if topics[topicInFile]['type'].replace('/', '_') == typeToImport:
                    importedTopic = importTopic(topics[topicInFile], **kwargs)
                    if importedTopic is not None:
                        importedTopics[topicInFile] = importedTopic
                        del topics[topicInFile]
    else: # import everything
        for topicInFile in list(topics.keys()):
            importedTopic = importTopic(topics[topicInFile], **kwargs)
            if importedTopic is not None:
                importedTopics[topicInFile] = importedTopic
                del topics[topicInFile]
    logging.info('')
    if importedTopics:
        logging.info('Topics imported are:')
        for topic in importedTopics.keys():
            logging.info(topic + ' --- ' + importedTopics[topic]['rosbagType'])
        #del importedTopics[topic]['rosbagType']
    logging.info('')
    if topics:
        logging.info('Topics not imported are:')
        for topic in topics.keys():
            logging.info(topic + ' --- ' + topics[topic]['type'])
    logging.info('')
    return importedTopics

View File

@@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
"""
#%%
from struct import unpack
import numpy as np
def unpackHeader(headerLen, headerBytes):
    """Decode a rosbag record header into a {name: raw_value_bytes} dict.

    Each field is a little length-prefixed b'name=value' blob; values are
    returned as raw bytes for the caller to interpret.
    """
    fields = {}
    cursor = 0
    while cursor < headerLen:
        (fieldLen,) = unpack('=l', headerBytes[cursor:cursor + 4])
        cursor += 4
        field = headerBytes[cursor:cursor + fieldLen]
        cursor += fieldLen
        # split on the first '=' (0x3d): name on the left, raw value on the right
        sep = field.find(b'\x3d')
        fields[field[:sep].decode("utf-8")] = field[sep + 1:]
    return fields
def unpackRosUint32(data, ptr):
    """Read a native-endian uint32 at *ptr*; return (value, new_ptr)."""
    (value,) = unpack('=L', data[ptr:ptr + 4])
    return value, ptr + 4
def unpackRosUint8(data, ptr):
    """Read a single uint8 at *ptr*; return (value, new_ptr)."""
    (value,) = unpack('=B', data[ptr:ptr + 1])
    return value, ptr + 1
def unpackRosString(data, ptr):
    """Read a uint32-length-prefixed UTF-8 string; return (string, new_ptr).

    Undecodable bytes yield the literal placeholder 'UnicodeDecodeError'
    rather than raising, matching the tolerant parsing used elsewhere.
    """
    (stringLen,) = unpack('=L', data[ptr:ptr + 4])
    ptr += 4
    raw = data[ptr:ptr + stringLen]
    try:
        outStr = raw.decode('utf-8')
    except UnicodeDecodeError:
        outStr = 'UnicodeDecodeError'
    return outStr, ptr + stringLen
def unpackRosFloat64Array(data, num, ptr):
    """Read *num* contiguous native-order float64 values; return (ndarray, new_ptr)."""
    end = ptr + num * 8
    return np.frombuffer(data[ptr:end], dtype=np.float64), end
def unpackRosFloat32Array(data, num, ptr):
    """Read *num* contiguous native-order float32 values; return (ndarray, new_ptr)."""
    end = ptr + num * 4
    return np.frombuffer(data[ptr:end], dtype=np.float32), end
def unpackRosFloat32(data, ptr):
    """Read one native-endian float32 at *ptr*; return (value, new_ptr)."""
    (value,) = unpack('=f', data[ptr:ptr + 4])
    return value, ptr + 4
def unpackRosTimestamp(data, ptr):
    """Read a ROS timestamp (seconds + nanoseconds, two uint32) and combine
    it into a single float64 seconds value; return (seconds, new_ptr)."""
    timeS, timeNs = unpack('=LL', data[ptr:ptr + 8])
    combined = np.float64(timeS) + np.float64(timeNs) * 0.000000001
    return combined, ptr + 8

View File

@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable to contain the interpretted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
https://github.com/uzh-rpg/rpg_dvs_ros/tree/master/dvs_msgs/msg
"""
#%%
from tqdm import tqdm
import numpy as np
# local imports
from .common import unpackRosString, unpackRosUint32
def importTopic(msgs, **kwargs):
    """Decode dvs_msgs/EventArray messages into event arrays.

    Returns a dict with concatenated per-event fields:
        'x', 'y' (uint16 pixel coords), 'ts' (float64 seconds), 'pol' (bool),
    plus sensor dimensions 'dimX'/'dimY' taken from the last message.
    """
    disable_bar = kwargs.get('disable_bar')
    tsByMessage = []
    xByMessage = []
    yByMessage = []
    polByMessage = []
    for msg in tqdm(msgs, position=0, leave=True, disable=disable_bar):
        # TODO: maybe implement kwargs['useRosMsgTimestamps']
        data = msg['data']
        #seq = unpack('=L', data[0:4])[0]
        #timeS, timeNs = unpack('=LL', data[4:12])
        frame_id, ptr = unpackRosString(data, 12)
        height, ptr = unpackRosUint32(data, ptr)
        width, ptr = unpackRosUint32(data, ptr)
        numEventsInMsg, ptr = unpackRosUint32(data, ptr)
        # The format of the event is x=Uint16, y=Uint16, ts = Uint32, tns (nano seconds) = Uint32, pol=Bool
        # Unpack in batch into uint8 and then compose
        dataAsArray = np.frombuffer(data[ptr:ptr+numEventsInMsg*13], dtype=np.uint8)
        dataAsArray = dataAsArray.reshape((-1, 13), order='C')
        # Compose multi-byte fields low byte first (little-endian wire order)
        xByMessage.append((dataAsArray[:, 0] + dataAsArray[:, 1] * 2**8).astype(np.uint16))
        yByMessage.append((dataAsArray[:, 2] + dataAsArray[:, 3] * 2**8).astype(np.uint16))
        ts = ((dataAsArray[:, 4] + \
               dataAsArray[:, 5] * 2**8 + \
               dataAsArray[:, 6] * 2**16 + \
               dataAsArray[:, 7] * 2**24 ).astype(np.float64))
        tns = ((dataAsArray[:, 8] + \
                dataAsArray[:, 9] * 2**8 + \
                dataAsArray[:, 10] * 2**16 + \
                dataAsArray[:, 11] * 2**24).astype(np.float64))
        tsByMessage.append(ts + tns / 1000000000) # Combine timestamp parts, result is in seconds
        # BUGFIX: np.bool (a deprecated alias of the builtin bool) was removed
        # in NumPy 1.24; use the builtin bool, which is the same dtype.
        polByMessage.append(dataAsArray[:, 12].astype(bool))
    outDict = {
        'x': np.concatenate(xByMessage),
        'y': np.concatenate(yByMessage),
        'ts': np.concatenate(tsByMessage),
        'pol': np.concatenate(polByMessage),
        'dimX': width,
        'dimY': height}
    return outDict

View File

@@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importRosbag function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
This function imports the ros message type defined at:
https://github.com/uzh-rpg/rpg_esim/blob/master/event_camera_simulator/esim_msgs/msg/OpticFlow.msg
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosFloat32Array, unpackRosUint32, \
unpackRosTimestamp, unpackRosString
def importTopic(msgs, **kwargs):
    """Decode esim_msgs/OpticFlow messages into dense per-frame flow maps.

    Returns a dict with 'ts' (float64 seconds) and 'flowMaps' (a list of
    height x width x 2 float32 arrays, x-flow in channel 0 and y-flow in
    channel 1), or None if no message contained a non-empty map.
    """
    disable_bar = kwargs.get('disable_bar')
    timestamps = []
    flowMaps = []
    for msg in tqdm(msgs, disable=disable_bar):
        data = msg['data']
        seq, ptr = unpackRosUint32(data, 0)  # sequence number - unused
        ts, ptr = unpackRosTimestamp(data, ptr)
        frameId, ptr = unpackRosString(data, ptr)  # unused
        height, ptr = unpackRosUint32(data, ptr)
        width, ptr = unpackRosUint32(data, ptr)
        if width == 0 or height == 0:
            continue  # empty map - nothing to decode
        numElements = width * height
        arraySize, ptr = unpackRosUint32(data, ptr)
        #assert arraySize == numElements
        flowX, ptr = unpackRosFloat32Array(data, numElements, ptr)
        arraySize, ptr = unpackRosUint32(data, ptr)
        #assert arraySize == numElements
        flowMapY, ptr = unpackRosFloat32Array(data, numElements, ptr)
        flowMaps.append(np.stack((flowX.reshape(height, width),
                                  flowMapY.reshape(height, width)), axis=2))
        timestamps.append(ts)
    if not timestamps:
        return None
    return {
        'ts': np.array(timestamps, dtype=np.float64),
        'flowMaps': flowMaps,
    }

View File

@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/melodic/api/geometry_msgs/html/msg/PoseStamped.html
The result is a ts plus a 7-column np array of np.float64,
where the cols are x, y, z, q-w, q-x, q-y, q-z, (i.e. quaternion orientation)
NOTE: QUATERNION ORDER GETS MODIFIED from xyzw to wxyz
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosTimestamp, unpackRosFloat64Array
def importTopic(msgs, **kwargs):
    """Decode geometry_msgs/PoseStamped messages.

    Returns a dict with 'ts' (float64 seconds), 'point' (N x 3) and
    'rotation' (N x 4 quaternion, reordered from xyzw to wxyz).
    """
    #if 'Stamped' not in kwargs.get('messageType', 'Stamped'):
    #    return interpretMsgsAsPose6qAlt(msgs, **kwargs)
    disable_bar = kwargs.get('disable_bar')
    sizeOfArray = 1024
    tsAll = np.zeros((sizeOfArray), dtype=np.float64)
    poseAll = np.zeros((sizeOfArray, 7), dtype=np.float64)
    # Initialised here so that an empty `msgs` no longer raises NameError
    # (the loop variable `idx` was previously read after the loop)
    numEvents = 0
    for idx, msg in enumerate(tqdm(msgs, position=0, leave=True, disable=disable_bar)):
        if sizeOfArray <= idx:
            # Double the buffers when full (amortised O(1) growth)
            tsAll = np.append(tsAll, np.zeros((sizeOfArray), dtype=np.float64))
            poseAll = np.concatenate((poseAll, np.zeros((sizeOfArray, 7), dtype=np.float64)))
            sizeOfArray *= 2
        # TODO: maybe implement kwargs['useRosMsgTimestamps']
        data = msg['data']
        # Header: bytes 0-3 are the seq number (skipped); timestamp starts at 4
        tsAll[idx], ptr = unpackRosTimestamp(data, 4)
        frame_id, ptr = unpackRosString(data, ptr)
        poseAll[idx, :], _ = unpackRosFloat64Array(data, 7, ptr)
        numEvents = idx + 1
    # Crop arrays to the number of messages actually read
    tsAll = tsAll[:numEvents]
    poseAll = poseAll[:numEvents]
    point = poseAll[:, 0:3]
    rotation = poseAll[:, [6, 3, 4, 5]]  # switch quaternion from xyzw to wxyz
    outDict = {
        'ts': tsAll,
        'point': point,
        'rotation': rotation}
    return outDict

View File

@@ -0,0 +1,64 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/melodic/api/geometry_msgs/html/msg/Transform.html
"""
#%%
from tqdm import tqdm
import numpy as np
# Local imports
from .common import unpackRosTimestamp, unpackRosFloat64Array
def importTopic(msgs, **kwargs):
    """Decode geometry_msgs/Transform messages into ts/point/rotation arrays.

    Transform has no header, so timestamps come from the bag records.
    Quaternions are reordered from xyzw to wxyz.
    """
    disable_bar = kwargs.get('disable_bar')
    numEvents = 0
    capacity = 1024
    tsAll = np.zeros((capacity), dtype=np.float64)
    poseAll = np.zeros((capacity, 7), dtype=np.float64)
    for idx, msg in enumerate(tqdm(msgs, position=0, leave=True, disable=disable_bar)):
        if capacity <= idx:
            # Double the buffers when full
            tsAll = np.append(tsAll, np.zeros((capacity), dtype=np.float64))
            poseAll = np.concatenate((poseAll, np.zeros((capacity, 7), dtype=np.float64)))
            capacity *= 2
        # No choice about the timestamp source: the message has no header,
        # so always use the bag record time
        tsAll[idx], _ = unpackRosTimestamp(msg['time'], 0)
        poseAll[idx, :], _ = unpackRosFloat64Array(msg['data'], 7, 0)
        numEvents = idx + 1
    # Crop buffers to the number of messages actually read.
    # NOTE(original): unwrapping timestamps via timestamps.unwrapTimestamps
    # was considered here but never applied.
    tsAll = tsAll[:numEvents]
    poseAll = poseAll[:numEvents]
    point = poseAll[:, 0:3]
    rotation = poseAll[:, [6, 3, 4, 5]]  # switch quaternion from xyzw to wxyz
    return {
        'ts': tsAll,
        'point': point,
        'rotation': rotation}

View File

@@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/api/geometry_msgs/html/msg/TransformStamped.html
The result is a ts plus a 7-column np array of np.float64,
where the cols are x, y, z, q-w, q-x, q-y, q-z, (i.e. quaternion orientation)
NOTE: QUATERNION ORDER GETS MODIFIED from xyzw to wxyz
NOTE - this code is identical to geometry_msgs_PoseStamped
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosTimestamp, unpackRosFloat64Array
def importTopic(msgs, **kwargs):
    """Decode geometry_msgs/TransformStamped messages.

    Returns a dict with 'ts' (float64 seconds), 'point' (N x 3) and
    'rotation' (N x 4 quaternion, reordered from xyzw to wxyz).
    NOTE: this logic is identical to geometry_msgs_PoseStamped.
    """
    #if 'Stamped' not in kwargs.get('messageType', 'Stamped'):
    #    return interpretMsgsAsPose6qAlt(msgs, **kwargs)
    disable_bar = kwargs.get('disable_bar')
    sizeOfArray = 1024
    tsAll = np.zeros((sizeOfArray), dtype=np.float64)
    poseAll = np.zeros((sizeOfArray, 7), dtype=np.float64)
    # Initialised here so that an empty `msgs` no longer raises NameError
    # (the loop variable `idx` was previously read after the loop)
    numEvents = 0
    for idx, msg in enumerate(tqdm(msgs, position=0, leave=True, disable=disable_bar)):
        if sizeOfArray <= idx:
            # Double the buffers when full (amortised O(1) growth)
            tsAll = np.append(tsAll, np.zeros((sizeOfArray), dtype=np.float64))
            poseAll = np.concatenate((poseAll, np.zeros((sizeOfArray, 7), dtype=np.float64)))
            sizeOfArray *= 2
        # TODO: maybe implement kwargs['useRosMsgTimestamps']
        data = msg['data']
        # Header: bytes 0-3 are the seq number (skipped); timestamp starts at 4
        tsAll[idx], ptr = unpackRosTimestamp(data, 4)
        frame_id, ptr = unpackRosString(data, ptr)
        poseAll[idx, :], _ = unpackRosFloat64Array(data, 7, ptr)
        numEvents = idx + 1
    # Crop arrays to the number of messages actually read
    tsAll = tsAll[:numEvents]
    poseAll = poseAll[:numEvents]
    point = poseAll[:, 0:3]
    rotation = poseAll[:, [6, 3, 4, 5]]  # switch quaternion from xyzw to wxyz
    outDict = {
        'ts': tsAll,
        'point': point,
        'rotation': rotation}
    return outDict

View File

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The interpretMessages function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
This function imports the ros message type defined at:
http://docs.ros.org/melodic/api/geometry_msgs/html/msg/TwistStamped.html
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosTimestamp, unpackRosFloat64Array
def importTopic(msgs, **kwargs):
    """Decode geometry_msgs/TwistStamped messages.

    Returns a dict with 'ts' (float64 seconds), 'linV' (N x 3 linear
    velocity) and 'angV' (N x 3 angular velocity).
    """
    disable_bar = kwargs.get('disable_bar')
    sizeOfArray = 1024
    ts = np.zeros((sizeOfArray), dtype=np.float64)
    linV = np.zeros((sizeOfArray, 3), dtype=np.float64)
    angV = np.zeros((sizeOfArray, 3), dtype=np.float64)
    # Initialised here so that an empty `msgs` no longer raises NameError
    # (the loop variable `idx` was previously read after the loop)
    numEvents = 0
    for idx, msg in enumerate(tqdm(msgs, position=0, leave=True, disable=disable_bar)):
        if sizeOfArray <= idx:
            # Double the buffers when full (amortised O(1) growth)
            ts = np.append(ts, np.zeros((sizeOfArray), dtype=np.float64))
            linV = np.concatenate((linV, np.zeros((sizeOfArray, 3), dtype=np.float64)))
            angV = np.concatenate((angV, np.zeros((sizeOfArray, 3), dtype=np.float64)))
            sizeOfArray *= 2
        data = msg['data']
        # Header: bytes 0-3 are the seq number (skipped); timestamp starts at 4
        ts[idx], ptr = unpackRosTimestamp(data, 4)
        frame_id, ptr = unpackRosString(data, ptr)
        linV[idx, :], ptr = unpackRosFloat64Array(data, 3, ptr)
        angV[idx, :], _ = unpackRosFloat64Array(data, 3, ptr)
        numEvents = idx + 1
    # Crop arrays to the number of messages actually read
    ts = ts[:numEvents]
    linV = linV[:numEvents]
    angV = angV[:numEvents]
    outDict = {
        'ts': ts,
        'linV': linV,
        'angV': angV}
    return outDict

View File

@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/api/sensor_msgs/html/msg/CameraInfo.html
We assume that there will only be one camera_info msg per channel,
so the resulting dict is populated by the following fields:
std_msgs/Header header
uint32 height
uint32 width
string distortion_model
<following as numpy matrices of the appropriate dimensions>
float64[] D (distortion params)
float64[9] K (Intrinsic camera matrix)
float64[9] R (rectification matrix - only for stereo setup)
float64[12] P (projection matrix)
<ignoring the following for now:>
uint32 binning_x
uint32 binning_y
sensor_msgs/RegionOfInterest roi
"""
#%%
from .common import unpackRosString, unpackRosUint32, unpackRosFloat64Array
def importTopic(msgs, **kwargs):
    """Decode a sensor_msgs/CameraInfo message.

    Calibration is assumed static, so only the first message on the
    channel is read. Returns a dict with height, width, the distortion
    model name, and the D, K (3x3), R (3x3) and P (3x4) matrices.
    """
    calib = {}
    data = msgs[0]['data']  # one calibration msg per frame; use the first
    # Header: seq (4 bytes) and stamp (8 bytes) are skipped; frame_id starts at 12
    frameId, ptr = unpackRosString(data, 12)
    calib['height'], ptr = unpackRosUint32(data, ptr)
    calib['width'], ptr = unpackRosUint32(data, ptr)
    calib['distortionModel'], ptr = unpackRosString(data, ptr)
    numElementsInD, ptr = unpackRosUint32(data, ptr)
    calib['D'], ptr = unpackRosFloat64Array(data, numElementsInD, ptr)
    # Fixed-size matrices: intrinsic K, rectification R, projection P
    for key, (rows, cols) in (('K', (3, 3)), ('R', (3, 3)), ('P', (3, 4))):
        flat, ptr = unpackRosFloat64Array(data, rows * cols, ptr)
        calib[key] = flat.reshape(rows, cols)
    # binning and ROI fields are ignored
    return calib

View File

@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/api/sensor_msgs/html/msg/Image.html
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosUint32, unpackRosUint8, unpackRosTimestamp
def importTopic(msgs, **kwargs):
    '''
    Decode sensor_msgs/Image messages
    (http://docs.ros.org/api/sensor_msgs/html/msg/Image.html).

    Returns a dict with 'ts' (float64 seconds, one per message) and
    'frames' (list of numpy arrays, height x width [x channels]), or
    None if a message uses an unsupported pixel encoding.
    '''
    disable_bar = kwargs.get('disable_bar')
    # Pixel-format table, following the reference implementation at
    # http://docs.ros.org/jade/api/sensor_msgs/html/image__encodings_8h_source.html
    # encoding -> (numpy dtype, bytes per sample, channels)
    formats = {
        'mono8': (np.uint8, 1, 1),
        '8UC1': (np.uint8, 1, 1),
        'mono16': (np.uint16, 2, 1),
        '16UC1': (np.uint16, 2, 1),
        'bgr8': (np.uint8, 1, 3),
        'rgb8': (np.uint8, 1, 3),
        'bgra8': (np.uint8, 1, 4),
        'rgba8': (np.uint8, 1, 4),
        '16SC1': (np.int16, 2, 1),
        '32FC1': (np.float32, 4, 1),
    }
    sizeOfArray = 1024
    tsAll = np.zeros((sizeOfArray), dtype=np.float64)
    framesAll = []
    # Initialised here so that an empty `msgs` no longer raises NameError
    # (the loop variable `idx` was previously read after the loop)
    numEvents = 0
    for idx, msg in enumerate(tqdm(msgs, position=0, leave=True, disable=disable_bar)):
        if sizeOfArray <= idx:
            tsAll = np.append(tsAll, np.zeros((sizeOfArray), dtype=np.float64))
            sizeOfArray *= 2
        data = msg['data']
        # Header: bytes 0-3 are the seq number (skipped); stamp starts at 4
        if kwargs.get('useRosMsgTimestamps', False):
            tsAll[idx], _ = unpackRosTimestamp(msg['time'], 0)
        else:
            tsAll[idx], _ = unpackRosTimestamp(data, 4)
        frame_id, ptr = unpackRosString(data, 12)
        height, ptr = unpackRosUint32(data, ptr)
        width, ptr = unpackRosUint32(data, ptr)
        fmtString, ptr = unpackRosString(data, ptr)
        isBigendian, ptr = unpackRosUint8(data, ptr)
        if isBigendian:
            # (original message text had malformed quoting: "doesn''t")
            print("data is bigendian, but it doesn't matter")
        step, ptr = unpackRosUint32(data, ptr)  # row stride in bytes - not used
        arraySize, ptr = unpackRosUint32(data, ptr)
        # assert arraySize == height*width
        fmt = formats.get(fmtString)
        if fmt is None:
            print('image format not supported:' + fmtString)
            return None
        dtype, bytesPerSample, depth = fmt
        numBytes = height * width * depth * bytesPerSample
        frameData = np.frombuffer(data[ptr:ptr + numBytes], dtype)
        if depth > 1:
            frameData = frameData.reshape(height, width, depth)
        else:
            frameData = frameData.reshape(height, width)
        framesAll.append(frameData)
        numEvents = idx + 1
    # Crop the timestamp buffer to the number of frames actually read
    tsAll = tsAll[:numEvents]
    outDict = {
        'ts': tsAll,
        'frames': framesAll}
    return outDict

View File

@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/api/sensor_msgs/html/msg/Imu.html
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosTimestamp, unpackRosFloat64Array
def importTopic(msgs, **kwargs):
    '''
    Decode sensor_msgs/Imu messages
    (http://docs.ros.org/api/sensor_msgs/html/msg/Imu.html).

    Returns a dict of float64 arrays: 'ts' (seconds), 'rotQ' (N x 4
    quaternion), 'angV' (N x 3), 'acc' (N x 3) and 'mag' (N x 3).
    NOTE: 'mag' is never populated from the data - it is returned as
    zeros (Imu has no magnetometer field; kept for interface stability).
    '''
    disable_bar = kwargs.get('disable_bar')
    sizeOfArray = 1024
    tsAll = np.zeros((sizeOfArray), dtype=np.float64)
    rotQAll = np.zeros((sizeOfArray, 4), dtype=np.float64)
    angVAll = np.zeros((sizeOfArray, 3), dtype=np.float64)
    accAll = np.zeros((sizeOfArray, 3), dtype=np.float64)
    magAll = np.zeros((sizeOfArray, 3), dtype=np.float64)
    # Initialised here so that an empty `msgs` no longer raises NameError
    # (the loop variable `idx` was previously read after the loop)
    numEvents = 0
    for idx, msg in enumerate(tqdm(msgs, position=0, leave=True, disable=disable_bar)):
        if sizeOfArray <= idx:
            # Double the buffers when full (amortised O(1) growth)
            tsAll = np.append(tsAll, np.zeros((sizeOfArray), dtype=np.float64))
            rotQAll = np.concatenate((rotQAll, np.zeros((sizeOfArray, 4), dtype=np.float64)))
            angVAll = np.concatenate((angVAll, np.zeros((sizeOfArray, 3), dtype=np.float64)))
            accAll = np.concatenate((accAll, np.zeros((sizeOfArray, 3), dtype=np.float64)))
            magAll = np.concatenate((magAll, np.zeros((sizeOfArray, 3), dtype=np.float64)))
            sizeOfArray *= 2
        # TODO: maybe implement kwargs['useRosMsgTimestamps']
        data = msg['data']
        # Header: bytes 0-3 are the seq number (skipped); timestamp starts at 4
        tsAll[idx], ptr = unpackRosTimestamp(data, 4)
        frame_id, ptr = unpackRosString(data, ptr)
        rotQAll[idx, :], ptr = unpackRosFloat64Array(data, 4, ptr)
        ptr += 72  # skip orientation covariance (9 x float64)
        angVAll[idx, :], ptr = unpackRosFloat64Array(data, 3, ptr)
        ptr += 72  # skip angular-velocity covariance (9 x float64)
        accAll[idx, :], ptr = unpackRosFloat64Array(data, 3, ptr)
        numEvents = idx + 1
    # Crop arrays to the number of messages actually read
    tsAll = tsAll[:numEvents]
    rotQAll = rotQAll[:numEvents]
    angVAll = angVAll[:numEvents]
    accAll = accAll[:numEvents]
    magAll = magAll[:numEvents]
    outDict = {
        'ts': tsAll,
        'rotQ': rotQAll,
        'angV': angVAll,
        'acc': accAll,
        'mag': magAll
        }
    return outDict

View File

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/api/sensor_msgs/html/msg/PointCloud2.html
For simplicity, we're currently directly unpacking the format that we are
encountering in the data, which is x,y,z,_,rgb,_,_,_
each as 32-bit little-endian floats
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosUint8, unpackRosUint32, \
unpackRosTimestamp
def importTopic(msgs, **kwargs):
    '''
    Decode sensor_msgs/PointCloud2 messages.

    Only x, y, z are kept - read as the first 12 bytes (3 x float32) of
    each point record; the field definitions in every message are walked
    past but otherwise ignored. Returns a dict with 'ts' (one float64
    entry per point, repeating its message's timestamp) and 'point'
    (N x 3 float32), or None if no message contained any points.
    '''
    #tempAll = np.zeros((sizeOfArray, 1), dtype=np.float64)
    #for msg in tqdm(msgs, position=0, leave=True):
    disable_bar = kwargs.get('disable_bar')
    tsByMessage = []
    pointsByMessage = []
    for msg in tqdm(msgs, disable=disable_bar):
        data = msg['data']
        ptr = 0
        seq, ptr = unpackRosUint32(data, ptr)
        ts, ptr = unpackRosTimestamp(data, ptr)
        frame_id, ptr = unpackRosString(data, ptr)
        height, ptr = unpackRosUint32(data, ptr)
        width, ptr = unpackRosUint32(data, ptr)
        if width > 0 and height > 0:
            # Number of PointField definitions
            arraySize, ptr = unpackRosUint32(data, ptr)
            for element in range(arraySize):
                # Move through the field definitions - we'll ignore these
                # until we encounter a file that uses a different set
                name, ptr = unpackRosString(data, ptr)
                offset, ptr = unpackRosUint32(data, ptr)
                datatype, ptr = unpackRosUint8(data, ptr)
                count, ptr = unpackRosUint32(data, ptr)
            isBigendian, ptr = unpackRosUint8(data, ptr)
            # pointStep: bytes per point record; rowStep unused since we
            # advance per point rather than per row
            pointStep, ptr = unpackRosUint32(data, ptr)
            rowStep, ptr = unpackRosUint32(data, ptr)
            numPoints = width * height
            points = np.empty((numPoints, 3), dtype=np.float32)
            # Byte length of the data array
            arraySize, ptr = unpackRosUint32(data, ptr)
            # assert arraySize = width*height
            for x in range(width):
                for y in range(height):
                    # x, y, z are the first 12 bytes of each record
                    points[x*height + y, :] = np.frombuffer(data[ptr:ptr+12], dtype=np.float32)
                    ptr += pointStep
            pointsByMessage.append(points)
            tsByMessage.append(np.ones((numPoints), dtype=np.float64) * ts)
    if not pointsByMessage: # None of the messages contained any points
        return None
    points = np.concatenate(pointsByMessage)
    ts = np.concatenate(tsByMessage)
    # Crop arrays to number of events
    outDict = {
        'ts': ts,
        'point': points,
        }
    return outDict

View File

@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
Intended as part of importRosbag.
The importTopic function receives a list of messages and returns
a dict with one field for each data field in the message, where the field
will contain an appropriate iterable holding the interpreted contents of each message.
In some cases, static info is repeated in each message; in which case a field may not contain an iterable.
This function imports the ros message type defined at:
http://docs.ros.org/api/tf/html/msg/tfMessage.html
Each message contains an array of transform_stamped messages
The result is a ts plus a 7-column np array of np.float64,
where the cols are x, y, z, q-w, q-x, q-y, q-z, (i.e. quaternion orientation)
NOTE: QUATERNION ORDER GETS MODIFIED from xyzw to wxyz
NOTE - this code is similar to geometry_msgs_TransformStamped
"""
#%%
from tqdm import tqdm
import numpy as np
from .common import unpackRosString, unpackRosTimestamp, \
unpackRosFloat64Array, unpackRosUint32
def importTopic(msgs, **kwargs):
    """Decode tf/tfMessage messages, each holding an array of TransformStamped.

    Returns 'ts', 'point' (N x 3) and 'rotation' (N x 4 quaternion,
    reordered from xyzw to wxyz), plus per-transform 'frameId' and
    'childFrameId' string arrays.
    """
    #if 'Stamped' not in kwargs.get('messageType', 'Stamped'):
    #    return interpretMsgsAsPose6qAlt(msgs, **kwargs)
    disable_bar = kwargs.get('disable_bar')
    capacity = 1024
    tsAll = np.zeros((capacity), dtype=np.float64)
    poseAll = np.zeros((capacity, 7), dtype=np.float64)
    frameIds = []
    childFrameIds = []
    count = 0  # running index over all transforms across all messages
    for msg in tqdm(msgs, position=0, leave=True, disable=disable_bar):
        data = msg['data']
        numTfInMsg, ptr = unpackRosUint32(data, 0)
        for _ in range(numTfInMsg):
            # Double the buffers whenever the message's transforms might overflow
            while capacity <= count + numTfInMsg:
                tsAll = np.append(tsAll, np.zeros((capacity), dtype=np.float64))
                poseAll = np.concatenate((poseAll, np.zeros((capacity, 7), dtype=np.float64)))
                capacity *= 2
            seq, ptr = unpackRosUint32(data, ptr)
            tsAll[count], ptr = unpackRosTimestamp(data, ptr)
            frameId, ptr = unpackRosString(data, ptr)
            frameIds.append(frameId)
            childFrameId, ptr = unpackRosString(data, ptr)
            childFrameIds.append(childFrameId)
            poseAll[count, :], ptr = unpackRosFloat64Array(data, 7, ptr)
            count += 1
    # Crop buffers to the number of transforms actually read
    tsAll = tsAll[:count]
    poseAll = poseAll[:count]
    point = poseAll[:, 0:3]
    rotation = poseAll[:, [6, 3, 4, 5]]  # switch quaternion from xyzw to wxyz
    return {
        'ts': tsAll,
        'point': point,
        'rotation': rotation,
        'frameId': np.array(frameIds, dtype='object'),
        'childFrameId': np.array(childFrameIds, dtype='object')}

View File

@@ -0,0 +1,311 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import rclpy
from rclpy.node import Node
from rclpy import qos
from sensor_msgs.msg import Image as msg_Image
import numpy as np
import ctypes
import struct
import quaternion
import tf2_ros
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
from sensor_msgs_py import point_cloud2 as pc2
from sensor_msgs.msg import Imu as msg_Imu
try:
from theora_image_transport.msg import Packet as msg_theora
except Exception:
pass
def pc2_to_xyzrgb(point):
    """Split a point cloud record (x, y, z, packed-rgb-float) into x, y, z, r, g, b."""
    # Thanks to Panos for his code used in this function.
    point = list(point)
    x, y, z = point[:3]
    packedFloat = point[3]
    # Reinterpret the float32 bit pattern as an integer so the colour
    # channels can be extracted with bitwise operations
    asInt = struct.unpack('>l', struct.pack('>f', packedFloat))[0]
    rgbBits = ctypes.c_uint32(asInt).value
    red = (rgbBits >> 16) & 0xFF
    green = (rgbBits >> 8) & 0xFF
    blue = rgbBits & 0xFF
    return x, y, z, red, green, blue
def image_msg_to_numpy(data):
    """Convert a sensor_msgs/Image message into a numpy array (H x W [x C])."""
    encoding = data.encoding
    # Map the ROS image encoding onto the numpy sample dtype
    dtypeByEncoding = {
        'mono8': np.uint8, '8UC1': np.uint8,
        'bgr8': np.uint8, 'rgb8': np.uint8,
        'bgra8': np.uint8, 'rgba8': np.uint8,
        'mono16': np.uint16, '16UC1': np.uint16, '16SC1': np.uint16,
        '32FC1': np.float32,
    }
    dtype = dtypeByEncoding.get(encoding)
    if dtype is None:
        print('image format not supported:' + encoding)
        return None
    img = np.frombuffer(data.data, dtype)
    # Channel count is inferred from the row stride, not the encoding name
    depth = data.step / (data.width * img.dtype.itemsize)
    if depth > 1:
        img = img.reshape(data.height, data.width, int(depth))
    else:
        img = img.reshape(data.height, data.width)
    return img
class CWaitForMessage:
    """Collects messages and per-frame statistics from RealSense ROS2 topics.

    Entry points:
      * wait_for_message(): block until one message (optionally with a given
        timestamp) arrives on a single topic, then return it.
      * wait_for_messages(): subscribe to several pre-defined 'themes' at once
        and accumulate per-frame statistics until the timeout expires.
    """
    def __init__(self, params=None):
        # A fresh dict per call replaces the original shared mutable default
        # argument (params={}); behavior for all existing callers is unchanged.
        params = {} if params is None else params
        self.result = None
        self.break_timeout = False
        self.timeout = params.get('timeout_secs', -1)   # -1 disables the timeout
        self.time = params.get('time', None)            # optional exact stamp to wait for
        self.node_name = params.get('node_name', 'rs2_listener')
        self.listener = None
        self.prev_msg_time = 0
        self.fout = None                                # optional output file (IMU dump)
        print ('connect to ROS with name: %s' % self.node_name)
        # Theme name -> {topic, callback factory, message type} used by wait_for_messages().
        self.themes = {'depthStream': {'topic': '/camera/depth/image_rect_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
                       'colorStream': {'topic': '/camera/color/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
                       #'pointscloud': {'topic': '/camera/depth/color/points', 'callback': self.pointscloudCallback, 'msg_type': msg_PointCloud2},
                       'alignedDepthInfra1': {'topic': '/camera/aligned_depth_to_infra1/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
                       'alignedDepthColor': {'topic': '/camera/aligned_depth_to_color/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
                       'static_tf': {'topic': '/camera/color/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
                       'accelStream': {'topic': '/camera/accel/sample', 'callback': self.imuCallback, 'msg_type': msg_Imu},
                       }
        self.func_data = dict()

    def imuCallback(self, theme_name):
        """Return a subscription callback that stores raw accel samples and,
        when a TF to camera_link is available, the rotated (ROS-frame) samples."""
        def _imuCallback(data):
            self.prev_time = time.time()
            self.func_data[theme_name].setdefault('value', [])
            self.func_data[theme_name].setdefault('ros_value', [])
            try:
                frame_id = data.header.frame_id
                value = data.linear_acceleration
                self.func_data[theme_name]['value'].append(value)
                if (self.tfBuffer.can_transform('camera_link', frame_id, rclpy.time.Time(), rclpy.time.Duration(nanoseconds=3e6))):
                    msg = self.tfBuffer.lookup_transform('camera_link', frame_id, rclpy.time.Time(), rclpy.time.Duration(nanoseconds=3e6)).transform
                    # Rotate the accel vector into the camera_link (ROS) frame.
                    quat = np.quaternion(msg.rotation.x, msg.rotation.y, msg.rotation.z, msg.rotation.w)
                    point = np.matrix([value.x, value.y, value.z], dtype='float32')
                    point.resize((3, 1))
                    rotated = quaternion.as_rotation_matrix(quat) * point
                    rotated.resize(1, 3)
                    self.func_data[theme_name]['ros_value'].append(rotated)
            except Exception as e:
                print(e)
                return
        return _imuCallback

    def imageColorCallback(self, theme_name):
        """Return a subscription callback that accumulates image statistics
        (mean of non-zero pixels, fill ratio, channels, shape, header size)."""
        def _imageColorCallback(data):
            self.prev_time = time.time()
            self.func_data[theme_name].setdefault('avg', [])
            self.func_data[theme_name].setdefault('ok_percent', [])
            self.func_data[theme_name].setdefault('num_channels', [])
            self.func_data[theme_name].setdefault('shape', [])
            self.func_data[theme_name].setdefault('reported_size', [])
            pyimg = image_msg_to_numpy(data)
            channels = pyimg.shape[2] if len(pyimg.shape) > 2 else 1
            ok_number = (pyimg != 0).sum()
            self.func_data[theme_name]['avg'].append(pyimg.sum() / ok_number)
            self.func_data[theme_name]['ok_percent'].append(float(ok_number) / (pyimg.shape[0] * pyimg.shape[1]) / channels)
            self.func_data[theme_name]['num_channels'].append(channels)
            self.func_data[theme_name]['shape'].append(pyimg.shape)
            self.func_data[theme_name]['reported_size'].append((data.width, data.height, data.step))
        return _imageColorCallback

    def imageDepthCallback(self, data):
        # Unused placeholder kept for interface compatibility.
        pass

    def pointscloudCallback(self, theme_name):
        """Return a subscription callback recording point-cloud statistics
        (mean xyz+rgb, point count, width, height) from the 2nd frame on."""
        def _pointscloudCallback(data):
            self.prev_time = time.time()
            print ('Got pointcloud: %d, %d' % (data.width, data.height))
            self.func_data[theme_name].setdefault('frame_counter', 0)
            self.func_data[theme_name].setdefault('avg', [])
            self.func_data[theme_name].setdefault('size', [])
            self.func_data[theme_name].setdefault('width', [])
            self.func_data[theme_name].setdefault('height', [])
            # until parsing pointcloud is done in real time, I'll use only the first frame.
            self.func_data[theme_name]['frame_counter'] += 1
            if self.func_data[theme_name]['frame_counter'] == 1:
                # Known issue - 1st pointcloud published has invalid texture. Skip 1st frame.
                return
            try:
                points = np.array([pc2_to_xyzrgb(pp) for pp in pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z", "rgb")) if pp[0] > 0])
            except Exception as e:
                print(e)
                return
            self.func_data[theme_name]['avg'].append(points.mean(0))
            self.func_data[theme_name]['size'].append(len(points))
            self.func_data[theme_name]['width'].append(data.width)
            self.func_data[theme_name]['height'].append(data.height)
        return _pointscloudCallback

    def wait_for_message(self, params, msg_type=msg_Image):
        """Block until one message arrives on params['topic'] (or the timeout
        expires) and return it; optionally dump IMU samples to a file."""
        topic = params['topic']
        print ('connect to ROS with name: %s' % self.node_name)
        rclpy.init()
        node = Node(self.node_name)
        out_filename = params.get('filename', None)
        if (out_filename):
            self.fout = open(out_filename, 'w')
            if msg_type is msg_Imu:
                col_w = 20
                print ('Writing to file: %s' % out_filename)
                columns = ['frame_number', 'frame_time(sec)', 'accel.x', 'accel.y', 'accel.z', 'gyro.x', 'gyro.y', 'gyro.z']
                # One left-aligned column of width col_w per header.
                line = ('{:<%d}' * len(columns) % ((col_w,) * len(columns))).format(*columns) + '\n'
                sys.stdout.write(line)
                self.fout.write(line)
        node.get_logger().info('Subscribing on topic: %s' % topic)
        sub = node.create_subscription(msg_type, topic, self.callback, qos.qos_profile_sensor_data)
        self.prev_time = time.time()
        break_timeout = False
        while not any([(not rclpy.ok()), break_timeout, self.result]):
            rclpy.spin_once(node)
            if self.timeout > 0 and time.time() - self.prev_time > self.timeout:
                break_timeout = True
                node.destroy_subscription(sub)
        return self.result

    @staticmethod
    def unregister_all(node, registers):
        """Destroy every subscription created by wait_for_messages()."""
        for test_name in registers:
            node.get_logger().info('Un-Subscribing test %s' % test_name)
            node.destroy_subscription(registers[test_name]['sub'])
            registers[test_name]['sub'] = None  # unregisters.

    def wait_for_messages(self, themes):
        """Subscribe to every requested theme and accumulate statistics until
        the timeout elapses; returns {theme_name: stats dict}."""
        # tests_params = {<name>: {'callback', 'topic', 'msg_type', 'internal_params'}}
        self.func_data = dict([[theme_name, {}] for theme_name in themes])
        node = Node('wait_for_messages')
        for theme_name in themes:
            theme = self.themes[theme_name]
            node.get_logger().info('Subscribing %s on topic: %s' % (theme_name, theme['topic']))
            self.func_data[theme_name]['sub'] = node.create_subscription(theme['msg_type'], theme['topic'], theme['callback'](theme_name), qos.qos_profile_sensor_data)
        self.tfBuffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tfBuffer, node)
        self.prev_time = time.time()
        break_timeout = False
        while not break_timeout:
            rclpy.spin_once(node, timeout_sec=1)
            if self.timeout > 0 and time.time() - self.prev_time > self.timeout:
                break_timeout = True
        self.unregister_all(node, self.func_data)
        node.destroy_node()
        return self.func_data

    def callback(self, data):
        """Single-topic callback for wait_for_message(): records the message
        and stops the wait when it matches the requested timestamp (if any)."""
        msg_time = data.header.stamp.sec + 1e-9 * data.header.stamp.nanosec
        if (self.prev_msg_time > msg_time):
            # Fix: this used rospy.loginfo(), but rospy is never imported in
            # this ROS2 (rclpy) script, so reaching it raised NameError.
            print('Out of order: %.9f > %.9f' % (self.prev_msg_time, msg_time))
        if type(data) == msg_Imu:
            col_w = 20
            accel = data.linear_acceleration
            gyro = data.angular_velocity
            line = ('\n{:<%d.6f}{:<%d.4f}{:<%d.4f}{:<%d.4f}{:<%d.4f}{:<%d.4f}{:<%d.4f}' % (col_w, col_w, col_w, col_w, col_w, col_w, col_w)).format(msg_time, accel.x, accel.y, accel.z, gyro.x, gyro.y, gyro.z)
            sys.stdout.write(line)
            if self.fout:
                self.fout.write(line)
        self.prev_msg_time = msg_time
        self.prev_msg_data = data
        self.prev_time = time.time()
        # Accept the message unless the caller asked for an exact stamp.
        if ((not self.time) or (data.header.stamp.sec == self.time['secs'] and data.header.stamp.nanosec == self.time['nsecs'])):
            self.result = data
def main():
    """CLI entry point: subscribe to a topic or theme and report what arrives."""
    if len(sys.argv) < 2 or '--help' in sys.argv or '/?' in sys.argv:
        print ('USAGE:')
        print ('------')
        print ('rs2_listener.py <topic | theme> [Options]')
        print ('example: rs2_listener.py /camera/color/image_raw --time 1532423022.044515610 --timeout 3')
        print ('example: rs2_listener.py pointscloud')
        print ('')
        print ('Application subscribes on <topic>, wait for the first message matching [Options].')
        print ('When found, prints the timestamp.')
        print ('')  # fix: was a bare 'print' expression, a no-op in Python 3
        print ('[Options:]')
        print ('-s <sequential number>')
        print ('--time <secs.nsecs>')
        print ('--timeout <secs>')
        print ('--filename <filename> : write output to file')
        exit(-1)
    # wanted_topic = '/device_0/sensor_0/Depth_0/image/data'
    # wanted_seq = 58250
    wanted_topic = sys.argv[1]
    msg_params = {}
    # Pick the message type from the topic/theme name.
    if 'points' in wanted_topic:
        msg_type = msg_PointCloud2
    elif ('imu' in wanted_topic) or ('gyro' in wanted_topic) or ('accel' in wanted_topic):
        msg_type = msg_Imu
    elif 'theora' in wanted_topic:
        try:
            msg_type = msg_theora
        except NameError as e:
            print ('theora_image_transport is not installed. \nType "sudo apt-get install ros-kinetic-theora-image-transport" to enable registering on messages of type theora.')
            raise
    else:
        msg_type = msg_Image
    # Parse the optional command-line arguments.
    for idx in range(2, len(sys.argv)):
        if sys.argv[idx] == '--time':
            msg_params['time'] = dict(zip(['secs', 'nsecs'], [int(part) for part in sys.argv[idx + 1].split('.')]))
        if sys.argv[idx] == '--timeout':
            msg_params['timeout_secs'] = int(sys.argv[idx + 1])
        if sys.argv[idx] == '--filename':
            msg_params['filename'] = sys.argv[idx+1]
    msg_retriever = CWaitForMessage(msg_params)
    if '/' in wanted_topic:
        # An explicit topic: wait for a single matching message.
        msg_params.setdefault('topic', wanted_topic)
        res = msg_retriever.wait_for_message(msg_params, msg_type)
        print('Got message: %s' % res.header)
    else:
        # A theme name: gather statistics for that theme.
        themes = [wanted_topic]
        res = msg_retriever.wait_for_messages(themes)
        print (res)
# Script entry point.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,410 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import shlex
from rs2_listener import CWaitForMessage
import rclpy
from rclpy.node import Node
from importRosbag.importRosbag import importRosbag
import numpy as np
import tf2_ros
import itertools
import subprocess
import time
global tf_timeout
tf_timeout = 5
def ImuGetData(rec_filename, topic):
    """Return the first accel sample of *topic* and the max spread per axis.

    res['value']    -- first accelerometer vector in the recording.
    res['max_diff'] -- per-axis (max - min) over the whole recording.
    """
    # res['value'] = first value of topic.
    # res['max_diff'] = max difference between returned value and all other values of topic in recording.
    samples = importRosbag(rec_filename, importTopics=[topic], log='ERROR', disable_bar=True)[topic]['acc']
    return {'value': samples[0, :],
            'max_diff': samples.max(0) - samples.min(0)}
def AccelGetData(rec_filename):
    """Accelerometer data from the recording's Accel_0 IMU topic."""
    accel_topic = '/device_0/sensor_2/Accel_0/imu/data'
    return ImuGetData(rec_filename, accel_topic)
def AccelGetDataDeviceStandStraight(rec_filename):
    """Accel ground truth for a device standing still, extended with the
    expected (gravity-up) vector after rotation into the ROS frame."""
    gt_data = AccelGetData(rec_filename)
    gt_data.update({
        'ros_value': np.array([0.63839424, 0.05380408, 9.85343552]),
        'ros_max_diff': np.array([0.06940174, 0.04032778, 0.05982018]),
    })
    return gt_data
def ImuTest(data, gt_data):
    """Check received IMU data against the recording, and check that the
    rotated (ROS-frame) samples match the expected gravity direction.

    Returns (passed, message).
    """
    # check that the imu data received is the same as in the recording.
    # check that in the rotated imu received the g-accelartation is pointing up according to ROS standards.
    try:
        first = data['value'][0]
        received = np.array([first.x, first.y, first.z])
        deviation = abs(received - gt_data['value']).max()
        allowed = gt_data['max_diff'].max()
        msg = 'original accel: Expect max diff of %.3f. Got %.3f.' % (allowed, deviation)
        print(msg)
        if deviation > allowed:
            return False, msg
        rotated_mean = np.array(data['ros_value']).mean(0)
        deviation = abs(rotated_mean - gt_data['ros_value']).max()
        allowed = gt_data['ros_max_diff'].max()
        msg = 'rotated to ROS: Expect max diff of %.3f. Got %.3f.' % (allowed, deviation)
        print(msg)
        if deviation > allowed:
            return False, msg
    except Exception as e:
        msg = '%s' % e
        print('Test Failed: %s' % msg)
        return False, msg
    return True, ''
def ImageGetData(rec_filename, topic):
    """Extract per-frame image statistics for *topic* from a rosbag.

    Returns a dict with the channel count, frame shape, mean pixel value
    (plus an epsilon band), fill percentage and the expected reported
    [width, height, step] message header.
    """
    frame_means = []
    fill_ratios = []
    data = importRosbag(rec_filename, importTopics=[topic], log='ERROR', disable_bar=True)[topic]
    for pyimg in data['frames']:
        nonzero = (pyimg != 0).sum()
        fill_ratios.append(float(nonzero) / (pyimg.shape[0] * pyimg.shape[1]))
        frame_means.append(pyimg.sum() / nonzero)
    frame_means = np.array(frame_means)
    # 'pyimg' is the last decoded frame; all frames share its shape/dtype.
    channels = pyimg.shape[2] if len(pyimg.shape) > 2 else 1
    res = dict()
    res['num_channels'] = channels
    res['shape'] = pyimg.shape
    res['avg'] = frame_means.mean()
    res['ok_percent'] = {'value': (np.array(fill_ratios).mean()) / channels, 'epsilon': 0.01}
    # Tolerance: the largest one-sided spread of frame means around the average.
    res['epsilon'] = max(frame_means.max() - res['avg'], res['avg'] - frame_means.min())
    res['reported_size'] = [pyimg.shape[1], pyimg.shape[0], pyimg.shape[1] * pyimg.dtype.itemsize * channels]
    return res
def ImageColorGetData(rec_filename):
    """Ground-truth statistics for the recording's color stream."""
    color_topic = '/device_0/sensor_1/Color_0/image/data'
    return ImageGetData(rec_filename, color_topic)
def ImageDepthGetData(rec_filename):
    """Ground-truth statistics for the recording's depth stream."""
    depth_topic = '/device_0/sensor_0/Depth_0/image/data'
    return ImageGetData(rec_filename, depth_topic)
def ImageDepthInColorShapeGetData(rec_filename):
    """Depth ground truth reshaped to the color stream's geometry, used by
    the aligned-depth-to-color tests."""
    gt_data = ImageDepthGetData(rec_filename)
    color_data = ImageColorGetData(rec_filename)
    gt_data['shape'] = color_data['shape'][:2]
    gt_data['reported_size'] = color_data['reported_size']
    # Depth pixels are 2 bytes, so the row step is width * 2.
    gt_data['reported_size'][2] = gt_data['reported_size'][0] * 2
    # Aligned depth has more holes than raw depth; widen the tolerance.
    gt_data['ok_percent']['epsilon'] *= 3
    return gt_data
def ImageDepthGetData_decimation(rec_filename):
    """Depth ground truth with dimensions halved, for the decimation
    filter test; the averaging tolerance is also widened."""
    gt_data = ImageDepthGetData(rec_filename)
    gt_data['shape'] = [dim / 2 for dim in gt_data['shape']]
    gt_data['reported_size'] = [dim / 2 for dim in gt_data['reported_size']]
    gt_data['epsilon'] *= 3
    return gt_data
def ImageColorTest(data, gt_data):
    """Compare received image statistics against ground truth.

    Verifies channel count, shape consistency, reported header size,
    average pixel value and fill percentage. Returns (passed, message).
    """
    # check that all data['num_channels'] are the same as gt_data['num_channels'] and that avg value of all
    # images are within epsilon of gt_data['avg']
    try:
        if 'num_channels' not in data:
            return False, 'No data received'
        channels = list(set(data['num_channels']))
        msg = 'Expect %d channels. Got %d channels.' % (gt_data['num_channels'], channels[0])
        print(msg)
        if len(channels) > 1 or channels[0] != gt_data['num_channels']:
            return False, msg
        msg = 'Expected all received images to be the same shape. Got %s' % str(set(data['shape']))
        print(msg)
        if len(set(data['shape'])) > 1:
            return False, msg
        shape = list(set(data['shape']))[0]
        msg = 'Expected shape to be %s. Got %s' % (gt_data['shape'], shape)
        print(msg)
        if (np.array(shape) != np.array(gt_data['shape'])).any():
            return False, msg
        reported = list(set(data['reported_size']))[0]
        msg = 'Expected header [width, height, step] to be %s. Got %s' % (gt_data['reported_size'], reported)
        print(msg)
        if (np.array(reported) != np.array(gt_data['reported_size'])).any():
            return False, msg
        mean_avg = np.array(data['avg']).mean()
        msg = 'Expect average of %.3f (+-%.3f). Got average of %.3f.' % (gt_data['avg'].mean(), gt_data['epsilon'], mean_avg)
        print(msg)
        if abs(mean_avg - gt_data['avg'].mean()) > gt_data['epsilon']:
            return False, msg
        ok_floor = gt_data['ok_percent']['value'] - gt_data['ok_percent']['epsilon']
        mean_ok = np.array(data['ok_percent']).mean()
        msg = 'Expect no holes percent > %.3f. Got %.3f.' % (ok_floor, mean_ok)
        print(msg)
        if mean_ok < ok_floor:
            return False, msg
    except Exception as e:
        msg = '%s' % e
        print('Test Failed: %s' % msg)
        return False, msg
    return True, ''
def ImageColorTest_3epsilon(data, gt_data):
    """ImageColorTest with triple the average-value tolerance.

    Note: mutates gt_data['epsilon'] in place (original behavior kept).
    """
    gt_data['epsilon'] *= 3
    return ImageColorTest(data, gt_data)
def NotImageColorTest(data, gt_data):
    """Inverted ImageColorTest: passes when the image test fails (used by
    the deliberately-broken 'no_file' scenario)."""
    passed, message = ImageColorTest(data, gt_data)
    return (not passed, message)
def PointCloudTest(data, gt_data):
    """Compare point-cloud statistics (cloud size, mean position, mean
    color) against ground truth. Returns (passed, message)."""
    mean_width = np.array(data['width']).mean()
    mean_height = np.array(data['height']).mean()
    msg = 'Expect image size %d(+-%d), %d. Got %d, %d.' % (gt_data['width'][0], gt_data['width'][1], gt_data['height'][0], mean_width, mean_height)
    print(msg)
    if abs(mean_width - gt_data['width'][0]) > gt_data['width'][1] or mean_height != gt_data['height'][0]:
        return False, msg
    # First three columns are xyz, the remaining three are rgb.
    mean_pos = np.array([frame_avg[:3] for frame_avg in data['avg']]).mean(0)
    msg = 'Expect average position of %s (+-%.3f). Got average of %s.' % (gt_data['avg'][0][:3], gt_data['epsilon'][0], mean_pos)
    print(msg)
    if abs(mean_pos - gt_data['avg'][0][:3]).max() > gt_data['epsilon'][0]:
        return False, msg
    mean_col = np.array([frame_avg[3:] for frame_avg in data['avg']]).mean(0)
    msg = 'Expect average color of %s (+-%.3f). Got average of %s.' % (gt_data['avg'][0][3:], gt_data['epsilon'][1], mean_col)
    print(msg)
    if abs(mean_col - gt_data['avg'][0][3:]).max() > gt_data['epsilon'][1]:
        return False, msg
    return True, ''
def staticTFTest(data, gt_data):
    """Verify that every expected static transform was received and matches
    the ground-truth translation/rotation to within 1e-5."""
    for couple, (gt_trans, gt_rot) in gt_data.items():
        transform = data[couple]
        if transform is None:
            msg = 'Tf is None for couple %s' % '->'.join(couple)
            return False, msg
        trans = transform.translation
        rot = transform.rotation
        np_trans = np.array([trans.x, trans.y, trans.z])
        np_rot = np.array([rot.x, rot.y, rot.z, rot.w])
        if any(abs(np_trans - gt_trans) > 1e-5) or any(abs(np_rot - gt_rot) > 1e-5):
            msg = 'Tf is changed for couple %s' % '->'.join(couple)
            return False, msg
    return True, ''
# Registry mapping each test type to:
#   'listener_theme' -- the CWaitForMessage theme whose live statistics feed the test
#   'data_func'      -- builds ground-truth data from the recording filename
#   'test_func'      -- compares the listener results with the ground truth
test_types = {'vis_avg': {'listener_theme': 'colorStream',
                          'data_func': ImageColorGetData,
                          'test_func': ImageColorTest},
              'depth_avg': {'listener_theme': 'depthStream',
                            'data_func': ImageDepthGetData,
                            'test_func': ImageColorTest},
              # 'no_file' deliberately uses a missing recording; the inverted
              # test passes only when the normal image test fails.
              'no_file': {'listener_theme': 'colorStream',
                          'data_func': lambda x: None,
                          'test_func': NotImageColorTest},
              # Point-cloud ground truth is hard-coded rather than derived
              # from the recording.
              'pointscloud_avg': {'listener_theme': 'pointscloud',
                                  'data_func': lambda x: {'width': [660353, 2300], 'height': [1], 'avg': [np.array([ 1.28251814, -0.15839984, 4.82235184, 80, 160, 240])], 'epsilon': [0.04, 5]},
                                  'test_func': PointCloudTest},
              'align_depth_ir1': {'listener_theme': 'alignedDepthInfra1',
                                  'data_func': ImageDepthGetData,
                                  'test_func': ImageColorTest},
              'align_depth_color': {'listener_theme': 'alignedDepthColor',
                                    'data_func': ImageDepthInColorShapeGetData,
                                    'test_func': ImageColorTest_3epsilon},
              'depth_avg_decimation': {'listener_theme': 'depthStream',
                                       'data_func': ImageDepthGetData_decimation,
                                       'test_func': ImageColorTest},
              'align_depth_ir1_decimation': {'listener_theme': 'alignedDepthInfra1',
                                             'data_func': ImageDepthGetData,
                                             'test_func': ImageColorTest},
              # Hard-coded expected static transforms between camera frames,
              # keyed by (from_frame, to_frame) and valued as (translation, rotation).
              'static_tf': {'listener_theme': 'static_tf',
                            'data_func': lambda x: {('camera_link', 'camera_color_frame'): ([-0.00010158783697988838, 0.014841210097074509, -0.00022671300393994898], [-0.0008337442995980382, 0.0010442184284329414, -0.0009920650627464056, 0.9999986290931702]),
                                                    ('camera_link', 'camera_depth_frame'): ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]),
                                                    ('camera_link', 'camera_infra1_frame'): ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]),
                                                    ('camera_depth_frame', 'camera_infra1_frame'): ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]),
                                                    ('camera_depth_frame', 'camera_color_frame'): ([-0.00010158783697988838, 0.014841210097074509, -0.00022671300393994898], [-0.0008337442995980382, 0.0010442184284329414, -0.0009920650627464056, 0.9999986290931702]),
                                                    ('camera_infra1_frame', 'camera_color_frame'): ([-0.00010158783697988838, 0.014841210097074509, -0.00022671300393994898], [-0.0008337442995980382, 0.0010442184284329414, -0.0009920650627464056, 0.9999986290931702])}
                            ,
                            'test_func': staticTFTest},
              'accel_up': {'listener_theme': 'accelStream',
                           'data_func': AccelGetDataDeviceStandStraight,
                           'test_func': ImuTest},
              }
def run_test(test, listener_res):
    """Run one test: build ground truth from the recording named in the test
    params, then compare it with the live listener results."""
    # gather ground truth with test_types[test['type']]['data_func'] and recording from test['rosbag_filename']
    # return results from test_types[test['type']]['test_func']
    handlers = test_types[test['type']]
    gt_data = handlers['data_func'](test['params']['rosbag_filename'])
    listener_data = listener_res[handlers['listener_theme']]
    return handlers['test_func'](listener_data, gt_data)
def print_results(results):
    """Pretty-print the test results as an aligned table.

    results: list of [test_name, (passed, message)] pairs.
    """
    title = 'TEST RESULTS'
    headers = ['index', 'test name', 'score', 'message']
    # Each column grows to fit its longest entry, plus one space of padding.
    col_0_width = len(headers[0]) + 1
    col_1_width = max([len(headers[1])] + [len(test[0]) for test in results]) + 1
    col_2_width = max([len(headers[2]), len('OK'), len('FAILED')]) + 1
    col_3_width = max([len(headers[3])] + [len(test[1][1]) for test in results]) + 1
    total_width = col_0_width + col_1_width + col_2_width + col_3_width
    print('')  # fix: was a bare 'print' expression, a no-op in Python 3
    print(('{:^%ds}' % total_width).format(title))
    print('-' * total_width)
    print(('{:<%ds}{:<%ds}{:>%ds} : {:<%ds}' % (col_0_width, col_1_width, col_2_width, col_3_width)).format(*headers))
    print('-' * (col_0_width - 1) + ' ' * 1 + '-' * (col_1_width - 1) + ' ' * 2 + '-' * (col_2_width - 1) + ' ' * 3 + '-' * (col_3_width - 1))
    print('\n'.join([('{:<%dd}{:<%ds}{:>%ds} : {:<s}' % (col_0_width, col_1_width, col_2_width)).format(idx, test[0], 'OK' if test[1][0] else 'FAILED', test[1][1]) for idx, test in enumerate(results)]))
    print('')  # fix: trailing blank line (was also a bare 'print')
def get_tfs(coupled_frame_ids):
    """Look up the transform for every (from_frame, to_frame) pair.

    Pairs for which no transform is available map to None.
    """
    res = dict()
    tf_buffer = tf2_ros.Buffer()
    node = Node('tf_listener')
    # The listener must stay alive while the buffer is queried.
    listener = tf2_ros.TransformListener(tf_buffer, node)
    rclpy.spin_once(node)
    for couple in coupled_frame_ids:
        from_id, to_id = couple
        if tf_buffer.can_transform(from_id, to_id, rclpy.time.Time(), rclpy.time.Duration(nanoseconds=3e6)):
            res[couple] = tf_buffer.lookup_transform(from_id, to_id, rclpy.time.Time(), rclpy.time.Duration(nanoseconds=1e6)).transform
        else:
            res[couple] = None
    return res
def kill_realsense2_camera_node():
    """Send SIGINT to any running realsense2_camera_node process."""
    # The '[r]ealsense...' bracket trick stops grep from matching itself.
    cmd = "kill -s INT $(ps aux | grep '[r]ealsense2_camera_node' | awk '{print $2}')"
    os.system(cmd)
def run_tests(tests):
    """Launch the camera node once per unique launch-parameter set and run
    every test sharing those parameters against the live topics.

    Returns a list of [test_name, (passed, message)] pairs.
    """
    msg_params = {'timeout_secs': 5}
    results = []
    # Group tests by their launch parameters so the node is started once
    # per unique configuration.
    params_strs = set([test['params_str'] for test in tests])
    for params_str in params_strs:
        rclpy.init()
        rec_tests = [test for test in tests if test['params_str'] == params_str]
        themes = [test_types[test['type']]['listener_theme'] for test in rec_tests]
        msg_retriever = CWaitForMessage(msg_params)
        print('*' * 30)
        print('Running the following tests: %s' % ('\n' + '\n'.join([test['name'] for test in rec_tests])))
        print('*' * 30)
        # The node occasionally fails to come up; retry a few times.
        num_of_startups = 5
        is_node_up = False
        for run_no in range(num_of_startups):
            print('')  # fix: was a bare 'print' expression, a no-op in Python 3
            print('*' * 8 + ' Starting ROS ' + '*' * 8)
            print('running node (%d/%d)' % (run_no, num_of_startups))
            cmd_params = ['ros2', 'launch', 'realsense2_camera', 'rs_launch.py'] + params_str.split(' ')
            print('running command: ' + ' '.join(cmd_params))
            p_wrapper = subprocess.Popen(cmd_params, stdout=None, stderr=None)
            time.sleep(2)
            service_list = subprocess.check_output(['ros2', 'node', 'list']).decode("utf-8")
            is_node_up = '/camera/camera' in service_list
            if is_node_up:
                print('Node is UP')
                break
            print('Node is NOT UP')
            print('*' * 8 + ' Killing ROS ' + '*' * 9)
            try:
                p_wrapper.terminate()
                p_wrapper.wait(timeout=2)
            except subprocess.TimeoutExpired:
                # The launch wrapper did not exit; kill the camera node directly.
                kill_realsense2_camera_node()
            print('DONE')
        if is_node_up:
            listener_res = msg_retriever.wait_for_messages(themes)
            if 'static_tf' in [test['type'] for test in rec_tests]:
                print('Gathering static transforms')
                frame_ids = ['camera_link', 'camera_depth_frame', 'camera_infra1_frame', 'camera_infra2_frame', 'camera_color_frame']
                coupled_frame_ids = [xx for xx in itertools.combinations(frame_ids, 2)]
                listener_res['static_tf'] = get_tfs(coupled_frame_ids)
            print('*' * 8 + ' Killing ROS ' + '*' * 9)
            kill_realsense2_camera_node()
            p_wrapper.wait()
            print('*' * 8 + ' Killed ' + '*' * 9)
        else:
            # No live data: feed empty stats so the tests fail gracefully.
            listener_res = dict([[theme_name, {}] for theme_name in themes])
        rclpy.shutdown()
        print('*' * 30)
        print('DONE run')
        print('*' * 30)
        for test in rec_tests:
            try:
                res = run_test(test, listener_res)
            except Exception as e:
                print('Test %s Failed: %s' % (test['name'], e))
                res = False, '%s' % e
            results.append([test['name'], res])
    return results
def main():
    """CLI entry point: build the test table, normalize launch parameters,
    run the selected tests and exit with 0 on success / -1 on any failure."""
    outdoors_filename = './records/outdoors_1color.bag'
    all_tests = [{'name': 'non_existent_file', 'type': 'no_file', 'params': {'rosbag_filename': '/home/non_existent_file.txt'}},
                 {'name': 'vis_avg_2', 'type': 'vis_avg', 'params': {'rosbag_filename': outdoors_filename}},
                 {'name': 'depth_avg_1', 'type': 'depth_avg', 'params': {'rosbag_filename': outdoors_filename}},
                 #{'name': 'points_cloud_1', 'type': 'pointscloud_avg', 'params': {'rosbag_filename': outdoors_filename, 'pointcloud.enable': 'true'}},
                 {'name': 'depth_w_cloud_1', 'type': 'depth_avg', 'params': {'rosbag_filename': outdoors_filename, 'pointcloud.enable': 'true'}},
                 {'name': 'align_depth_color_1', 'type': 'align_depth_color', 'params': {'rosbag_filename': outdoors_filename, 'align_depth.enable':'true'}},
                 {'name': 'align_depth_ir1_1', 'type': 'align_depth_ir1', 'params': {'rosbag_filename': outdoors_filename, 'align_depth.enable': 'true', 'enable_infra1':'true', 'enable_infra2':'true'}},
                 {'name': 'depth_avg_decimation_1', 'type': 'depth_avg_decimation', 'params': {'rosbag_filename': outdoors_filename, 'decimation_filter.enable':'true'}},
                 {'name': 'align_depth_ir1_decimation_1', 'type': 'align_depth_ir1_decimation', 'params': {'rosbag_filename': outdoors_filename, 'align_depth.enable':'true', 'decimation_filter.enable':'true'}},
                 {'name': 'static_tf_1', 'type': 'static_tf', 'params': {'rosbag_filename': outdoors_filename, 'enable_infra1':'true', 'enable_infra2':'true'}},
                 {'name': 'accel_up_1', 'type': 'accel_up', 'params': {'rosbag_filename': './records/D435i_Depth_and_IMU_Stands_still.bag', 'enable_accel': 'true', 'accel_fps': '0.0'}},
                 ]
    # Normalize parameters:
    for test in all_tests:
        test['params']['rosbag_filename'] = os.path.abspath(test['params']['rosbag_filename'])
        # Zero width/height lets the node pick the stream's native resolution.
        test['params']['color_width'] = '0'
        test['params']['color_height'] = '0'
        test['params']['depth_width'] = '0'
        test['params']['depth_height'] = '0'
        test['params']['infra_width'] = '0'
        test['params']['infra_height'] = '0'
        # Canonical 'key:=value ...' string, also used to group tests per launch.
        test['params_str'] = ' '.join([key + ':=' + test['params'][key] for key in sorted(test['params'].keys())])
    if len(sys.argv) < 2 or '--help' in sys.argv or '/?' in sys.argv:
        print ('USAGE:')
        print ('------')
        print ('rs2_test.py --all | <test_name> [<test_name> [...]]')
        print ('')  # fix: was a bare 'print' expression, a no-op in Python 3
        print ('Available tests are:')
        print ('\n'.join([test['name'] for test in all_tests]))
        exit(-1)
    if '--all' in sys.argv[1:]:
        tests_to_run = all_tests
    else:
        tests_to_run = [test for test in all_tests if test['name'] in sys.argv[1:]]
    results = run_tests(tests_to_run)
    print_results(results)
    # Exit code: 0 when every test passed, -1 otherwise.
    res = int(all([result[1][0] for result in results])) - 1
    print ('exit (%d)' % res)
    exit(res)
# Script entry point.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,148 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
import sys
import geometry_msgs.msg
import termios
import tty
import os
import math
import json
from tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster
from tf_transformations import quaternion_from_euler
def getch():
    """Read a single keypress from stdin without waiting for Enter."""
    fd = sys.stdin.fileno()
    saved_settings = termios.tcgetattr(fd)
    try:
        # Raw mode delivers each keypress immediately, without echo.
        tty.setraw(fd)
        key = sys.stdin.read(1)
    finally:
        # Always restore the terminal, even if the read is interrupted.
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
    return key
def main():
    # Placeholder entry point; the actual logic runs in the __main__ block.
    return
def print_status(status):
    """Overwrite the current console line with mode, value, step, message."""
    mode = status['mode']
    fields = (mode, status[mode]['value'], status[mode]['step'], status['message'])
    sys.stdout.write('%-8s%-8s%-8s%-40s\r' % fields)
def publish_status(node, broadcaster, status):
    """Broadcast the current x/y/z/azimuth/pitch/roll as a static transform.

    Reads the module-level 'from_cam'/'to_cam' frame names, which are set
    in the __main__ block before this is called.
    """
    tf_msg = geometry_msgs.msg.TransformStamped()
    tf_msg.header.stamp = node.get_clock().now().to_msg()
    tf_msg.header.frame_id = from_cam
    tf_msg.child_frame_id = to_cam
    tf_msg.transform.translation.x = status['x']['value']
    tf_msg.transform.translation.y = status['y']['value']
    tf_msg.transform.translation.z = status['z']['value']
    # Angles are edited in degrees; convert to radians for the quaternion.
    quat = quaternion_from_euler(math.radians(status['roll']['value']),
                                 math.radians(status['pitch']['value']),
                                 math.radians(status['azimuth']['value']))
    tf_msg.transform.rotation.x = quat[0]
    tf_msg.transform.rotation.y = quat[1]
    tf_msg.transform.rotation.z = quat[2]
    tf_msg.transform.rotation.w = quat[3]
    broadcaster.sendTransform(tf_msg)
# Interactive keyboard UI: publish and tune a static transform between two
# camera frames, persisting the values to a JSON file between runs.
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print ('USAGE:')
        print ('set_cams_transforms.py from_cam to_cam x y z azimuth pitch roll')
        print ('x, y, z: in meters')
        print ('azimuth, pitch, roll: in degrees')
        print ('If parameters are not given read last used parameters.')
        print ('[OPTIONS]')
        print ('--file <file name> : if given, default values are loaded from file')
        sys.exit(-1)
    from_cam, to_cam = sys.argv[1:3]
    # Choose the persistence file: --file argument, or a default next to the script.
    try:
        filename = sys.argv[sys.argv.index('--file')+1]
        print ('Using file %s' % os.path.abspath(filename))
    except:
        filename = os.path.join(os.path.dirname(__file__), '_set_cams_info_file.txt')
        print ('Using default file %s' % os.path.abspath(filename))
    if len(sys.argv) >= 9:
        # NOTE(review): the slice [3:10] can yield up to 7 items while 6 names
        # are unpacked, so extra trailing arguments (e.g. '--file ...' placed
        # after the values) raise ValueError here — confirm intended usage.
        x, y, z, yaw, pitch, roll = [float(arg) for arg in sys.argv[3:10]]
        # 'step' is the increment applied by the 4/6 keys for each mode.
        status = {'mode': 'pitch',
                  'x': {'value': x, 'step': 0.1},
                  'y': {'value': y, 'step': 0.1},
                  'z': {'value': z, 'step': 0.1},
                  'azimuth': {'value': yaw, 'step': 1},
                  'pitch': {'value': pitch, 'step': 1},
                  'roll': {'value': roll, 'step': 1},
                  'message': ''}
        print ('Use given initial values.')
    else:
        try:
            # NOTE(review): only IOError is caught — a corrupt file raises
            # json.JSONDecodeError and crashes; confirm whether that is intended.
            status = json.load(open(filename, 'r'))
            print ('Read initial values from file.')
        except IOError as e:
            print ('Failed reading initial parameters from file %s' % filename)
            print ('Initial parameters must be given for initial run or if an un-initialized file has been given.')
            sys.exit(-1)
    rclpy.init()
    node = Node('my_static_tf2_broadcaster')
    #rospy.init_node('my_static_tf2_broadcaster')
    broadcaster = StaticTransformBroadcaster(node)
    print  # NOTE(review): bare 'print' is a no-op expression in Python 3
    print ('Press the following keys to change mode: x, y, z, (a)zimuth, (p)itch, (r)oll')
    print ('For each mode, press 6 to increase by step and 4 to decrease')
    print ('Press + to multiply step by 2 or - to divide')
    print ('Press Q to quit')
    # Map each mode to its first letter ('message' also contributes 'm', unused).
    status_keys = [key[0] for key in status.keys()]
    print ('%-8s%-8s%-8s%s' % ('Mode', 'value', 'step', 'message'))
    print_status(status)
    publish_status(node, broadcaster, status)
    while True:
        kk = getch()
        status['message'] = ''
        try:
            # A mode letter switches the active parameter.
            key_idx = status_keys.index(kk)
            status['mode'] = list(status.keys())[key_idx]
        except ValueError as e:
            # Not a mode letter: treat as an edit/quit command.
            if kk.upper() == 'Q':
                sys.stdout.write('\n')
                exit(0)
            elif kk == '4':
                status[status['mode']]['value'] -= status[status['mode']]['step']
            elif kk == '6':
                status[status['mode']]['value'] += status[status['mode']]['step']
            elif kk == '-':
                status[status['mode']]['step'] /= 2.0
            elif kk == '+':
                status[status['mode']]['step'] *= 2.0
            else:
                status['message'] = 'Invalid key:' + kk
        print_status(status)
        publish_status(node, broadcaster, status)
        # Persist after every keypress so the next run resumes from here.
        json.dump(status, open(filename, 'w'), indent=4)
    #rospy.spin()

View File

@@ -0,0 +1,105 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import sys
import os
import numpy as np
import pyrealsense2 as rs2
if (not hasattr(rs2, 'intrinsics')):
import pyrealsense2.pyrealsense2 as rs2
class ImageListener(Node):
    """Subscribes to a depth image topic, finds the closest non-zero pixel
    and prints its range (and 3D coordinate once intrinsics are known)."""

    def __init__(self, depth_image_topic, depth_info_topic):
        # Name the node after the script file (without extension).
        node_name = os.path.basename(sys.argv[0]).split('.')[0]
        super().__init__(node_name)
        self.bridge = CvBridge()
        self.sub = self.create_subscription(msg_Image, depth_image_topic, self.imageDepthCallback, 1)
        self.sub_info = self.create_subscription(CameraInfo, depth_info_topic, self.imageDepthInfoCallback, 1)
        self.intrinsics = None   # rs2.intrinsics, filled by the camera-info callback
        self.pix = None          # (u, v) of the closest pixel in the last frame
        self.pix_grade = None    # optional confidence grade appended to the output line

    def imageDepthCallback(self, data):
        """Print the range (and deprojected 3D point) of the closest pixel."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)
            # pick one pixel among all the pixels with the closest range:
            indices = np.array(np.where(cv_image == cv_image[cv_image > 0].min()))[:, 0]
            pix = (indices[1], indices[0])
            self.pix = pix
            line = '\rDepth at pixel(%3d, %3d): %7.1f(mm).' % (pix[0], pix[1], cv_image[pix[1], pix[0]])
            if self.intrinsics:
                depth = cv_image[pix[1], pix[0]]
                result = rs2.rs2_deproject_pixel_to_point(self.intrinsics, [pix[0], pix[1]], depth)
                line += ' Coordinate: %8.2f %8.2f %8.2f.' % (result[0], result[1], result[2])
            if self.pix_grade is not None:  # fix: was the anti-idiom 'not self.pix_grade is None'
                line += ' Grade: %2d' % self.pix_grade
            line += '\r'
            sys.stdout.write(line)
            sys.stdout.flush()
        except CvBridgeError as e:
            print(e)
            return
        except ValueError as e:
            # Raised when the frame has no positive depth values (.min() of
            # an empty selection); skip the frame silently.
            return

    def imageDepthInfoCallback(self, cameraInfo):
        """Cache camera intrinsics from the first CameraInfo message."""
        try:
            if self.intrinsics:
                return
            self.intrinsics = rs2.intrinsics()
            self.intrinsics.width = cameraInfo.width
            self.intrinsics.height = cameraInfo.height
            # Principal point and focal lengths from the 3x3 K matrix.
            self.intrinsics.ppx = cameraInfo.k[2]
            self.intrinsics.ppy = cameraInfo.k[5]
            self.intrinsics.fx = cameraInfo.k[0]
            self.intrinsics.fy = cameraInfo.k[4]
            if cameraInfo.distortion_model == 'plumb_bob':
                self.intrinsics.model = rs2.distortion.brown_conrady
            elif cameraInfo.distortion_model == 'equidistant':
                self.intrinsics.model = rs2.distortion.kannala_brandt4
            self.intrinsics.coeffs = [i for i in cameraInfo.d]
        except CvBridgeError as e:
            # NOTE(review): no CvBridge call happens in this method, so this
            # handler is effectively dead; kept for behavior compatibility.
            print(e)
            return
def main():
    """Entry point: print a usage banner, then spin the listener until shutdown."""
    depth_image_topic = '/camera/depth/image_rect_raw'
    depth_info_topic = '/camera/depth/camera_info'

    print ()
    print ('show_center_depth.py')
    print ('--------------------')
    print ('App to demontrate the usage of the /camera/depth topics.')
    print ()
    print ('Application subscribes to %s and %s topics.' % (depth_image_topic, depth_info_topic))
    print ('Application then calculates and print the range to the closest object.')
    print ('If intrinsics data is available, it also prints the 3D location of the object')
    print ()

    # Blocks here until the node is shut down (e.g. Ctrl-C).
    listener = ImageListener(depth_image_topic, depth_info_topic)
    rclpy.spin(listener)
    listener.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    # Initialize the ROS client library before any node is constructed.
    rclpy.init(args=sys.argv)
    main()

View File

@@ -0,0 +1,65 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
import sensor_msgs.msg
import sys
import time
class ImageListener(Node):
    """ROS2 node that measures and prints the arrival rate (Hz) of messages on
    a PointCloud2 or Image topic, using a sliding window of arrival timestamps.

    The rate line is printed at most once per second.
    """

    def __init__(self, topic):
        self.topic = topic
        super().__init__('topic_hz')
        if 'points' in topic:
            self.sub = self.create_subscription(sensor_msgs.msg.PointCloud2, topic, self.imageDepthCallback, 1)
        elif 'image' in topic:
            self.sub = self.create_subscription(sensor_msgs.msg.Image, topic, self.imageDepthCallback, 1)
        else:
            # BUGFIX: raising a plain tuple is a TypeError in Python 3
            # ("exceptions must derive from BaseException"); raise a real
            # exception type instead.
            raise ValueError('Unknown message type for topic %s' % topic)
        self.message_times = []        # sliding window of arrival timestamps
        self.max_buffer_size = 100     # window length, in messages
        self.print_time = time.time()  # last time a rate line was printed

    def imageDepthCallback(self, data):
        """Record the message arrival time and periodically print the windowed rate."""
        crnt_time = time.time()
        self.message_times.append(crnt_time)
        if (len(self.message_times) > self.max_buffer_size):
            del self.message_times[0]
        if (crnt_time - self.print_time > 1):
            # BUGFIX: guard the division — with a single timestamp in the
            # window the elapsed span is 0 and the original raised
            # ZeroDivisionError.
            elapsed = self.message_times[-1] - self.message_times[0]
            if elapsed > 0:
                rate = len(self.message_times) / elapsed
                print('Frame rate at time: %s: %.02f(Hz)' % (time.ctime(crnt_time), rate))
            # BUGFIX: reset the throttle timestamp; the original never updated
            # print_time, so after the first second it printed on every message.
            self.print_time = crnt_time
def main():
    """Parse the topic name from argv and spin the rate-measuring listener."""
    # Print usage and exit when no topic was given or help was requested.
    if (len(sys.argv) < 2 or '-h' in sys.argv or '--help' in sys.argv):
        print ()
        print ('USAGE:')
        print ('------')
        print ('python3 ./topic_hz.py <topic>')
        print ('Application to act as ros2 topic hz : print the rate of which the messages on given topic arrive.')
        print ()
        sys.exit(0)
    topic = sys.argv[1]
    # Blocks here until the node is shut down (e.g. Ctrl-C).
    listener = ImageListener(topic)
    rclpy.spin(listener)
    listener.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    # Initialize the ROS client library before any node is constructed.
    rclpy.init(args=sys.argv)
    main()

View File

@@ -0,0 +1,133 @@
// Copyright 2024 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../include/base_realsense_node.h"
using namespace realsense2_camera;
using namespace rs2;
/***
* Implementation of ROS2 Actions based on:
* ROS2 Actions Design: https://design.ros2.org/articles/actions.html
* ROS2 Actions Tutorials/Examples: https://docs.ros.org/en/rolling/Tutorials/Intermediate/Writing-an-Action-Server-Client/Cpp.html
*/
// Triggered Calibration Action Struct (Message)
/*
# request
string json "calib run"
---
# result
string calibration
float32 health
bool success
string error_msg
---
# feedback
float32 progress
*/
/***
* A callback for handling new goals (requests) for Triggered Calibration
* This implementation just accepts all goals with no restriction on the json input
*/
rclcpp_action::GoalResponse BaseRealSenseNode::TriggeredCalibrationHandleGoal(const rclcpp_action::GoalUUID & uuid,
                                                                              std::shared_ptr<const TriggeredCalibration::Goal> goal)
{
    (void)uuid; // unused parameter
    ROS_INFO_STREAM("TriggeredCalibrationAction: Received request with json " << goal->json);
    // Accept unconditionally; goal->json is only consumed later by
    // run_on_chip_calibration() in TriggeredCalibrationExecute().
    return rclcpp_action::GoalResponse::ACCEPT_AND_EXECUTE;
}
/***
* A callback for handling cancel events
* This implementation just tells the client that it accepted the cancellation.
*/
rclcpp_action::CancelResponse BaseRealSenseNode::TriggeredCalibrationHandleCancel(const std::shared_ptr<GoalHandleTriggeredCalibration> goal_handle)
{
    (void)goal_handle; // unused parameter
    ROS_INFO("TriggeredCalibrationAction: Received request to cancel");
    // Always accept the cancellation request; no in-progress work is
    // interrupted here.
    return rclcpp_action::CancelResponse::ACCEPT;
}
/***
* A callback that accepts a new goal (request) and starts processing it.
* Since the execution is a long-running operation, we spawn off a
* thread to do the actual work and return from handle_accepted quickly.
*/
void BaseRealSenseNode::TriggeredCalibrationHandleAccepted(const std::shared_ptr<GoalHandleTriggeredCalibration> goal_handle)
{
    using namespace std::placeholders;
    ROS_INFO("TriggeredCalibrationAction: Request accepted");
    // this needs to return quickly to avoid blocking the executor, so spin up a new thread
    // The detached thread owns the goal_handle via the shared_ptr copy.
    std::thread{std::bind(&BaseRealSenseNode::TriggeredCalibrationExecute, this, _1), goal_handle}.detach();
}
/***
* All processing and updates of Triggered Calibration operation
* are done in this execute method in the new thread called by the
* TriggeredCalibrationHandleAccepted() above.
*/
// Runs the on-chip calibration to completion on the worker thread, streaming
// progress as action feedback and reporting success / cancel / abort.
void BaseRealSenseNode::TriggeredCalibrationExecute(const std::shared_ptr<GoalHandleTriggeredCalibration> goal_handle)
{
    ROS_INFO("TriggeredCalibrationAction: Executing...");

    const auto goal = goal_handle->get_goal(); // get the TriggeredCalibration srv struct
    auto feedback = std::make_shared<TriggeredCalibration::Feedback>();
    // Alias into the feedback message so the progress callback writes directly into it.
    float & _progress = feedback->progress;
    auto result = std::make_shared<TriggeredCalibration::Result>();

    try
    {
        rs2::auto_calibrated_device ac_dev = _dev.as<auto_calibrated_device>();
        float health = 0.f; // output health
        int timeout_ms = 120000; // 2 minutes timout
        // Forward librealsense progress updates to the action client as feedback.
        auto progress_callback = [&](const float progress) {
            _progress = progress;
            goal_handle->publish_feedback(feedback);
        };
        auto ans = ac_dev.run_on_chip_calibration(goal->json,
                                                  &health,
                                                  progress_callback,
                                                  timeout_ms);
        // the new calibration is the result without the first 3 bytes
        rs2::calibration_table new_calib = std::vector<uint8_t>(ans.begin() + 3, ans.end());
        // NOTE(review): exact float comparison — assumes librealsense reports
        // completion as exactly 100.0; confirm against the SDK contract.
        if (rclcpp::ok() && _progress == 100.0)
        {
            result->calibration = vectorToJsonString(new_calib);
            result->health = health;
            result->success = true;
            goal_handle->succeed(result);
            ROS_INFO("TriggeredCalibrationExecute: Succeded");
        }
        else
        {
            result->calibration = "{}";
            result->success = false;
            result->error_msg = "Canceled";
            goal_handle->canceled(result);
            ROS_WARN("TriggeredCalibrationExecute: Canceled");
        }
    }
    catch(const std::runtime_error& e)
    {
        // exception must have been thrown from run_on_chip_calibration call
        std::string error_msg = "TriggeredCalibrationExecute: Aborted. Error: " + std::string(e.what());
        result->calibration = "{}";
        result->success = false;
        result->error_msg = error_msg;
        goal_handle->abort(result);
        ROS_ERROR(error_msg.c_str());
    }
}

View File

@@ -0,0 +1,29 @@
// Copyright 2025 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <align_depth_filter.h>
using namespace realsense2_camera;
// Wraps the rs2 align filter and registers its options under the fixed
// "align_depth" prefix instead of the base-class auto-derived name
// (auto-registration is disabled via the trailing 'false' argument).
AlignDepthFilter::AlignDepthFilter(std::shared_ptr<rs2::filter> filter,
                                   std::function<void(const rclcpp::Parameter&)> update_align_depth_func,
                                   std::shared_ptr<Parameters> parameters, rclcpp::Logger logger, bool is_enabled):
    NamedFilter(filter, parameters, logger, is_enabled, false)
{
    _params.registerDynamicOptions(*(_filter.get()), "align_depth");
    // Bind the enable flag to a ROS parameter; toggling it invokes the caller's callback.
    _params.getParameters()->setParamT("align_depth.enable", _is_enabled, update_align_depth_func);
    _parameters_names.push_back("align_depth.enable");
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,332 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <dynamic_params.h>
namespace realsense2_camera
{
// Installs the node-wide "set parameters" callback that dispatches incoming
// parameter changes to per-parameter handlers, and starts the background
// thread that applies deferred parameter updates.
Parameters::Parameters(RosNodeBase& node) :
    _node(node),
    _logger(node.get_logger()),
    _params_backend(node),
    _is_running(true)
{
    _params_backend.add_on_set_parameters_callback(
        [this](const std::vector<rclcpp::Parameter> & parameters)
        {
            rcl_interfaces::msg::SetParametersResult result;
            result.successful = true;
            for (const auto & parameter : parameters)
            {
                try
                {
                    // Look up the handler registered by setParam() for this name.
                    const auto& func_iter = _param_functions.find(parameter.get_name());
                    if (func_iter != _param_functions.end())
                    {
                        // Names in self_set_parameters mark updates issued by
                        // setRosParamValue(): consume the marker and skip the
                        // handler to avoid re-entering user code.
                        std::list<std::string>::iterator name_iter(std::find(self_set_parameters.begin(), self_set_parameters.end(), parameter.get_name()));
                        if (name_iter != self_set_parameters.end())
                        {
                            self_set_parameters.erase(name_iter);
                        }
                        else
                        {
                            (func_iter->second)(parameter);
                        }
                    }
                }
                catch(const std::exception& e)
                {
                    // A throwing handler rejects the whole set-parameters request.
                    result.successful = false;
                    result.reason = e.what();
                    ROS_WARN_STREAM("Set parameter {" << parameter.get_name()
                                    << "} failed: " << e.what());
                }
            }
            return result;
        });
    monitor_update_functions(); // Start parameters update thread
}
// pushUpdateFunctions:
// Cannot update ros parameters from within ros parameters callback.
// This function is used by the parameter callback function to update other ros parameters.
// The queued functions are executed by the thread started in monitor_update_functions().
// NOTE(review): _update_functions_v is appended here without holding _mu,
// while the monitor thread reads it under the lock — confirm callers are
// serialized (e.g. all run on the parameters-callback thread).
void Parameters::pushUpdateFunctions(std::vector<std::function<void()> > funcs)
{
    _update_functions_v.insert(_update_functions_v.end(), funcs.begin(), funcs.end());
    _update_functions_cv.notify_one();
}
// Starts the worker thread that drains the deferred-update queue. The thread
// wakes on notification or at least once per second, so a shutdown
// (_is_running == false) is noticed within ~1s even without a notify.
void Parameters::monitor_update_functions()
{
    int time_interval(1000);
    std::function<void()> func = [this, time_interval](){
        std::unique_lock<std::mutex> lock(_mu);
        while(_is_running) {
            _update_functions_cv.wait_for(lock, std::chrono::milliseconds(time_interval), [&]{return !_is_running || !_update_functions_v.empty();});
            // Drain the queue in FIFO order while holding the lock.
            while (!_update_functions_v.empty())
            {
                _update_functions_v.front()();
                _update_functions_v.pop_front();
            }
        }
    };
    _update_functions_t = std::make_shared<std::thread>(func);
}
Parameters::~Parameters()
{
    // Stop and join the update thread. No notify is sent here, so the join may
    // wait up to the 1s wait_for timeout in monitor_update_functions().
    _is_running = false;
    if (_update_functions_t && _update_functions_t->joinable())
        _update_functions_t->join();
    // Undeclare every parameter that was registered through setParam().
    for (auto const& param : _param_functions)
    {
        _node.undeclare_parameter(param.first);
    }
    // remove_on_set_parameters_callback(_params_backend);
}
// Read a one-shot initialization parameter, then immediately undeclare it so
// the application does not keep it exposed.
template <class T>
T Parameters::readAndDeleteParam(std::string param_name, const T& initial_value)
{
    // Function is meant for reading parameters needed in initialization but should not be declared by the app.
    T result_value = setParam(param_name, initial_value);
    removeParam(param_name);
    return result_value;
}
// Declare (or read an already-declared) parameter and register 'func' as its
// change handler. Returns the effective value, which may differ from
// 'initial_value' when an override (launch file / CLI) is present.
template <class T>
T Parameters::setParam(std::string param_name, const T& initial_value,
                       std::function<void(const rclcpp::Parameter&)> func,
                       rcl_interfaces::msg::ParameterDescriptor descriptor)
{
    T result_value(initial_value);
    try
    {
        ROS_DEBUG_STREAM("setParam::Setting parameter: " << param_name);
#if defined(FOXY)
        //do nothing for old versions (foxy)
#else
        descriptor.dynamic_typing=true;
#endif
        // If the parameter already exists, keep its current value; otherwise
        // declare it with the provided default.
        if (!_node.get_parameter(param_name, result_value))
        {
            result_value = _node.declare_parameter(param_name, initial_value, descriptor);
        }
    }
    catch(const std::exception& e)
    {
        // Declaration failed (e.g. value outside the descriptor's range):
        // warn with the allowed range and fall back to the default.
        std::stringstream range;
        for (auto val : descriptor.floating_point_range)
        {
            range << val.from_value << ", " << val.to_value;
        }
        for (auto val : descriptor.integer_range)
        {
            range << val.from_value << ", " << val.to_value;
        }
        ROS_WARN_STREAM("Could not set param: " << param_name << " with " <<
                        initial_value <<
                        " Range: [" << range.str() << "]" <<
                        ": " << e.what());
        return initial_value;
    }

    if (_param_functions.find(param_name) != _param_functions.end())
        ROS_DEBUG_STREAM("setParam::Replace function for : " << param_name);

    // Without a user handler, install a stub that only warns on change attempts.
    if (func)
        _param_functions[param_name] = func;
    else
        _param_functions[param_name] = [this](const rclcpp::Parameter& )
        {
            ROS_WARN_STREAM("Parameter can not be changed in runtime.");
        };

    // An override changed the value at declaration time: run the handler once
    // now so dependent state matches the effective value.
    if (result_value != initial_value && func)
    {
        try
        {
            func(rclcpp::Parameter(param_name, result_value));
        }
        catch(const std::exception& e)
        {
            ROS_WARN_STREAM("Set parameter {" << param_name << "} failed: " << e.what());
        }
    }
    return result_value;
}
// setParamT: Used to automatically update param based on its parallel ros parameter.
// Notice: param must remain alive as long as the callback is active -
// if param is destroyed the behavior of the callback is undefined.
template <class T>
void Parameters::setParamT(std::string param_name, T& param,
                           std::function<void(const rclcpp::Parameter&)> func,
                           rcl_interfaces::msg::ParameterDescriptor descriptor)
{
    // The wrapper callback writes the new value into 'param' (captured by
    // reference) before invoking the optional user callback.
    param = setParam<T>(param_name, param,
                        [&param, func](const rclcpp::Parameter& parameter)
                        {
                            param = parameter.get_value<T>();
                            if (func) func(parameter);
                        }, descriptor);
}
template <class T>
void Parameters::setParamValue(T& param, const T& value)
{
    // setParamValue updates a variable and its parallel in the parameters server.
    // NOTICE: <param> must have the same address it was declared with.
    param = value;
    try
    {
        // _param_names maps the variable's address back to its parameter name.
        std::string param_name = _param_names.at(&param);

        rcl_interfaces::msg::SetParametersResult results = _node.set_parameter(rclcpp::Parameter(param_name, value));
        if (!results.successful)
        {
            ROS_WARN_STREAM("Parameter: " << param_name << " was not set:" << results.reason);
        }
    }
    catch(const std::out_of_range& e)
    {
        // &param is not a key in _param_names: the variable was never registered.
        ROS_WARN_STREAM("Parameter was not internally declared.");
    }
    catch(const rclcpp::exceptions::ParameterNotDeclaredException& e)
    {
        std::string param_name = _param_names.at(&param);
        ROS_WARN_STREAM("Parameter: " << param_name << " was not declared:" << e.what());
    }
    catch(const std::exception& e)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << e.what());
    }
}
// setRosParamValue - Used to set ROS parameter back to a valid value if an invalid value was set by user.
void Parameters::setRosParamValue(const std::string param_name, void const* const value)
{
    // setRosParamValue sets a value to a parameter in the parameters server.
    // The callback for the specified parameter is NOT called.
    // Pushing the name onto self_set_parameters makes the set-parameters
    // callback (installed in the constructor) skip the user handler once.
    self_set_parameters.push_back(param_name);

    // 'value' is type-erased; cast it according to the parameter's declared type.
    rclcpp::ParameterType param_type = _node.get_parameter(param_name).get_type();
    rcl_interfaces::msg::SetParametersResult results;
    switch(param_type)
    {
        case rclcpp::PARAMETER_BOOL:
            ROS_DEBUG_STREAM("Set " << param_name << " to " << *(bool*)value);
            results = _node.set_parameter(rclcpp::Parameter(param_name, *(bool*)value));
            break;
        case rclcpp::PARAMETER_INTEGER:
            ROS_DEBUG_STREAM("Set " << param_name << " to " << *(int*)value);
            results = _node.set_parameter(rclcpp::Parameter(param_name, *(int*)value));
            break;
        case rclcpp::PARAMETER_DOUBLE:
            ROS_DEBUG_STREAM("Set " << param_name << " to " << *(double*)value);
            results = _node.set_parameter(rclcpp::Parameter(param_name, *(double*)value));
            break;
        case rclcpp::PARAMETER_STRING:
            ROS_DEBUG_STREAM("Set " << param_name << " to " << *(std::string*)value);
            results = _node.set_parameter(rclcpp::Parameter(param_name, *(std::string*)value));
            break;
        default:
            ROS_ERROR_STREAM("Setting parameter of type " << _node.get_parameter(param_name).get_type_name() << " is not implemented.");
    }
    if (!results.successful)
    {
        ROS_WARN_STREAM("Parameter: " << param_name << " was not set:" << results.reason);
        // The update was rejected, so no callback will fire: remove the
        // skip marker pushed above.
        self_set_parameters.pop_back();
    }
}
// queueSetRosValue - Set parameter in queue to be pushed to ROS parameter by monitor_update_functions
template <class T>
void Parameters::queueSetRosValue(const std::string& param_name, const T value)
{
    // 'value' is captured by copy so the pointer handed to setRosParamValue
    // stays valid until the monitor thread runs the deferred call.
    std::vector<std::function<void()> > funcs;
    funcs.push_back([this, param_name, value](){setRosParamValue(param_name, &value);});
    pushUpdateFunctions(funcs);
}
// Undeclare the ROS parameter (when it exists) and drop its change handler.
void Parameters::removeParam(std::string param_name)
{
    const bool declared = _node.has_parameter(param_name);
    if (declared)
        _node.undeclare_parameter(param_name);
    _param_functions.erase(param_name);
}
/**
* @brief Retrieves an existing parameter or declares it with an initial value if not already declared.
*
* This function ensures that a parameter exists within the node. If the parameter is already
* declared, its current value is retrieved. Otherwise, the function declares the parameter
* with the provided `initial_value` and then returns it.
*
* @tparam T The type of the parameter.
* @param param_name The name of the parameter.
* @param initial_value The default value to declare if the parameter is not already declared.
* @return The value of the parameter after declaration or retrieval.
*
* @note This function is useful for ensuring that required parameters are available without
* explicitly checking for their existence beforehand.
*/
template<class T>
T Parameters::getOrDeclareParameter(const std::string param_name, const T& initial_value)
{
    // Declare parameter if not already declared
    if (!_node.has_parameter(param_name))
    {
        _node.declare_parameter(param_name, rclcpp::ParameterValue(initial_value));
    }
    // Retrieve the parameter value safely
    // Unlike setParam(), no change handler is registered for this parameter.
    return getParam<T>(param_name);
}
// Read the current value of an already-declared parameter.
template <class T>
T Parameters::getParam(std::string param_name)
{
    return _node.get_parameter(param_name).get_value<T>();
}
// Explicit template instantiations: the template definitions above live in
// this .cpp, so every (method, T) combination used elsewhere in the wrapper
// must be instantiated here.
template void Parameters::setParamT<bool>(std::string param_name, bool& param, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);
template void Parameters::setParamT<int>(std::string param_name, int& param, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);
template void Parameters::setParamT<double>(std::string param_name, double& param, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);

template bool Parameters::setParam<bool>(std::string param_name, const bool& initial_value, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);
template int Parameters::setParam<int>(std::string param_name, const int& initial_value, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);
template double Parameters::setParam<double>(std::string param_name, const double& initial_value, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);
template std::string Parameters::setParam<std::string>(std::string param_name, const std::string& initial_value, std::function<void(const rclcpp::Parameter&)> func, rcl_interfaces::msg::ParameterDescriptor descriptor);

template void Parameters::setParamValue<int>(int& param, const int& value);
template void Parameters::setParamValue<bool>(bool& param, const bool& value);
template void Parameters::setParamValue<double>(double& param, const double& value);

template void Parameters::queueSetRosValue<std::string>(const std::string& param_name, const std::string value);
template void Parameters::queueSetRosValue<int>(const std::string& param_name, const int value);

template int Parameters::readAndDeleteParam<int>(std::string param_name, const int& initial_value);

template int Parameters::getOrDeclareParameter<int>(const std::string param_name, const int& initial_value);
template bool Parameters::getOrDeclareParameter<bool>(const std::string param_name, const bool& initial_value);
template double Parameters::getOrDeclareParameter<double>(const std::string param_name, const double& initial_value);
template std::string Parameters::getOrDeclareParameter<std::string>(const std::string param_name, const std::string& initial_value);

template bool Parameters::getParam<bool>(std::string param_name);
}

View File

@@ -0,0 +1,47 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../include/base_realsense_node.h"
#include <chrono>
using namespace realsense2_camera;
using namespace std::chrono_literals;
#if defined (ACCELERATE_GPU_WITH_GLSL)
void BaseRealSenseNode::initOpenGLProcessing(bool use_gpu_processing)
{
    // Once we have a window, initialize GL module
    // Pass our window to enable sharing of textures between processed frames and the window
    // The "use_gpu_processing" is going to control if we will use CPU or GPU for data processing
    rs2::gl::init_processing(_app, use_gpu_processing);
    if (use_gpu_processing)
        rs2::gl::init_rendering();

    // Service GLFW's event queue once a second (see glfwPollEventCallback).
    _timer = _node.create_wall_timer(1000ms, std::bind(&BaseRealSenseNode::glfwPollEventCallback, this));
}
// Timer callback driven by the wall timer created in initOpenGLProcessing().
void BaseRealSenseNode::glfwPollEventCallback()
{
    // Must poll the GLFW events periodically, else window will hang or crash
    glfwPollEvents();
}
void BaseRealSenseNode::shutdownOpenGLProcessing()
{
    // Tear down in reverse order of initOpenGLProcessing().
    rs2::gl::shutdown_rendering();
    rs2::gl::shutdown_processing();
}
#endif

View File

@@ -0,0 +1,55 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <image_publisher.h>
using namespace realsense2_camera;
// --- image_rcl_publisher implementation ---
// Publishes sensor_msgs::msg::Image through a plain rclcpp publisher
// (no image_transport involvement).
image_rcl_publisher::image_rcl_publisher( RosNodeBase & node,
                                          const std::string & topic_name,
                                          const rmw_qos_profile_t & qos )
{
    image_publisher_impl = node.create_publisher< sensor_msgs::msg::Image >(
        topic_name,
        rclcpp::QoS( rclcpp::QoSInitialization::from_rmw( qos ), qos ) );
}
// Hand message ownership to rclcpp (moved, not copied).
void image_rcl_publisher::publish( sensor_msgs::msg::Image::UniquePtr image_ptr )
{
    image_publisher_impl->publish( std::move( image_ptr ) );
}
// Number of subscribers currently connected to the topic.
size_t image_rcl_publisher::get_subscription_count() const
{
    return image_publisher_impl->get_subscription_count();
}
// --- image_transport_publisher implementation ---
// Publishes through image_transport::create_publisher.
// NOTE(review): presumably this enables transport plugins (compressed etc.)
// depending on what is installed — confirm against the deployment.
image_transport_publisher::image_transport_publisher( rclcpp::Node & node,
                                                      const std::string & topic_name,
                                                      const rmw_qos_profile_t & qos )
{
    image_publisher_impl = std::make_shared< image_transport::Publisher >(
        image_transport::create_publisher( &node, topic_name, qos ) );
}
// image_transport's publish takes the message by reference, so the unique_ptr
// is dereferenced here rather than moved.
void image_transport_publisher::publish( sensor_msgs::msg::Image::UniquePtr image_ptr )
{
    image_publisher_impl->publish( *image_ptr );
}
// Number of subscribers currently connected to the topic.
size_t image_transport_publisher::get_subscription_count() const
{
    return image_publisher_impl->getNumSubscribers();
}

View File

@@ -0,0 +1,73 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <named_filter.h>
#include <fstream>
#include <sensor_msgs/point_cloud2_iterator.hpp>
using namespace realsense2_camera;
// Wraps an rs2 filter and (optionally) registers its options as ROS parameters.
// Derived filters pass is_set_parameters=false to register their parameters
// themselves, e.g. under a custom prefix (see AlignDepthFilter).
NamedFilter::NamedFilter(std::shared_ptr<rs2::filter> filter, std::shared_ptr<Parameters> parameters, rclcpp::Logger logger, bool is_enabled, bool is_set_parameters):
    _filter(filter), _is_enabled(is_enabled), _params(parameters, logger), _logger(logger)
{
    if (is_set_parameters)
        setParameters();
}
// Registers the wrapped filter's options as dynamic ROS parameters under the
// filter's own (ROS-sanitized) name, plus a "<name>.enable" toggle bound to
// _is_enabled; enable_param_func is invoked when the toggle changes.
void NamedFilter::setParameters(std::function<void(const rclcpp::Parameter&)> enable_param_func)
{
    std::stringstream module_name_str;
    std::string module_name = create_graph_resource_name(rs2_to_ros(_filter->get_info(RS2_CAMERA_INFO_NAME)));
    module_name_str << module_name;
    _params.registerDynamicOptions(*(_filter.get()), module_name_str.str());
    module_name_str << ".enable";
    _params.getParameters()->setParamT(module_name_str.str(), _is_enabled, enable_param_func);
    // Remember the name so clearParameters() can undeclare it later.
    _parameters_names.push_back(module_name_str.str());
}
// Undeclare every ROS parameter this filter registered, newest first.
void NamedFilter::clearParameters()
{
    for (; !_parameters_names.empty(); _parameters_names.pop_back())
    {
        _params.getParameters()->removeParam(_parameters_names.back());
    }
}
// Run the wrapped filter on the frameset when enabled; otherwise pass it
// through untouched.
rs2::frameset NamedFilter::Process(rs2::frameset frameset)
{
    if (!_is_enabled)
    {
        return frameset;
    }
    return _filter->process(frameset);
}
// Run the wrapped filter on a single frame when enabled; otherwise pass it
// through untouched.
rs2::frame NamedFilter::Process(rs2::frame frame)
{
    return _is_enabled ? _filter->process(frame) : frame;
}

View File

@@ -0,0 +1,168 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../include/base_realsense_node.h"
#include <ros_utils.h>
#include <iomanip>
using namespace realsense2_camera;
// Declares the node's static ROS parameters and binds them to their member
// variables; every declared name is recorded in _parameters_names so
// clearParameters() can undeclare them on shutdown.
void BaseRealSenseNode::getParameters()
{
    ROS_INFO("getParameters...");

    std::string param_name;
    param_name = std::string("camera_name");
    _camera_name = _parameters->setParam<std::string>(param_name, "camera");
    _parameters_names.push_back(param_name);

    param_name = std::string("publish_tf");
    _publish_tf = _parameters->setParam<bool>(param_name, PUBLISH_TF);
    _parameters_names.push_back(param_name);

    // Changing the rate restarts the dynamic TF publisher.
    param_name = std::string("tf_publish_rate");
    _parameters->setParamT(param_name, _tf_publish_rate, [this](const rclcpp::Parameter& )
            {
                startDynamicTf();
            });
    _parameters_names.push_back(param_name);

    param_name = std::string("diagnostics_period");
    _diagnostics_period = _parameters->setParam<double>(param_name, DIAGNOSTICS_PERIOD);
    _parameters_names.push_back(param_name);

    param_name = std::string("enable_sync");
    _parameters->setParamT(param_name, _sync_frames);
    _parameters_names.push_back(param_name);

    // Toggling rgbd flags a profile change and wakes the monitoring thread.
    param_name = std::string("enable_rgbd");
    _parameters->setParamT(param_name, _enable_rgbd, [this](const rclcpp::Parameter& )
            {
                {
                    std::lock_guard<std::mutex> lock_guard(_profile_changes_mutex);
                    _is_profile_changed = true;
                }
                _cv_mpc.notify_one();
            });
    _parameters_names.push_back(param_name);

    param_name = std::string("json_file_path");
    _json_file_path = _parameters->setParam<std::string>(param_name, "");
    _parameters_names.push_back(param_name);

    param_name = std::string("clip_distance");
    _clipping_distance = _parameters->setParam<double>(param_name, -1.0);
    _parameters_names.push_back(param_name);

    param_name = std::string("linear_accel_cov");
    _linear_accel_cov = _parameters->setParam<double>(param_name, 0.01);
    _parameters_names.push_back(param_name);

    param_name = std::string("angular_velocity_cov");
    _angular_velocity_cov = _parameters->setParam<double>(param_name, 0.01);
    _parameters_names.push_back(param_name);

    param_name = std::string("hold_back_imu_for_frames");
    _hold_back_imu_for_frames = _parameters->setParam<bool>(param_name, HOLD_BACK_IMU_FOR_FRAMES);
    _parameters_names.push_back(param_name);

    param_name = std::string("base_frame_id");
    _base_frame_id = _parameters->setParam<std::string>(param_name, DEFAULT_BASE_FRAME_ID);
    // Prefix the frame id with the camera name so multiple cameras get unique TF frames.
    _base_frame_id = (static_cast<std::ostringstream&&>(std::ostringstream() << _camera_name << "_" << _base_frame_id)).str();
    _parameters_names.push_back(param_name);

#if defined (ACCELERATE_GPU_WITH_GLSL)
    // Switching CPU/GPU processing flags a change and wakes the monitoring thread.
    param_name = std::string("accelerate_gpu_with_glsl");
    _parameters->setParam<bool>(param_name, false,
            [this](const rclcpp::Parameter& parameter)
            {
                bool temp_value = parameter.get_value<bool>();
                if (_accelerate_gpu_with_glsl != temp_value)
                {
                    _accelerate_gpu_with_glsl = temp_value;
                    std::lock_guard<std::mutex> lock_guard(_profile_changes_mutex);
                    _is_accelerate_gpu_with_glsl_changed = true;
                }
                _cv_mpc.notify_one();
            });
    _parameters_names.push_back(param_name);
#endif
}
// Declares the 'unite_imu_method' integer parameter with a descriptor that
// lists the imu_sync_method enum values and constrains the allowed range.
void BaseRealSenseNode::setDynamicParams()
{
    // Set default values:
    _imu_sync_method = imu_sync_method::NONE;

    // Human-readable name for each enum value (used in the descriptor text).
    auto imu_sync_method_string = [](imu_sync_method value)
    {
        switch (value)
        {
        case imu_sync_method::COPY:
            return "COPY";
        case imu_sync_method::LINEAR_INTERPOLATION:
            return "LINEAR_INTERPOLATION";
        default:
            return "NONE";
        }
    };

    // Register ROS parameter:
    std::string param_name("unite_imu_method");
    // Build "<NAME> : <value>" lines, padded to the longest name, sorted by value.
    std::vector<std::pair<std::string, int> > enum_vec;
    size_t longest_desc(0);
    for (int i=0; i<=int(imu_sync_method::LINEAR_INTERPOLATION); i++)
    {
        std::string enum_str(imu_sync_method_string(imu_sync_method(i)));
        enum_vec.push_back(std::make_pair(enum_str, i));
        longest_desc = std::max(longest_desc, enum_str.size());
    }
    sort(enum_vec.begin(), enum_vec.end(), [](std::pair<std::string, int> e1, std::pair<std::string, int> e2){return (e1.second < e2.second);});
    std::stringstream enum_str_values;
    for (auto vec_iter : enum_vec)
    {
        enum_str_values << std::setw(longest_desc+6) << std::left << vec_iter.first << " : " << vec_iter.second << std::endl;
    }

    // Restrict the parameter to the valid enum range.
    rcl_interfaces::msg::ParameterDescriptor crnt_descriptor;
    rcl_interfaces::msg::IntegerRange range;
    range.from_value = int(imu_sync_method::NONE);
    range.to_value = int(imu_sync_method::LINEAR_INTERPOLATION);
    crnt_descriptor.integer_range.push_back(range);
    std::stringstream desc;
    desc << "Available options are:" << std::endl << enum_str_values.str();
    crnt_descriptor.description = desc.str();

    // Changes take effect only after a gyro/accel stream restart (see warning).
    _parameters->setParam<int>(param_name, int(imu_sync_method::NONE),
                               [this](const rclcpp::Parameter& parameter)
                               {
                                   _imu_sync_method = imu_sync_method(parameter.get_value<int>());
                                   ROS_WARN("For the 'unite_imu_method' param update to take effect, "
                                            "re-enable either gyro or accel stream.");
                               }, crnt_descriptor);
    _parameters_names.push_back(param_name);
}
// Deregisters every ROS parameter this node added, newest first, and empties
// the bookkeeping list.
void BaseRealSenseNode::clearParameters()
{
    // Walk the registration list back-to-front so each name is removed once.
    for (auto it = _parameters_names.rbegin(); it != _parameters_names.rend(); ++it)
    {
        _parameters->removeParam(*it);
    }
    _parameters_names.clear();
}

View File

@@ -0,0 +1,236 @@
// Copyright 2025 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pointcloud_filter.h>

#include <algorithm>
#include <fstream>

#include <sensor_msgs/point_cloud2_iterator.hpp>
using namespace realsense2_camera;
// Wraps the librealsense pointcloud filter and registers its ROS parameters.
// NOTE(review): the trailing 'false' passed to NamedFilter presumably defers
// base-class parameter registration to this class's setParameters() — confirm
// against the NamedFilter constructor.
PointcloudFilter::PointcloudFilter(std::shared_ptr<rs2::filter> filter, RosNodeBase& node, std::shared_ptr<Parameters> parameters, rclcpp::Logger logger, bool is_enabled):
    NamedFilter(filter, parameters, logger, is_enabled, false),
    _node(node),
    _allow_no_texture_points(ALLOW_NO_TEXTURE_POINTS),
    _ordered_pc(ORDERED_PC)
{
    setParameters();
}
// Registers the pointcloud-specific ROS parameters under the filter's module
// name: allow_no_texture_points, ordered_pc and pointcloud_qos, plus the base
// NamedFilter parameters (whose update callback re-creates the publisher).
void PointcloudFilter::setParameters()
{
    std::string module_name = create_graph_resource_name(rs2_to_ros(_filter->get_info(RS2_CAMERA_INFO_NAME)));
    std::string param_name(module_name + "." + "allow_no_texture_points");
    _params.getParameters()->setParamT(param_name, _allow_no_texture_points);
    _parameters_names.push_back(param_name);
    param_name = module_name + "." + std::string("ordered_pc");
    _params.getParameters()->setParamT(param_name, _ordered_pc);
    _parameters_names.push_back(param_name);
    param_name = module_name + "." + std::string("pointcloud_qos");
    rcl_interfaces::msg::ParameterDescriptor crnt_descriptor;
    crnt_descriptor.description = "Available options are:\n" + list_available_qos_strings();
    // The QoS string is validated on every update; an invalid value is logged
    // and reverted to the previous setting via queueSetRosValue().
    _pointcloud_qos = _params.getParameters()->setParam<std::string>(param_name, DEFAULT_QOS, [this](const rclcpp::Parameter& parameter)
            {
                try
                {
                    // Throws if the string does not name a known QoS profile.
                    qos_string_to_qos(parameter.get_value<std::string>());
                    _pointcloud_qos = parameter.get_value<std::string>();
                    ROS_WARN_STREAM("re-enable the stream for the change to take effect.");
                }
                catch(const std::exception& e)
                {
                    ROS_ERROR_STREAM("Given value, " << parameter.get_value<std::string>() << " is unknown. Set ROS param back to: " << _pointcloud_qos);
                    _params.getParameters()->queueSetRosValue(parameter.get_name(), _pointcloud_qos);
                }
            }, crnt_descriptor);
    _parameters_names.push_back(param_name);
    // Base-class parameters; toggling them re-evaluates publisher existence.
    NamedFilter::setParameters([this](const rclcpp::Parameter& )
            {
                setPublisher();
            });
}
void PointcloudFilter::setPublisher()
{
std::lock_guard<std::mutex> lock_guard(_mutex_publisher);
if ((_is_enabled) && (!_pointcloud_publisher))
{
_pointcloud_publisher = _node.create_publisher<sensor_msgs::msg::PointCloud2>("~/depth/color/points",
rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(qos_string_to_qos(_pointcloud_qos)),
qos_string_to_qos(_pointcloud_qos)));
}
else if ((!_is_enabled) && (_pointcloud_publisher))
{
_pointcloud_publisher.reset();
}
}
// Copies n bytes from src to dst in reversed order (dst[0] receives src[n-1]).
// Used below to flip RGB texture bytes into the BGR order PointCloud2 expects.
// The ranges must not overlap.
void reverse_memcpy(unsigned char* dst, const unsigned char* src, size_t n)
{
    // std::reverse_copy expresses the intent directly and avoids the manual
    // index arithmetic of the original loop.
    std::reverse_copy(src, src + n, dst);
}
// Converts a librealsense pointcloud into a sensor_msgs::PointCloud2 and
// publishes it. When a texture stream is selected on the filter, per-point
// color (RGB8) or intensity (Y8) is sampled from the matching frame in
// 'frameset'. Returns early, publishing nothing, when there are no
// subscribers or the requested texture frame is missing from the frameset.
void PointcloudFilter::Publish(rs2::points pc, const rclcpp::Time& t, const rs2::frameset& frameset, const std::string& frame_id)
{
    {
        // Skip all conversion work when nobody is listening.
        std::lock_guard<std::mutex> lock_guard(_mutex_publisher);
        if ((!_pointcloud_publisher) || (!(_pointcloud_publisher->get_subscription_count())))
            return;
    }
    // RS2_STREAM_ANY means "no texture chosen": publish XYZ-only points.
    rs2_stream texture_source_id = static_cast<rs2_stream>(_filter->get_option(rs2_option::RS2_OPTION_STREAM_FILTER));
    bool use_texture = texture_source_id != RS2_STREAM_ANY;
    static int warn_count(0);
    static const int DISPLAY_WARN_NUMBER(5);
    rs2::frameset::iterator texture_frame_itr = frameset.end();
    if (use_texture)
    {
        // Locate a frame of the chosen texture stream in a supported format.
        std::set<rs2_format> available_formats{ rs2_format::RS2_FORMAT_RGB8, rs2_format::RS2_FORMAT_Y8 };
        texture_frame_itr = std::find_if(frameset.begin(), frameset.end(), [&texture_source_id, &available_formats] (rs2::frame f)
                                {return (rs2_stream(f.get_profile().stream_type()) == texture_source_id) &&
                                            (available_formats.find(f.get_profile().format()) != available_formats.end()); });
        if (texture_frame_itr == frameset.end())
        {
            // Rate-limit the warning: emit it only on every DISPLAY_WARN_NUMBER-th miss.
            warn_count++;
            std::string texture_source_name = _filter->get_option_value_description(rs2_option::RS2_OPTION_STREAM_FILTER, static_cast<float>(texture_source_id));
            ROS_WARN_STREAM_COND(warn_count == DISPLAY_WARN_NUMBER, "No stream match for pointcloud chosen texture " << texture_source_name);
            return;
        }
        warn_count = 0;
    }
    int texture_width(0), texture_height(0);
    int num_colors(0);
    const rs2::vertex* vertex = pc.get_vertices();
    const rs2::texture_coordinate* color_point = pc.get_texture_coordinates();
    rs2_intrinsics depth_intrin = pc.get_profile().as<rs2::video_stream_profile>().get_intrinsics();
    sensor_msgs::msg::PointCloud2::UniquePtr msg_pointcloud = std::make_unique<sensor_msgs::msg::PointCloud2>();
    sensor_msgs::PointCloud2Modifier modifier(*msg_pointcloud);
    modifier.setPointCloud2FieldsByString(1, "xyz");
    modifier.resize(pc.size());
    if (_ordered_pc)
    {
        // Ordered cloud keeps the full depth-image grid, including invalid points.
        msg_pointcloud->width = depth_intrin.width;
        msg_pointcloud->height = depth_intrin.height;
        msg_pointcloud->is_dense = false;
    }
    vertex = pc.get_vertices();
    size_t valid_count(0);
    if (use_texture)
    {
        rs2::video_frame texture_frame = (*texture_frame_itr).as<rs2::video_frame>();
        texture_width = texture_frame.get_width();
        texture_height = texture_frame.get_height();
        num_colors = texture_frame.get_bytes_per_pixel();
        uint8_t* color_data = (uint8_t*)texture_frame.get_data();
        std::string format_str;
        switch(texture_frame.get_profile().format())
        {
            case RS2_FORMAT_RGB8:
                format_str = "rgb";
                break;
            case RS2_FORMAT_Y8:
                format_str = "intensity";
                break;
            default:
                throw std::runtime_error("Unhandled texture format passed in pointcloud " + std::to_string(texture_frame.get_profile().format()));
        }
        // Append the color/intensity field after xyz and grow the data buffer.
        msg_pointcloud->point_step = addPointField(*msg_pointcloud, format_str.c_str(), 1, sensor_msgs::msg::PointField::FLOAT32, msg_pointcloud->point_step);
        msg_pointcloud->row_step = msg_pointcloud->width * msg_pointcloud->point_step;
        msg_pointcloud->data.resize(msg_pointcloud->height * msg_pointcloud->row_step);
        sensor_msgs::PointCloud2Iterator<float>iter_x(*msg_pointcloud, "x");
        sensor_msgs::PointCloud2Iterator<float>iter_y(*msg_pointcloud, "y");
        sensor_msgs::PointCloud2Iterator<float>iter_z(*msg_pointcloud, "z");
        sensor_msgs::PointCloud2Iterator<uint8_t>iter_color(*msg_pointcloud, format_str);
        color_point = pc.get_texture_coordinates();
        float color_pixel[2];
        for (size_t point_idx=0; point_idx < pc.size(); point_idx++, vertex++, color_point++)
        {
            // Texture coordinates are normalized to [0, 1] over the texture frame.
            float i(color_point->u);
            float j(color_point->v);
            bool valid_color_pixel(i >= 0.f && i <=1.f && j >= 0.f && j <=1.f);
            bool valid_pixel(vertex->z > 0 && (valid_color_pixel || _allow_no_texture_points));
            if (valid_pixel || _ordered_pc)
            {
                *iter_x = vertex->x;
                *iter_y = vertex->y;
                *iter_z = vertex->z;
                if (valid_color_pixel)
                {
                    // Map normalized texture coords to a pixel, then copy its bytes.
                    color_pixel[0] = i * texture_width;
                    color_pixel[1] = j * texture_height;
                    int pixx = static_cast<int>(color_pixel[0]);
                    int pixy = static_cast<int>(color_pixel[1]);
                    int offset = (pixy * texture_width + pixx) * num_colors;
                    reverse_memcpy(&(*iter_color), color_data+offset, num_colors);  // PointCloud2 order of rgb is bgr.
                }
                ++iter_x; ++iter_y; ++iter_z;
                ++iter_color;
                ++valid_count;
            }
        }
    }
    else
    {
        // XYZ-only path: same loop without texture lookups.
        msg_pointcloud->row_step = msg_pointcloud->width * msg_pointcloud->point_step;
        msg_pointcloud->data.resize(msg_pointcloud->height * msg_pointcloud->row_step);
        sensor_msgs::PointCloud2Iterator<float>iter_x(*msg_pointcloud, "x");
        sensor_msgs::PointCloud2Iterator<float>iter_y(*msg_pointcloud, "y");
        sensor_msgs::PointCloud2Iterator<float>iter_z(*msg_pointcloud, "z");
        for (size_t point_idx=0; point_idx < pc.size(); point_idx++, vertex++)
        {
            bool valid_pixel(vertex->z > 0);
            if (valid_pixel || _ordered_pc)
            {
                *iter_x = vertex->x;
                *iter_y = vertex->y;
                *iter_z = vertex->z;
                ++iter_x; ++iter_y; ++iter_z;
                ++valid_count;
            }
        }
    }
    msg_pointcloud->header.stamp = t;
    msg_pointcloud->header.frame_id = frame_id;
    if (!_ordered_pc)
    {
        // Unordered cloud: shrink to the valid points only (1-row, dense).
        msg_pointcloud->width = valid_count;
        msg_pointcloud->height = 1;
        msg_pointcloud->is_dense = true;
        modifier.resize(valid_count);
    }
    {
        // Re-check under the lock: the publisher may have been reset meanwhile.
        std::lock_guard<std::mutex> lock_guard(_mutex_publisher);
        if (_pointcloud_publisher)
            _pointcloud_publisher->publish(std::move(msg_pointcloud));
    }
}

View File

@@ -0,0 +1,663 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <profile_manager.h>
#include <regex>
using namespace realsense2_camera;
using namespace rs2;
// Base profiles manager: stores the node logger and wraps the shared ROS
// parameter handler used by the derived video/motion stream managers.
ProfilesManager::ProfilesManager(std::shared_ptr<Parameters> parameters, rclcpp::Logger logger):
    _logger(logger),
    _params(parameters, _logger)
{
}
void ProfilesManager::clearParameters()
{
while ( !_parameters_names.empty() )
{
auto name = _parameters_names.back();
_params.getParameters()->removeParam(name);
_parameters_names.pop_back();
}
}
// Expands a parameter-name template such as "enable_%s" by substituting the
// stream's graph-resource name (e.g. "enable_color") for the "%s" placeholder.
// A template without "%s" is returned unchanged.
std::string applyTemplateName(std::string template_name, stream_index_pair sip)
{
    const std::string stream_name(create_graph_resource_name(STREAM_NAME(sip)));
    // std::string replacement instead of the original new[]/sprintf: the raw
    // buffer's size did not reserve room for the terminating NUL, which would
    // overflow for any template lacking a "%s" placeholder.
    std::string full_name(template_name);
    const std::string::size_type pos = full_name.find("%s");
    if (pos != std::string::npos)
    {
        full_name.replace(pos, 2, stream_name);
    }
    return full_name;
}
// For each stream-index pair in 'unique_sips', registers a QoS string
// parameter named per 'template_name' and stores its live value in 'params'.
// Updates are validated; invalid QoS strings are logged and reverted.
void ProfilesManager::registerSensorQOSParam(std::string template_name,
                                             std::set<stream_index_pair> unique_sips,
                                             std::map<stream_index_pair, std::shared_ptr<std::string> >& params,
                                             std::string value)
{
    // For each pair of stream-index, Function add a QOS parameter to <params> and advertise it by <template_name>.
    // parameters in <params> are dynamically being updated. If invalid they are reverted.
    for (auto& sip : unique_sips)
    {
        std::string param_name = applyTemplateName(template_name, sip);
        params[sip] = std::make_shared<std::string>(value);
        // Shared pointer captured by the callback so updates outlive this scope.
        std::shared_ptr<std::string> param = params[sip];
        rcl_interfaces::msg::ParameterDescriptor crnt_descriptor;
        crnt_descriptor.description = "Available options are:\n" + list_available_qos_strings();
        _params.getParameters()->setParam<std::string>(param_name, value, [this, param](const rclcpp::Parameter& parameter)
                {
                    try
                    {
                        // Throws if the string does not name a known QoS profile.
                        qos_string_to_qos(parameter.get_value<std::string>());
                        *param = parameter.get_value<std::string>();
                        ROS_WARN_STREAM("re-enable the stream for the change to take effect.");
                    }
                    catch(const std::exception& e)
                    {
                        ROS_ERROR_STREAM("Given value, " << parameter.get_value<std::string>() << " is unknown. Set ROS param back to: " << *param);
                        _params.getParameters()->queueSetRosValue(parameter.get_name(), *param);
                    }
                }, crnt_descriptor);
        _parameters_names.push_back(param_name);
    }
}
// For each stream-index pair in 'unique_sips', registers a parameter (named
// per 'template_name') whose modification requires restarting the sensor:
// every update stores the new value in 'params' and invokes
// 'update_sensor_func'. Existing entries in 'params' keep their value.
template<class T>
void ProfilesManager::registerSensorUpdateParam(std::string template_name,
                                                std::set<stream_index_pair> unique_sips,
                                                std::map<stream_index_pair, std::shared_ptr<T> >& params,
                                                T value,
                                                std::function<void()> update_sensor_func)
{
    // This function registers parameters that their modification requires a sensor update.
    // For each pair of stream-index, Function add a parameter to <params>, if does not exist yet, and advertise it by <template_name>.
    // parameters in <params> are dynamically being updated.
    for (auto& sip : unique_sips)
    {
        std::string param_name = applyTemplateName(template_name, sip);
        if (params.find(sip) == params.end())
        {
            if (sip == INFRA0)
                // Disabling Infra 0 stream by default
                params[sip] = std::make_shared<T>(false);
            else
                params[sip] = std::make_shared<T>(value);
        }
        // Shared pointer captured by the callback so updates outlive this scope.
        std::shared_ptr<T> param = params[sip];
        _params.getParameters()->setParam<T>(param_name, *(params[sip]), [param, update_sensor_func](const rclcpp::Parameter& parameter)
                {
                    *param = parameter.get_value<T>();
                    update_sensor_func();
                });
        _parameters_names.push_back(param_name);
    }
}

// Explicit instantiations for the two parameter types used by the managers.
template void ProfilesManager::registerSensorUpdateParam<bool>(std::string template_name, std::set<stream_index_pair> unique_sips, std::map<stream_index_pair, std::shared_ptr<bool> >& params, bool value, std::function<void()> update_sensor_func);
template void ProfilesManager::registerSensorUpdateParam<int>(std::string template_name, std::set<stream_index_pair> unique_sips, std::map<stream_index_pair, std::shared_ptr<int> >& params, int value, std::function<void()> update_sensor_func);
// True once at least one stream of this manager's type has been registered.
bool ProfilesManager::isTypeExist()
{
    return _enabled_profiles.size() > 0;
}
// Returns one profile per stream-index pair, preferring the first profile that
// librealsense flags as default. When the device reports no defaults at all,
// falls back to the first registered profile. Throws if no profiles are set.
std::map<stream_index_pair, rs2::stream_profile> ProfilesManager::getDefaultProfiles()
{
    if (_all_profiles.empty())
        throw std::runtime_error("Wrong commands sequence. No profiles set.");
    std::map<stream_index_pair, rs2::stream_profile> defaults;
    for (auto profile : _all_profiles)
    {
        if (!profile.is_default())
            continue;
        stream_index_pair key(profile.stream_type(), profile.stream_index());
        // emplace keeps only the first default seen for each stream-index pair.
        defaults.emplace(key, profile);
    }
    if (defaults.empty())
    {
        ROS_INFO_STREAM("No default profile found. Setting the first available profile as the default one.");
        rs2::stream_profile fallback = _all_profiles.front();
        defaults[{fallback.stream_type(), fallback.stream_index()}] = fallback;
    }
    return defaults;
}
// Returns 'given_profile' when a profile of 'stream_type' with the same
// width/height/fps exists; otherwise returns the first available profile of
// that stream type (or 'given_profile' itself if the stream has none).
rs2::stream_profile VideoProfilesManager::validateAndGetSuitableProfile(rs2_stream stream_type, rs2::stream_profile given_profile)
{
    rs2::stream_profile suitable_profile = given_profile;
    bool is_given_profile_suitable = false;
    for (auto temp_profile : _all_profiles)
    {
        if (temp_profile.stream_type() == stream_type)
        {
            auto video_profile = given_profile.as<rs2::video_stream_profile>();
            if (isSameProfileValues(temp_profile, video_profile.width(), video_profile.height(), video_profile.fps(), RS2_FORMAT_ANY))
            {
                is_given_profile_suitable = true;
                break;
            }
        }
    }
    // If given profile is not suitable, choose the first available profile for the given stream.
    if (!is_given_profile_suitable)
    {
        for (auto temp_profile : _all_profiles)
        {
            if (temp_profile.stream_type() == stream_type)
            {
                suitable_profile = temp_profile;
                // Fix: stop at the first match. The loop previously kept
                // iterating and returned the *last* matching profile, contrary
                // to the documented intent above.
                break;
            }
        }
    }
    return suitable_profile;
}
// Appends to 'wanted_profiles' one matching profile per enabled stream-index
// pair, chosen by isWantedProfile(). Warns about enabled streams for which no
// registered profile matches the current width/height/fps/format settings.
void ProfilesManager::addWantedProfiles(std::vector<rs2::stream_profile>& wanted_profiles)
{
    // Tracks, per stream-index pair, whether a matching profile was found.
    std::map<stream_index_pair, bool> found_sips;
    for (auto profile : _all_profiles)
    {
        stream_index_pair sip(profile.stream_type(), profile.stream_index());
        if (!(*_enabled_profiles[sip])) continue;
        if (found_sips.find(sip) == found_sips.end())
        {
            found_sips[sip] = false;
        }
        else
        {
            // Only one profile per stream-index pair is wanted; skip once found.
            if (found_sips.at(sip) == true) continue;
        }
        if (isWantedProfile(profile))
        {
            wanted_profiles.push_back(profile);
            found_sips[sip] = true;
            ROS_DEBUG_STREAM("Found profile for " << ros_stream_to_string(sip.first) << ":" << sip.second);
        }
    }
    // Warn the user if the enabled stream cannot be opened due to wrong profile selection
    for (auto const & profile : _enabled_profiles)
    {
        auto sip = profile.first;
        auto stream_enabled = profile.second;
        if (*stream_enabled && !found_sips[sip])
        {
            ROS_WARN_STREAM("Couldn't open " << ros_stream_to_string(sip.first) << ":" << sip.second << " stream "
                                << "due to wrong profile selection. "
                                << "Please verify and update the profile settings (such as width, height, fps, format) "
                                << "and re-enable the stream for the changes to take effect. "
                                << "Run 'rs-enumerate-devices' command to know the list of profiles supported by the sensors.");
        }
    }
}
// Renders a stream profile as a single human-readable line for logging.
// Video profiles include resolution; all profiles include format and FPS.
std::string ProfilesManager::profile_string(const rs2::stream_profile& profile)
{
    std::stringstream profile_str;
    if (profile.is<rs2::video_stream_profile>())
    {
        auto video_profile = profile.as<rs2::video_stream_profile>();
        profile_str << "stream_type: " << ros_stream_to_string(video_profile.stream_type()) << "(" << video_profile.stream_index() << ")" <<
                        ", Format: " << video_profile.format() <<
                        ", Width: " << video_profile.width() <<
                        ", Height: " << video_profile.height() <<
                        ", FPS: " << video_profile.fps();
    }
    else
    {
        // Fix: add the ", " separator before "Format" — previously the output
        // read "...(0)Format: ...", fusing the index and format fields.
        profile_str << "stream_type: " << ros_stream_to_string(profile.stream_type()) << "(" << profile.stream_index() << ")" <<
                        ", Format: " << profile.format() <<
                        ", FPS: " << profile.fps();
    }
    return profile_str.str();
}
// True when the given stream-index pair was registered with this manager.
bool ProfilesManager::hasSIP(const stream_index_pair& sip) const
{
    return _enabled_profiles.count(sip) > 0;
}

// QoS profile for the stream's image/data topic.
rmw_qos_profile_t ProfilesManager::getQOS(const stream_index_pair& sip) const
{
    const std::string& qos_name = *(_profiles_image_qos_str.at(sip));
    return qos_string_to_qos(qos_name);
}

// QoS profile for the stream's camera-info topic.
rmw_qos_profile_t ProfilesManager::getInfoQOS(const stream_index_pair& sip) const
{
    const std::string& qos_name = *(_profiles_info_qos_str.at(sip));
    return qos_string_to_qos(qos_name);
}
// Manager for video (depth/color/infrared) stream profiles.
// 'module_name' prefixes the ROS parameter names (e.g. "<module>.color_profile");
// 'force_image_default_qos' makes image topics start with DEFAULT_QOS instead
// of IMAGE_QOS.
VideoProfilesManager::VideoProfilesManager(std::shared_ptr<Parameters> parameters,
                                           const std::string& module_name, rclcpp::Logger logger, bool force_image_default_qos):
    ProfilesManager(parameters, logger),
    _module_name(module_name),
    _force_image_default_qos(force_image_default_qos)
{
}
// Checks whether 'profile' is a video profile matching the requested
// width/height/fps; RS2_FORMAT_ANY acts as a wildcard for the format.
bool VideoProfilesManager::isSameProfileValues(const rs2::stream_profile& profile, const int width, const int height, const int fps, const rs2_format format)
{
    if (!profile.is<rs2::video_stream_profile>())
        return false;
    auto vp = profile.as<rs2::video_stream_profile>();
    ROS_DEBUG_STREAM("Sensor profile: " << profile_string(profile));
    if (vp.width() != width || vp.height() != height || vp.fps() != fps)
        return false;
    return (RS2_FORMAT_ANY == format) || (vp.format() == format);
}
// A profile is wanted when it matches the currently configured resolution and
// fps for its stream type and the configured format for its stream-index pair.
bool VideoProfilesManager::isWantedProfile(const rs2::stream_profile& profile)
{
    const stream_index_pair sip{profile.stream_type(), profile.stream_index()};
    const rs2_stream stream = sip.first;
    return isSameProfileValues(profile, _width[stream], _height[stream], _fps[stream], _formats[sip]);
}
// Collects every video profile from 'all_profiles' and registers the per-stream
// ROS parameters: enable flags (which trigger 'update_sensor_func'), image and
// info QoS, and the resolution/fps/format parameters.
void VideoProfilesManager::registerProfileParameters(std::vector<stream_profile> all_profiles, std::function<void()> update_sensor_func)
{
    std::set<stream_index_pair> checked_sips;
    for (auto& profile : all_profiles)
    {
        // Non-video profiles (motion, pose) are handled by other managers.
        if (!profile.is<video_stream_profile>()) continue;
        ROS_DEBUG_STREAM("Register profile: " << profile_string(profile));
        _all_profiles.push_back(profile);
        stream_index_pair sip(profile.stream_type(), profile.stream_index());
        checked_sips.insert(sip);
    }
    if (!checked_sips.empty())
    {
        ROS_DEBUG_STREAM(__LINE__ << ": _enabled_profiles.size(): " << _enabled_profiles.size());
        registerSensorUpdateParam("enable_%s", checked_sips, _enabled_profiles, true, update_sensor_func);
        registerSensorQOSParam("%s_qos", checked_sips, _profiles_image_qos_str, _force_image_default_qos ? DEFAULT_QOS : IMAGE_QOS);
        registerSensorQOSParam("%s_info_qos", checked_sips, _profiles_info_qos_str, DEFAULT_QOS);
        for (auto& sip : checked_sips)
        {
            ROS_DEBUG_STREAM(__LINE__ << ": _enabled_profiles[" << ros_stream_to_string(sip.first) << ":" << sip.second << "]: " << *(_enabled_profiles[sip]));
        }
        registerVideoSensorParams(checked_sips);
    }
}
// Builds the newline-separated, de-duplicated "<width>x<height>x<fps>" option
// list for the given stream type, used as a ROS parameter description.
std::string VideoProfilesManager::get_profiles_descriptions(rs2_stream stream_type)
{
    // std::set de-duplicates and sorts the option strings.
    std::set<std::string> profiles_str;
    for (auto& profile : _all_profiles)
    {
        if (stream_type == profile.stream_type())
        {
            auto video_profile = profile.as<rs2::video_stream_profile>();
            std::stringstream crnt_profile_str;
            crnt_profile_str << video_profile.width() << "x" << video_profile.height() << "x" << video_profile.fps();
            profiles_str.insert(crnt_profile_str.str());
        }
    }
    std::stringstream descriptors_strm;
    for (auto& profile_str : profiles_str)
    {
        descriptors_strm << profile_str << "\n";
    }
    std::string descriptors(descriptors_strm.str());
    // Drop the trailing newline. Guard the no-profiles case: pop_back() on an
    // empty std::string is undefined behavior.
    if (!descriptors.empty())
        descriptors.pop_back();
    return descriptors;
}
// Builds the newline-separated, de-duplicated format option list for the given
// stream-index pair, used as a ROS parameter description.
std::string VideoProfilesManager::getProfileFormatsDescriptions(stream_index_pair sip)
{
    // std::set de-duplicates and sorts the format names.
    std::set<std::string> profile_formats_str;
    for (auto& profile : _all_profiles)
    {
        auto video_profile = profile.as<rs2::video_stream_profile>();
        stream_index_pair profile_sip = {video_profile.stream_type(), video_profile.stream_index()};
        if (sip == profile_sip)
        {
            std::stringstream crnt_profile_str;
            crnt_profile_str << video_profile.format();
            profile_formats_str.insert(crnt_profile_str.str());
        }
    }
    std::stringstream descriptors_strm;
    for (auto& profile_format_str : profile_formats_str)
    {
        descriptors_strm << profile_format_str << "\n";
    }
    std::string descriptors(descriptors_strm.str());
    // Drop the trailing newline. Guard the no-formats case: pop_back() on an
    // empty std::string is undefined behavior.
    if (!descriptors.empty())
        descriptors.pop_back();
    return descriptors;
}
// Seeds the default rs2_format for a stream-index pair before user overrides
// (and librealsense defaults) are applied.
void VideoProfilesManager::registerVideoSensorProfileFormat(stream_index_pair sip)
{
    rs2_format default_format = RS2_FORMAT_ANY;
    if (sip == DEPTH)
        default_format = RS2_FORMAT_Z16;
    else if (sip == INFRA0 || sip == COLOR)
        default_format = RS2_FORMAT_RGB8;
    else if (sip == INFRA1 || sip == INFRA2)
        default_format = RS2_FORMAT_Y8;
    _formats[sip] = default_format;
}
// Registers the "<module>.<stream>_profile" ("WxHxFPS") and
// "<module>.<stream>_format" ROS parameters for every given stream-index pair.
// Initial values come from the device's default profiles; updates are
// validated against the registered profiles and reverted when invalid.
void VideoProfilesManager::registerVideoSensorParams(std::set<stream_index_pair> sips)
{
    // Resolution/fps parameters are per stream *type* (all indices share them).
    std::set<rs2_stream> available_streams;
    for (auto sip : sips)
    {
        available_streams.insert(sip.first);
    }
    // Set default values:
    std::map<stream_index_pair, rs2::stream_profile> sip_default_profiles = getDefaultProfiles();
    for (auto stream_type : available_streams)
    {
        // Prefer the stream's own default; otherwise fall back to a compatible
        // profile (infrared borrows depth's resolution when it has no default).
        rs2::stream_profile default_profile = sip_default_profiles.begin()->second;
        switch (stream_type)
        {
            case RS2_STREAM_COLOR:
                if (sip_default_profiles.find(COLOR) != sip_default_profiles.end())
                {
                    default_profile = sip_default_profiles[COLOR];
                }
                break;
            case RS2_STREAM_DEPTH:
                if (sip_default_profiles.find(DEPTH) != sip_default_profiles.end())
                {
                    default_profile = sip_default_profiles[DEPTH];
                }
                break;
            case RS2_STREAM_INFRARED:
                if (sip_default_profiles.find(INFRA1) != sip_default_profiles.end())
                {
                    default_profile = sip_default_profiles[INFRA1];
                }
                else if (sip_default_profiles.find(DEPTH) != sip_default_profiles.end())
                {
                    default_profile = validateAndGetSuitableProfile(stream_type, sip_default_profiles[DEPTH]);
                }
                break;
            default:
                default_profile = validateAndGetSuitableProfile(stream_type, sip_default_profiles.begin()->second);
        }
        auto video_profile = default_profile.as<rs2::video_stream_profile>();
        _width[stream_type] = video_profile.width();
        _height[stream_type] = video_profile.height();
        _fps[stream_type] = video_profile.fps();
    }
    // Set the stream formats
    for (auto sip : sips)
    {
        registerVideoSensorProfileFormat(sip);
    }
    // Overwrite the _formats with default values queried from LibRealsense
    for (auto sip_default_profile : sip_default_profiles)
    {
        stream_index_pair sip = sip_default_profile.first;
        auto default_profile = sip_default_profile.second;
        auto video_profile = default_profile.as<rs2::video_stream_profile>();
        if (isSameProfileValues(default_profile, _width[sip.first], _height[sip.first], _fps[sip.first], video_profile.format()))
        {
            _formats[sip] = video_profile.format();
        }
    }
    // Register the per-stream-type "<module>.<stream>_profile" parameters.
    for (auto stream_type : available_streams)
    {
        // Register ROS parameter:
        std::stringstream param_name_str;
        param_name_str << _module_name << "." << create_graph_resource_name(ros_stream_to_string(stream_type)) << "_profile";
        rcl_interfaces::msg::ParameterDescriptor crnt_descriptor;
        crnt_descriptor.description = "Available options are:\n" + get_profiles_descriptions(stream_type);
        std::stringstream crnt_profile_str;
        crnt_profile_str << _width[stream_type] << "x" << _height[stream_type] << "x" << _fps[stream_type];
        _params.getParameters()->setParam<std::string>(param_name_str.str(), crnt_profile_str.str(), [this, stream_type](const rclcpp::Parameter& parameter)
                {
                    // Accepts "WxHxFPS" with 'x', 'X' or ',' separators and optional whitespace.
                    std::regex self_regex("\\s*([0-9]+)\\s*[xX,]\\s*([0-9]+)\\s*[xX,]\\s*([0-9]+)\\s*", std::regex_constants::ECMAScript);
                    std::smatch match;
                    std::string profile_str(parameter.get_value<std::string>());
                    bool found = std::regex_match(profile_str, match, self_regex);
                    bool request_default(false);
                    if (found)
                    {
                        int temp_width(std::stoi(match[1])), temp_height(std::stoi(match[2])), temp_fps(std::stoi(match[3]));
                        if (temp_width <= 0 || temp_height <= 0 || temp_fps <= 0)
                        {
                            // Non-positive values request a reset to the current default.
                            found = false;
                            request_default = true;
                        }
                        else
                        {
                            // NOTE(review): 'found' is re-set to false at the top of each
                            // iteration, so after the loop it reflects only the last check
                            // unless 'break' fired — correct here, but fragile.
                            for (const auto& profile : _all_profiles)
                            {
                                found = false;
                                if (isSameProfileValues(profile, temp_width, temp_height, temp_fps, RS2_FORMAT_ANY))
                                {
                                    _width[stream_type] = temp_width;
                                    _height[stream_type] = temp_height;
                                    _fps[stream_type] = temp_fps;
                                    found = true;
                                    ROS_WARN_STREAM("re-enable the stream for the change to take effect.");
                                    break;
                                }
                            }
                        }
                    }
                    if (!found)
                    {
                        // Revert (or reset) the ROS parameter to the current working value.
                        std::stringstream crnt_profile_str;
                        crnt_profile_str << _width[stream_type] << "x" << _height[stream_type] << "x" << _fps[stream_type];
                        if (request_default)
                        {
                            ROS_INFO_STREAM("Set ROS param " << parameter.get_name() << " to default: " << crnt_profile_str.str());
                        }
                        else
                        {
                            ROS_ERROR_STREAM("Given value, " << parameter.get_value<std::string>() << " is invalid. "
                                            << "Run 'ros2 param describe <your_node_name> " << parameter.get_name()
                                            << "' to get the list of supported profiles. "
                                            << "Setting ROS param back to: " << crnt_profile_str.str());
                        }
                        _params.getParameters()->queueSetRosValue(parameter.get_name(), crnt_profile_str.str());
                    }
                }, crnt_descriptor);
        _parameters_names.push_back(param_name_str.str());
    }
    // Register the per-stream-index "<module>.<stream>_format" parameters.
    for (auto sip : sips)
    {
        std::string param_name(_module_name + "." + STREAM_NAME(sip) + "_format");
        std::string param_value = rs2_format_to_string(_formats[sip]);
        rcl_interfaces::msg::ParameterDescriptor crnt_descriptor;
        crnt_descriptor.description = "Available options are:\n" + getProfileFormatsDescriptions(sip);
        _params.getParameters()->setParam(param_name, param_value, [this, sip](const rclcpp::Parameter& parameter)
                {
                    std::string format_str(parameter.get_value<std::string>());
                    rs2_format temp_format = string_to_rs2_format(format_str);
                    bool found = false;
                    if (temp_format != RS2_FORMAT_ANY)
                    {
                        // Accept only formats that some registered profile of this
                        // stream-index pair actually supports.
                        for (const auto& profile : _all_profiles)
                        {
                            stream_index_pair profile_sip = {profile.stream_type(), profile.stream_index()};
                            if (sip == profile_sip && temp_format == profile.format())
                            {
                                ROS_WARN_STREAM("re-enable the " << STREAM_NAME(sip) << " stream for the change to take effect.");
                                found = true;
                                _formats[sip] = temp_format;
                                break;
                            }
                        }
                    }
                    if (!found)
                    {
                        ROS_ERROR_STREAM(STREAM_NAME(sip) <<" stream doesn't support " << format_str <<" format. "
                                        << "Run 'ros2 param describe <your_node_name> " << parameter.get_name()
                                        << "' to get the list of supported formats. "
                                        << "Setting the ROS param '" << parameter.get_name() <<"' back to: " << _formats[sip]);
                        _params.getParameters()->queueSetRosValue(parameter.get_name(),
                                                                  (std::string)rs2_format_to_string(_formats[sip]));
                    }
                }, crnt_descriptor);
        _parameters_names.push_back(param_name);
    }
}
///////////////////////////////////////////////////////////////////////////////////////
// Motion streams match on stream type and fps only (no resolution/format).
bool MotionProfilesManager::isSameProfileValues(const rs2::stream_profile& profile, const rs2_stream stype, const int fps)
{
    if (profile.stream_type() != stype)
        return false;
    return profile.fps() == fps;
}

// A motion profile is wanted when its fps equals the user-configured fps for
// its stream-index pair.
bool MotionProfilesManager::isWantedProfile(const rs2::stream_profile& profile)
{
    const stream_index_pair key(profile.stream_type(), profile.stream_index());
    return isSameProfileValues(profile, profile.stream_type(), *(_fps[key]));
}
// Collects every motion (accel/gyro) profile from 'all_profiles' and registers
// the per-stream ROS parameters: fps, enable flags (which trigger
// 'update_sensor_func'), and data/info QoS.
void MotionProfilesManager::registerProfileParameters(std::vector<stream_profile> all_profiles, std::function<void()> update_sensor_func)
{
    std::set<stream_index_pair> checked_sips;
    for (auto& profile : all_profiles)
    {
        // Video and pose profiles are handled by other managers.
        if (!profile.is<motion_stream_profile>()) continue;
        _all_profiles.push_back(profile);
        stream_index_pair sip(profile.stream_type(), profile.stream_index());
        checked_sips.insert(sip);
    }
    if (_all_profiles.empty()) return;
    registerFPSParams();
    registerSensorUpdateParam("enable_%s", checked_sips, _enabled_profiles, true, update_sensor_func);
    registerSensorQOSParam("%s_qos", checked_sips, _profiles_image_qos_str, HID_QOS);
    registerSensorQOSParam("%s_info_qos", checked_sips, _profiles_info_qos_str, DEFAULT_QOS);
}
// Collects, per stream-index pair, every fps value the device offers.
std::map<stream_index_pair, std::vector<int>> MotionProfilesManager::getAvailableFPSValues()
{
    std::map<stream_index_pair, std::vector<int>> fps_per_stream;
    for (auto& profile : _all_profiles)
    {
        const stream_index_pair key(profile.stream_type(), profile.stream_index());
        const int fps = profile.as<rs2::motion_stream_profile>().fps();
        fps_per_stream[key].push_back(fps);
    }
    return fps_per_stream;
}
// Registers one "<stream>_fps" ROS parameter per motion stream. The initial
// value is the device's default fps (or the minimum available fps when no
// default exists); updates are validated against the available fps values and
// reverted when invalid.
void MotionProfilesManager::registerFPSParams()
{
    if (_all_profiles.empty()) return;
    std::map<stream_index_pair, std::vector<int>> sips_fps_values = getAvailableFPSValues();
    // Set default fps to minimum fps available for the stream:
    for (auto& sip_fps_values : sips_fps_values)
    {
        int min_fps = *(std::min_element(sip_fps_values.second.begin(), sip_fps_values.second.end()));
        _fps.insert(std::pair<stream_index_pair, std::shared_ptr<int>>(sip_fps_values.first, std::make_shared<int>(min_fps)));
    }
    // Overwrite with default values:
    std::map<stream_index_pair, rs2::stream_profile> sip_default_profiles = getDefaultProfiles();
    for (auto sip_default_profile : sip_default_profiles)
    {
        stream_index_pair sip = sip_default_profile.first;
        *(_fps[sip]) = sip_default_profile.second.as<rs2::motion_stream_profile>().fps();
    }
    // Register ROS parameters:
    for (auto& fps : _fps)
    {
        stream_index_pair sip(fps.first);
        std::string param_name = applyTemplateName("%s_fps", sip);
        // Render the available fps values, one per line, for the description.
        std::stringstream description_str;
        std::copy(sips_fps_values[sip].begin(), sips_fps_values[sip].end(), std::ostream_iterator<int>(description_str, "\n"));
        std::string description(description_str.str());
        // Drop the trailing newline (each sip has at least one fps value here).
        description.pop_back();
        rcl_interfaces::msg::ParameterDescriptor crnt_descriptor;
        crnt_descriptor.description = "Available options are:\n" + description;
        std::shared_ptr<int> param(_fps[sip]);
        std::vector<int> available_values(sips_fps_values[sip]);
        _params.getParameters()->setParam<int>(param_name, *(fps.second), [this, sip](const rclcpp::Parameter& parameter)
                {
                    int next_fps(parameter.get_value<int>());
                    bool found(false);
                    bool request_default(false);
                    if (next_fps <= 0)
                    {
                        // Non-positive values request a reset to the current default.
                        found = false;
                        request_default = true;
                    }
                    else
                    {
                        // NOTE(review): 'found' is re-set to false each iteration, so it
                        // reflects only the last check unless 'break' fired — correct
                        // here, but fragile.
                        for (const auto& profile : _all_profiles)
                        {
                            found = false;
                            if (isSameProfileValues(profile, sip.first, next_fps))
                            {
                                *(_fps[sip]) = next_fps;
                                found = true;
                                ROS_WARN_STREAM("re-enable the stream for the change to take effect.");
                                break;
                            }
                        }
                    }
                    if (!found)
                    {
                        // Revert (or reset) the ROS parameter to the current working value.
                        if (request_default)
                        {
                            ROS_INFO_STREAM("Set ROS param " << parameter.get_name() << " to default: " << *(_fps[sip]));
                        }
                        else
                        {
                            ROS_ERROR_STREAM("Given value, " << parameter.get_value<int>() << " is invalid. Set ROS param back to: " << *(_fps[sip]));
                        }
                        _params.getParameters()->queueSetRosValue(parameter.get_name(), *(_fps[sip]));
                    }
                }, crnt_descriptor);
        _parameters_names.push_back(param_name);
    }
}

View File

@@ -0,0 +1,554 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../include/realsense_node_factory.h"
#include "../include/base_realsense_node.h"
#include <iostream>
#include <map>
#include <mutex>
#include <condition_variable>
#include <signal.h>
#include <thread>
#ifndef _WIN32
#include <sys/time.h>
#endif
#include <regex>
using namespace realsense2_camera;
#define REALSENSE_ROS_EMBEDDED_VERSION_STR (VAR_ARG_STRING(VERSION: REALSENSE_ROS_MAJOR_VERSION.REALSENSE_ROS_MINOR_VERSION.REALSENSE_ROS_PATCH_VERSION))
constexpr auto realsense_ros_camera_version = REALSENSE_ROS_EMBEDDED_VERSION_STR;
// Default factory node: name "camera", namespace "/camera".
// In the plain-node build, device discovery starts immediately via init();
// in the lifecycle build it is deferred until on_configure().
RealSenseNodeFactory::RealSenseNodeFactory(const rclcpp::NodeOptions & node_options) :
    RosNodeBase("camera", "/camera", node_options),
    _logger(this->get_logger())
{
#ifndef USE_LIFECYCLE_NODE
    init();
#else
    RCLCPP_INFO(get_logger(), "Transition: Unconfigured...");
#endif
}
#ifdef USE_LIFECYCLE_NODE
// Managed-node lifecycle transitions (lifecycle build only):
// configure -> init() (parameters + device discovery thread),
// activate -> startDevice(), deactivate -> stopDevice(),
// cleanup/shutdown -> closeDevice() (also disconnects the device).
RealSenseNodeFactory::CallbackReturn
RealSenseNodeFactory::on_configure(const rclcpp_lifecycle::State & state)
{
    RCLCPP_INFO(get_logger(), "🚀 Lifecycle Transition | FROM: %s (%d) → TO: CONFIGURING",
        state.label().c_str(), state.id());
    init();
    return CallbackReturn::SUCCESS;
}

RealSenseNodeFactory::CallbackReturn
RealSenseNodeFactory::on_activate(const rclcpp_lifecycle::State & state)
{
    RCLCPP_INFO(get_logger(), "🚀 Lifecycle Transition | FROM: %s (%d) → TO: ACTIVATING",
        state.label().c_str(), state.id());
    startDevice();
    return CallbackReturn::SUCCESS;
}

RealSenseNodeFactory::CallbackReturn
RealSenseNodeFactory::on_deactivate(const rclcpp_lifecycle::State & state)
{
    RCLCPP_INFO(get_logger(), "🛑 Lifecycle Transition | FROM: %s (%d) → TO: DEACTIVATING",
        state.label().c_str(), state.id());
    stopDevice();
    return CallbackReturn::SUCCESS;
}

RealSenseNodeFactory::CallbackReturn
RealSenseNodeFactory::on_cleanup(const rclcpp_lifecycle::State & state)
{
    RCLCPP_INFO(get_logger(), "🧹 Lifecycle Transition | FROM: %s (%d) → TO: CLEANING UP",
        state.label().c_str(), state.id());
    closeDevice();
    return CallbackReturn::SUCCESS;
}

RealSenseNodeFactory::CallbackReturn
RealSenseNodeFactory::on_shutdown(const rclcpp_lifecycle::State & state)
{
    RCLCPP_INFO(get_logger(), "⚡ Lifecycle Transition | FROM: %s (%d) → TO: SHUTTING DOWN",
        state.label().c_str(), state.id());
    closeDevice();
    return CallbackReturn::SUCCESS;
}
#endif
// Named constructor overload (used when composing with an explicit node
// name/namespace). Always initializes immediately, even in lifecycle builds.
RealSenseNodeFactory::RealSenseNodeFactory(const std::string & node_name, const std::string & ns,
                                           const rclcpp::NodeOptions & node_options) :
    RosNodeBase(node_name, ns, node_options),
    _logger(this->get_logger())
{
    init();
}
// Stop the background device-discovery thread before the node is destroyed.
RealSenseNodeFactory::~RealSenseNodeFactory()
{
    _is_alive = false;  // signals the polling loop started in init() to exit
    if (_query_thread.joinable())
    {
        _query_thread.join();
    }
}
// Extract the USB port chain (e.g. "2-3.1") from a physical-port string.
// Returns an empty string when the input does not match the expected layout.
std::string RealSenseNodeFactory::parseUsbPort(std::string line)
{
    std::regex port_regex("(?:[^ ]+/usb[0-9]+[0-9./-]*/){0,1}([0-9.-]+)(:){0,1}[^ ]*", std::regex_constants::ECMAScript);
    std::smatch match;
    if (!std::regex_match(line, match, port_regex))
    {
        return std::string();
    }
    std::string port_id = match[1].str();
    // An empty second group means a libuvc-style string: strip a trailing
    // "-<counter>" suffix if one is present.
    if (match[2].str().empty())
    {
        std::regex counter_regex(".+(-[0-9]+$)", std::regex_constants::ECMAScript);
        std::smatch tail;
        if (std::regex_match(port_id, tail, counter_regex))
        {
            port_id = port_id.substr(0, port_id.size() - tail[1].str().size());
        }
    }
    return port_id;
}
// Select and bind a device from `list` into _device.
// Filtering: serial number (_serial_no), usb port chain (_usb_port_id) and a
// case-insensitive name regex (_device_type); empty filters match anything.
// The first matching device wins. If _initial_reset is set, the bound device
// is hardware-reset once and _device cleared so discovery re-attaches it.
void RealSenseNodeFactory::getDevice(rs2::device_list list)
{
    if (!_device)
    {
        if (0 == list.size())
        {
            ROS_WARN("No RealSense devices were found!");
        }
        else
        {
            bool found = false;
            rs2::device dev;
            for (size_t count = 0; count < list.size(); count++)
            {
                try
                {
                    dev = list[count];
                }
                catch(const std::exception& ex)
                {
                    // A device can disappear between enumeration and access; skip it.
                    ROS_WARN_STREAM("Device " << count+1 << "/" << list.size() << " failed with exception: " << ex.what());
                    continue;
                }
                auto sn = dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER);
                ROS_INFO_STREAM("Device with serial number " << sn << " was found."<<std::endl);
                std::string pn = dev.get_info(RS2_CAMERA_INFO_PHYSICAL_PORT);
                std::string name = dev.get_info(RS2_CAMERA_INFO_NAME);
                ROS_INFO_STREAM("Device with physical ID " << pn << " was found.");
                std::vector<std::string> results;
                ROS_INFO_STREAM("Device with name " << name << " was found.");
                std::string port_id = parseUsbPort(pn);
                std::string pid_str(dev.get_info(RS2_CAMERA_INFO_PRODUCT_ID));
                // DDS (Ethernet) devices have no USB port chain to parse.
                if(pid_str != "DDS")
                {
                    if (port_id.empty())
                    {
                        std::stringstream msg;
                        msg << "Error extracting usb port from device with physical ID: " << pn << std::endl
                            << "Please report on github issue at https://github.com/IntelRealSense/realsense-ros";
                        if (_usb_port_id.empty())
                        {
                            ROS_WARN_STREAM(msg.str());
                        }
                        else
                        {
                            // Port-based filtering was requested but cannot be honored.
                            ROS_ERROR_STREAM(msg.str());
                            ROS_ERROR_STREAM("Please use serial number instead of usb port.");
                        }
                    }
                    else
                    {
                        ROS_INFO_STREAM("Device with port number " << port_id << " was found.");
                    }
                }
                bool found_device_type(true);
                if (!_device_type.empty())
                {
                    // _device_type is treated as a case-insensitive regex over the device name.
                    std::smatch match_results;
                    std::regex device_type_regex(_device_type.c_str(), std::regex::icase);
                    found_device_type = std::regex_search(name, match_results, device_type_regex);
                }
                if ((_serial_no.empty() || sn == _serial_no) && (_usb_port_id.empty() || port_id == _usb_port_id) && found_device_type)
                {
                    _device = dev;
                    _serial_no = sn;  // remember the actual serial even if matched by port/type
                    found = true;
                    break;
                }
            }
            if (!found)
            {
                // Compose a human-readable description of the active filters.
                std::string msg ("The requested device with ");
                bool add_and(false);
                if (!_serial_no.empty())
                {
                    msg += "serial number " + _serial_no;
                    add_and = true;
                }
                if (!_usb_port_id.empty())
                {
                    if (add_and)
                    {
                        msg += " and ";
                    }
                    msg += "usb port id " + _usb_port_id;
                    add_and = true;
                }
                if (!_device_type.empty())
                {
                    if (add_and)
                    {
                        msg += " and ";
                    }
                    msg += "device name containing " + _device_type;
                }
                msg += " is NOT found. Will Try again.";
                ROS_ERROR_STREAM(msg);
            }
            else
            {
                // Warn when the camera is on a USB 2.x link: bandwidth is limited.
                if (_device.supports(RS2_CAMERA_INFO_USB_TYPE_DESCRIPTOR))
                {
                    std::string usb_type = _device.get_info(RS2_CAMERA_INFO_USB_TYPE_DESCRIPTOR);
                    ROS_INFO_STREAM("Device USB type: " << usb_type);
                    if (usb_type.find("2.") != std::string::npos)
                    {
                        ROS_WARN_STREAM("Device " << _serial_no << " is connected using a " << usb_type << " port. Reduced performance is expected.");
                    }
                }
            }
        }
    }
    if (_device && _initial_reset)
    {
        _initial_reset = false;  // one-shot: never reset again on re-discovery
        try
        {
            ROS_INFO("Resetting device...");
            _device.hardware_reset();
            _device = rs2::device();  // drop the handle; discovery will re-bind after reboot
        }
        catch(const std::exception& ex)
        {
            ROS_WARN_STREAM("An exception has been thrown: " << __FILE__ << ":" << __LINE__ << ":" << ex.what());
        }
    }
}
// librealsense device-change hook: tear the node down when our device is
// unplugged, and re-attach (and restart streaming) when a matching device
// re-appears on the bus.
void RealSenseNodeFactory::changeDeviceCallback(rs2::event_information& info)
{
    if (info.was_removed(_device))
    {
        ROS_ERROR("The device has been disconnected!");
        _realSenseNode.reset(nullptr);
        _device = rs2::device();
    }
    if (!_device)
    {
        rs2::device_list new_devices = info.get_new_devices();
        if (new_devices.size() > 0)
        {
            ROS_INFO("Checking new devices...");
            getDevice(new_devices);  // re-applies the serial/port/type filters
            if (_device)
            {
                startDevice();
            }
        }
    }
}
// Format librealsense's packed integer API version as "major.minor.patch".
// Values below 10000 carry no encoded fields and are printed verbatim.
std::string api_version_to_string(int version)
{
    std::ostringstream out;
    const int major = version / 10000;
    if (major == 0)
    {
        out << version;
    }
    else
    {
        out << major << "." << (version % 10000) / 100 << "." << (version % 100);
    }
    return out.str();
}
// One-time node initialization: report versions, read connection parameters,
// then either play back a rosbag file (optionally looping) or spawn a
// background thread that polls for a matching live device until found or
// until wait_for_device_timeout expires.
void RealSenseNodeFactory::init()
{
    try
    {
        _is_alive = true;
        _parameters = std::make_shared<Parameters>(*this);
        rs2_error* e = nullptr;
        std::string running_librealsense_version(api_version_to_string(rs2_get_api_version(&e)));
        ROS_INFO("RealSense ROS v%s", REALSENSE_ROS_VERSION_STR);
        ROS_INFO("Built with LibRealSense v%s", RS2_API_VERSION_STR);
        ROS_INFO_STREAM("Running with LibRealSense v" << running_librealsense_version);
        if (RS2_API_VERSION_STR != running_librealsense_version)
        {
            ROS_WARN("***************************************************");
            ROS_WARN("** running with a different librealsense version **");
            ROS_WARN("** than the one the wrapper was compiled with! **");
            ROS_WARN("***************************************************");
        }
        auto severity = rs2_log_severity::RS2_LOG_SEVERITY_WARN;
        tryGetLogSeverity(severity);  // may be overridden via LRS_LOG_LEVEL env var
        if (rs2_log_severity::RS2_LOG_SEVERITY_DEBUG == severity)
            console_bridge::setLogLevel(console_bridge::CONSOLE_BRIDGE_LOG_DEBUG);
        rs2::log_to_console(severity);
#ifdef BPDEBUG
        std::cout << "Attach to Process: " << getpid() << std::endl;
        std::cout << "Press <ENTER> key to continue." << std::endl;
        std::cin.get();
#endif
        // Using `getOrDeclareParameter()` to avoid re-declaration issues
        // NOTE(review): "_usb_port_id"/"_device_type" parameter names carry a
        // leading underscore unlike "serial_no" — confirm this is intentional.
        _serial_no = _parameters->getOrDeclareParameter<std::string>("serial_no", "");
        _usb_port_id = _parameters->getOrDeclareParameter<std::string>("_usb_port_id", "");
        _device_type = _parameters->getOrDeclareParameter<std::string>("_device_type", "");
        _wait_for_device_timeout = _parameters->getOrDeclareParameter<double>("wait_for_device_timeout", -1.0);
        _reconnect_timeout = _parameters->getOrDeclareParameter<double>("reconnect_timeout", 6.0);
        // A ROS2 hack: until a better way is found to avoid auto convertion of strings containing only digits to integers:
        if (!_serial_no.empty() && _serial_no.front() == '_') _serial_no = _serial_no.substr(1); // remove '_' prefix
        std::string rosbag_filename(_parameters->getOrDeclareParameter<std::string>("rosbag_filename", ""));
        if (!rosbag_filename.empty())
        {
            {
                // Playback mode: the "device" is backed by the bag file.
                ROS_INFO_STREAM("publish topics from rosbag file: " << rosbag_filename.c_str());
                rs2::context ctx;
                _device = ctx.load_device(rosbag_filename.c_str());
                _serial_no = _device.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER);
            }
            if (_device)
            {
                bool rosbag_loop(_parameters->getOrDeclareParameter<bool>("rosbag_loop", false));
                startDevice();
                if (rosbag_loop)
                {
                    // Poll playback status and restart the bag whenever it finishes.
                    auto playback = _device.as<rs2::playback>(); // Object to check the playback status periodically.
                    bool is_playing = true; // Flag to indicate if the playback is active
                    while (rclcpp::ok())
                    {
                        // Check the current status only if the playback is not active
                        auto status = playback.current_status();
                        if (!is_playing && status == RS2_PLAYBACK_STATUS_STOPPED)
                        {
                            RCLCPP_INFO(this->get_logger(), "Bag file playback has completed and it is going to be replayed.");
                            startDevice(); // Re-start bag file execution
                            is_playing = true; // Set the flag to true as playback has been restarted
                        }
                        else if (status != RS2_PLAYBACK_STATUS_STOPPED)
                        {
                            // If the playback status is not stopped, it means the playback is active
                            is_playing = true;
                        }
                        else
                        {
                            // If the playback status is stopped, set the flag to false
                            is_playing = false;
                        }
                        // Add a small delay to prevent busy-waiting
                        std::this_thread::sleep_for(std::chrono::milliseconds(10));
                    }
                }
            }
        }
        else
        {
            // Live-device mode: poll for a matching device in a background thread.
            _initial_reset = _parameters->getOrDeclareParameter<bool>("initial_reset",false);
            _query_thread = std::thread([=]()
                {
                    std::chrono::milliseconds timespan(static_cast<int>(_reconnect_timeout*1e3));
                    rclcpp::Time first_try_time = this->get_clock()->now();
                    while (_is_alive && !_device)
                    {
                        try
                        {
                            getDevice(_ctx.query_devices());
                            if (_device)
                            {
                                // Keep watching for unplug/replug events.
                                std::function<void(rs2::event_information&)> change_device_callback_function = [this](rs2::event_information& info){changeDeviceCallback(info);};
                                _ctx.set_devices_changed_callback(change_device_callback_function);
                                // unconfigure lifecycle state ends here for lifecycled node
#ifndef USE_LIFECYCLE_NODE
                                startDevice();
#endif
                            }
                            else
                            {
                                // Sleep between retries, but never past the overall timeout.
                                std::chrono::milliseconds actual_timespan(timespan);
                                if (_wait_for_device_timeout > 0)
                                {
                                    auto time_to_timeout(_wait_for_device_timeout - (this->get_clock()->now() - first_try_time).seconds());
                                    if (time_to_timeout < 0)
                                    {
                                        ROS_ERROR_STREAM("wait for device timeout of " << _wait_for_device_timeout << " secs expired");
                                        exit(1);
                                    }
                                    else
                                    {
                                        double max_timespan_secs(std::chrono::duration_cast<std::chrono::seconds>(timespan).count());
                                        actual_timespan = std::chrono::milliseconds (static_cast<int>(std::min(max_timespan_secs, time_to_timeout) * 1e3));
                                    }
                                }
                                std::this_thread::sleep_for(actual_timespan);
                            }
                        }
                        catch(const std::exception& e)
                        {
                            ROS_ERROR_STREAM("Error starting device: " << e.what());
                        }
                    }
                });
        }
    }
    catch(const std::exception& ex)
    {
        ROS_ERROR_STREAM("An exception has been thrown: " << __FILE__ << ":" << __LINE__ << ":" << ex.what());
        exit(1);
    }
    catch(...)
    {
        ROS_ERROR_STREAM("Unknown exception has occured!");
        exit(1);
    }
}
// Create the BaseRealSenseNode for the bound _device and start publishing.
// The product id selects supported models; unsupported PIDs abort the node.
// A backend failure triggers a hardware reset and releases the device so the
// discovery loop can try again.
void RealSenseNodeFactory::startDevice()
{
    if (_realSenseNode) _realSenseNode.reset();
    std::string device_name(_device.get_info(RS2_CAMERA_INFO_NAME));
    std::string pid_str(_device.get_info(RS2_CAMERA_INFO_PRODUCT_ID));
    uint16_t pid;
    if (device_name == "Intel RealSense D555")
    {
        // currently the PID of DDS devices is hardcoded as "DDS"
        // need to be fixed in librealsense
        pid = RS555_PID;
    }
    else
    {
        pid = std::stoi(pid_str, 0, 16);  // PID string is hexadecimal
    }
    try
    {
        switch(pid)
        {
        case RS400_PID:
        case RS405_PID:
        case RS410_PID:
        case RS460_PID:
        case RS415_PID:
        case RS420_PID:
        case RS421_PID:
        case RS420_MM_PID:
        case RS430_PID:
        case RS430i_PID:
        case RS430_MM_PID:
        case RS430_MM_RGB_PID:
        case RS435_RGB_PID:
        case RS435i_RGB_PID:
        case RS455_PID:
        case RS457_PID:
        case RS555_PID:
        case RS_USB2_PID:
            _realSenseNode = std::unique_ptr<BaseRealSenseNode>(new BaseRealSenseNode(*this, _device, _parameters, this->get_node_options().use_intra_process_comms()));
            break;
        default:
            ROS_FATAL_STREAM("Unsupported device!" << " Product ID: 0x" << pid_str);
            rclcpp::shutdown();
            exit(1);
        }
        _realSenseNode->publishTopics();
    }
    catch(const rs2::backend_error& e)
    {
        // Recoverable backend failure: reset the camera and let discovery retry.
        std::cerr << "Failed to start device: " << e.what() << '\n';
        _device.hardware_reset();
        _device = rs2::device();
    }
}
// Stop streaming by destroying the node object; the device handle is kept
// so a later activate can restart without re-discovery.
void RealSenseNodeFactory::stopDevice()
{
    if (!_realSenseNode)
    {
        return;
    }
    ROS_INFO("Stopping RealSense device...");
    _realSenseNode.reset();  // destructor closes sensors and publishers
}
// Fully release the camera: stop the discovery thread, destroy the node,
// detach the devices-changed callback and drop the device handle (required
// for the lifecycle node to return to the unconfigured state).
void RealSenseNodeFactory::closeDevice()
{
    _is_alive = false;
    if (_query_thread.joinable())
    {
        _query_thread.join();
    }
    stopDevice();
    if (_device)
    {
        ROS_INFO("Closing RealSense device...");
        _ctx.set_devices_changed_callback([](rs2::event_information&) {});  // no-op callback
        // To go into unconfigured lifecycle state for lifecycled node we have to also disconnect the device
        _device = rs2::device();
    }
}
// Override `severity` from the LRS_LOG_LEVEL environment variable when it
// names a valid librealsense severity (case-insensitive); otherwise leave
// the caller's value untouched.
void RealSenseNodeFactory::tryGetLogSeverity(rs2_log_severity& severity) const
{
    static const char* severity_var_name = "LRS_LOG_LEVEL";
    const char* content = getenv(severity_var_name);
    if (!content)
    {
        return;
    }
    std::string wanted(content);
    std::transform(wanted.begin(), wanted.end(), wanted.begin(), ::toupper);
    for (uint32_t i = 0; i < RS2_LOG_SEVERITY_COUNT; i++)
    {
        std::string candidate(rs2_log_severity_to_string((rs2_log_severity)i));
        std::transform(candidate.begin(), candidate.end(), candidate.begin(), ::toupper);
        if (wanted == candidate)
        {
            severity = (rs2_log_severity)i;
            return;
        }
    }
}
RCLCPP_COMPONENTS_REGISTER_NODE(realsense2_camera::RealSenseNodeFactory)

View File

@@ -0,0 +1,32 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ros_param_backend.h"
namespace realsense2_camera
{
// Register a parameters-set callback on the node and keep the returned
// handle alive so it can be removed in the destructor.
void ParametersBackend::add_on_set_parameters_callback(ros2_param_callback_type callback)
{
    _ros_callback = _node.add_on_set_parameters_callback(callback);
}
// Deregister the parameters-set callback (if any) so the node never invokes
// a dangling handler after this backend is destroyed.
ParametersBackend::~ParametersBackend()
{
    if (_ros_callback)
    {
        // static_cast instead of a C-style cast: the handle is stored type-erased
        // and this makes the intended conversion explicit and checkable.
        _node.remove_on_set_parameters_callback(
            static_cast<rclcpp::node_interfaces::OnSetParametersCallbackHandle*>(_ros_callback.get()));
        _ros_callback.reset();
    }
}
}

View File

@@ -0,0 +1,474 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ros_sensor.h>
#include <assert.h>
using namespace realsense2_camera;
using namespace rs2;
// Install a librealsense notifications handler: log error-level hardware
// notifications and trigger a full device reset for known fatal I2C errors.
void RosSensor::setupErrorCallback()
{
    set_notifications_callback([&](const rs2::notification& n)
        {
            // Notification substrings that indicate the device needs a hardware reset.
            std::vector<std::string> error_strings({"RT IC2 Config error",
                                                    "Left IC2 Config error"
                                                    });
            // Removed a leftover "XXX Hardware Notification:" debug trace that
            // echoed EVERY notification at WARN regardless of severity,
            // duplicating the severity-gated log below and flooding the log.
            if (n.get_severity() >= RS2_LOG_SEVERITY_ERROR)
            {
                ROS_WARN_STREAM("Hardware Notification:" << n.get_description() << "," << n.get_timestamp() << "," << n.get_severity() << "," << n.get_category());
            }
            if (error_strings.end() != find_if(error_strings.begin(), error_strings.end(), [&n] (std::string err)
                        {return (n.get_description().find(err) != std::string::npos); }))
            {
                _hardware_reset_func();
            }
        });
}
// Wrap an rs2::sensor for ROS use. Stores the user frame callback and wraps
// it so each arriving frame also ticks the per-stream frequency diagnostics;
// then registers all dynamic ROS parameters for the sensor.
RosSensor::RosSensor(rs2::sensor sensor,
                     std::shared_ptr<Parameters> parameters,
                     std::function<void(rs2::frame)> frame_callback,
                     std::function<void()> update_sensor_func,
                     std::function<void()> hardware_reset_func,
                     std::shared_ptr<diagnostic_updater::Updater> diagnostics_updater,
                     rclcpp::Logger logger,
                     bool force_image_default_qos,
                     bool is_rosbag_file):
    rs2::sensor(sensor),
    _logger(logger),
    _origin_frame_callback(frame_callback),
    _params(parameters, _logger),
    _update_sensor_func(update_sensor_func),
    _hardware_reset_func(hardware_reset_func),
    _diagnostics_updater(diagnostics_updater),
    _force_image_default_qos(force_image_default_qos)
{
    _frame_callback = [this](rs2::frame frame)
        {
            auto stream_type = frame.get_profile().stream_type();
            auto stream_index = frame.get_profile().stream_index();
            stream_index_pair sip{stream_type, stream_index};
            try
            {
                _origin_frame_callback(frame);
                // Only tick diagnostics after the callback succeeded.
                if (_frequency_diagnostics.find(sip) != _frequency_diagnostics.end())
                    _frequency_diagnostics.at(sip).Tick();
            }
            catch(const std::exception& ex)
            {
                // don't tick the frequency diagnostics for this publisher
                ROS_ERROR_STREAM("An error has occurred during frame callback: " << ex.what());
            }
        };
    setParameters(is_rosbag_file);
}
// Remove the sensor's ROS parameters and stop streaming; all exceptions are
// swallowed because destructors must not throw.
RosSensor::~RosSensor()
{
    try
    {
        clearParameters();
        stop();
    }
    catch(...){} // Not allowed to throw from Dtor
}
// Register this sensor's dynamic and profile parameters. For live devices,
// HDR is forcibly disabled first (newer firmware blocks UVC-control updates
// while HDR is on) and sequence-id bookkeeping is wired up.
void RosSensor::setParameters(bool is_rosbag_file)
{
    std::string module_name = create_graph_resource_name(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME)));
    // From FW version 5.14.x.x, if HDR is enabled, updating UVC controls like exposure, gain , etc are restricted.
    // So, during init of the node, forcefully disabling the HDR upfront and update it with new values.
    if((!is_rosbag_file) && supports(RS2_OPTION_HDR_ENABLED))
    {
        set_option(RS2_OPTION_HDR_ENABLED, false);
    }
    _params.registerDynamicOptions(*this, module_name);
    // for rosbag files, don't set hdr(sequence_id) / gain / exposure options
    // since these options can be changed only in real devices
    if(!is_rosbag_file)
        UpdateSequenceIdCallback();
    registerSensorParameters();
}
// For HDR-capable sensors: apply per-sequence-id exposure/gain parameters to
// the device (temporarily disabling HDR while doing so), then replace the
// sequence-id parameter callback with one that mirrors the selected
// sequence's gain/exposure back to the ROS parameter server.
void RosSensor::UpdateSequenceIdCallback()
{
    // Function replaces the trivial parameter callback with one that
    // also updates ros server about the gain and exposure of the selected sequence id.
    if (!supports(RS2_OPTION_SEQUENCE_ID))
        return;
    std::string module_name = create_graph_resource_name(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME)));
    std::string param_name = module_name + "." + create_graph_resource_name(rs2_option_to_string(RS2_OPTION_ENABLE_AUTO_EXPOSURE));
    bool user_set_enable_ae_value = _params.getParameters()->getParam<bool>(param_name);
    bool is_hdr_enabled = static_cast<bool>(get_option(RS2_OPTION_HDR_ENABLED));
    if (is_hdr_enabled && user_set_enable_ae_value)
    {
        bool is_ae_enabled = static_cast<bool>(get_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE));
        // If AE got auto-disabled, update the Enable_Auto_Exposure ROS paramerter as well accordingly.
        if (!is_ae_enabled)
        {
            ROS_WARN_STREAM("Auto Exposure functionality is not supported when HDR is enabled. " <<
                            "So, disabling the '" << param_name << "' param.");
            try
            {
                std::vector<std::function<void()> > funcs;
                funcs.push_back([this](){set_sensor_parameter_to_ros<bool>(RS2_OPTION_ENABLE_AUTO_EXPOSURE);});
                _params.getParameters()->pushUpdateFunctions(funcs);
            }
            catch(const std::exception& e)
            {
                ROS_WARN_STREAM("Failed to set parameter:" << param_name << " : " << e.what());
            }
        }
    }
    // Deleter to revert back the RS2_OPTION_HDR_ENABLED value at the end.
    auto deleter_to_revert_hdr = std::unique_ptr<bool, std::function<void(bool*)>>(&is_hdr_enabled,
        [&](bool* enable_back_hdr) {
            if (enable_back_hdr && *enable_back_hdr)
            {
                set_option(RS2_OPTION_HDR_ENABLED, true);
            }
        });
    if (is_hdr_enabled)
    {
        // From FW version 5.14.x.x, if HDR is enabled, updating UVC controls like exposure, gain , etc are restricted.
        // So, disable it before updating. It will be reverted back by the deleter 'deleter_to_revert_hdr'.
        set_option(RS2_OPTION_HDR_ENABLED, false);
        int original_seq_id = static_cast<int>(get_option(RS2_OPTION_SEQUENCE_ID)); // To Set back to default.
        // Read initialization parameters and set to sensor:
        std::vector<rs2_option> options{RS2_OPTION_EXPOSURE, RS2_OPTION_GAIN};
        unsigned int seq_size = get_option(RS2_OPTION_SEQUENCE_SIZE);
        for (unsigned int seq_id = 1; seq_id <= seq_size; seq_id++ )
        {
            set_option(RS2_OPTION_SEQUENCE_ID, seq_id);
            for (rs2_option& option : options)
            {
                // Parameter name pattern: <module>.<option>.<seq_id>
                std::stringstream param_name_str;
                param_name_str << module_name << "." << create_graph_resource_name(rs2_option_to_string(option)) << "." << seq_id;
                int option_value = get_option(option);
                int user_set_option_value = _params.getParameters()->readAndDeleteParam(param_name_str.str(), option_value);
                if (option_value != user_set_option_value)
                {
                    ROS_INFO_STREAM("Set " << rs2_option_to_string(option) << "." << seq_id << " to " << user_set_option_value);
                    set_option(option, user_set_option_value);
                }
            }
        }
        set_option(RS2_OPTION_SEQUENCE_ID, original_seq_id); // Set back to default.
    }
    // Set callback to update ros parameters to gain and exposure matching the selected sequence_id:
    const std::string option_name(module_name + "." + create_graph_resource_name(rs2_option_to_string(RS2_OPTION_SEQUENCE_ID)));
    try
    {
        int option_value = static_cast<int>(get_option(RS2_OPTION_SEQUENCE_ID));
        _params.getParameters()->setParam<int>(option_name, option_value,
            [this](const rclcpp::Parameter& parameter)
            {
                set_option(RS2_OPTION_SEQUENCE_ID, parameter.get_value<int>());
                std::vector<std::function<void()> > funcs;
                funcs.push_back([this](){set_sensor_parameter_to_ros<int>(RS2_OPTION_GAIN);});
                funcs.push_back([this](){set_sensor_parameter_to_ros<int>(RS2_OPTION_EXPOSURE);});
                _params.getParameters()->pushUpdateFunctions(funcs);
            });
    }
    catch(const rclcpp::exceptions::InvalidParameterValueException& e)
    {
        ROS_WARN_STREAM("Setting alternative callback: Failed to set parameter:" << option_name << " : " << e.what());
        return;
    }
}
// Mirror the sensor's current value of `option` back into the matching ROS
// parameter (<module>.<option>).
template<class T>
void RosSensor::set_sensor_parameter_to_ros(rs2_option option)
{
    const std::string module_name(create_graph_resource_name(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME))));
    const std::string option_name(module_name + "." + create_graph_resource_name(rs2_option_to_string(option)));
    T value = static_cast<T>(get_option(option));
    _params.getParameters()->setRosParamValue(option_name, &value);
}
// Register profile parameters for this sensor: first a video profile manager
// (with auto-exposure ROI options when video streams exist), then a motion
// profile manager. Only managers that match existing stream types are kept.
void RosSensor::registerSensorParameters()
{
    std::vector<stream_profile> all_profiles = get_stream_profiles();
    const std::string module_name(create_graph_resource_name(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME))));
    std::shared_ptr<ProfilesManager> profile_manager = std::make_shared<VideoProfilesManager>(_params.getParameters(), module_name, _logger, _force_image_default_qos);
    profile_manager->registerProfileParameters(all_profiles, _update_sensor_func);
    if (profile_manager->isTypeExist())
    {
        _profile_managers.push_back(profile_manager);
        registerAutoExposureROIOptions();  // ROI only applies to video sensors
    }
    profile_manager = std::make_shared<MotionProfilesManager>(_params.getParameters(), _logger);
    profile_manager->registerProfileParameters(all_profiles, _update_sensor_func);
    if (profile_manager->isTypeExist())
    {
        _profile_managers.push_back(profile_manager);
    }
}
// Open and start streaming the given profiles through the wrapped frame
// callback. Returns false (no-op) when the sensor is already streaming.
// Also creates per-stream frequency diagnostics when an updater exists.
bool RosSensor::start(const std::vector<stream_profile>& profiles)
{
    if (get_active_streams().size() > 0)
        return false;
    setupErrorCallback();
    rs2::sensor::open(profiles);
    for (auto& profile : profiles)
        ROS_INFO_STREAM("Open profile: " << ProfilesManager::profile_string(profile));
    rs2::sensor::start(_frame_callback);
    for (auto& profile : profiles)
    {
        stream_index_pair sip(profile.stream_type(), profile.stream_index());
        if (_diagnostics_updater)
            _frequency_diagnostics.emplace(sip, FrequencyDiagnostics(STREAM_NAME(sip), profile.fps(), _diagnostics_updater));
    }
    return true;
}
// Stop streaming and close the sensor. No-op when nothing is streaming.
// stop() and close() are attempted independently so a failure in one still
// lets the other run.
void RosSensor::stop()
{
    if (get_active_streams().size() == 0)
        return;
    ROS_INFO_STREAM("Stop Sensor: " << rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME)));
    _frequency_diagnostics.clear();
    try
    {
        rs2::sensor::stop();
    }
    catch (const std::exception& e)
    {
        ROS_ERROR_STREAM("Exception: " << __FILE__ << ":" << __LINE__ << ":" << e.what());
    }
    ROS_INFO_STREAM("Close Sensor. ");
    try
    {
        close();
    }
    catch (const std::exception& e)
    {
        ROS_ERROR_STREAM("Exception: " << __FILE__ << ":" << __LINE__ << ":" << e.what());
    }
    ROS_INFO_STREAM("Close Sensor - Done. ");
}
// Return the QoS profile for `sip` from whichever profile manager owns the
// stream; throws when no manager claims it.
rmw_qos_profile_t RosSensor::getQOS(const stream_index_pair& sip) const
{
    for (const auto& manager : _profile_managers)
    {
        if (manager->hasSIP(sip))
        {
            return manager->getQOS(sip);
        }
    }
    throw std::runtime_error("Given stream has no profile manager: " + std::string(rs2_stream_to_string(sip.first)) + "." + std::to_string(sip.second));
}
// Return the camera-info QoS profile for `sip` from the owning profile
// manager; throws when no manager claims the stream.
rmw_qos_profile_t RosSensor::getInfoQOS(const stream_index_pair& sip) const
{
    for (const auto& manager : _profile_managers)
    {
        if (manager->hasSIP(sip))
        {
            return manager->getInfoQOS(sip);
        }
    }
    throw std::runtime_error("Given stream has no profile manager: " + std::string(rs2_stream_to_string(sip.first)) + "." + std::to_string(sip.second));
}
// Profile equality helper: video profiles must also match in resolution,
// everything else compares with the base stream_profile operator.
bool profiles_equal(const rs2::stream_profile& a, const rs2::stream_profile& b)
{
    const bool both_video = a.is<rs2::video_stream_profile>() && b.is<rs2::video_stream_profile>();
    if (both_video)
    {
        auto va = a.as<rs2::video_stream_profile>();
        auto vb = b.as<rs2::video_stream_profile>();
        return (va == vb) && (va.width() == vb.width()) && (va.height() == vb.height());
    }
    return ((rs2::stream_profile)a == (rs2::stream_profile)b);
}
// True when every profile in `sub_profiles` has an equal counterpart
// (per profiles_equal) in `all_profiles`.
bool is_profiles_in_profiles(const std::vector<stream_profile>& sub_profiles, const std::vector<stream_profile>& all_profiles)
{
    for (const auto& wanted : sub_profiles)
    {
        bool matched = false;
        for (const auto& candidate : all_profiles)
        {
            if (profiles_equal(wanted, candidate))
            {
                matched = true;
                break;
            }
        }
        if (!matched)
        {
            return false;
        }
    }
    return true;
}
// Set equality of two profile lists: each list must be contained in the other.
bool compare_profiles_lists(const std::vector<stream_profile>& active_profiles, const std::vector<stream_profile>& wanted_profiles)
{
    return is_profiles_in_profiles(active_profiles, wanted_profiles)
        && is_profiles_in_profiles(wanted_profiles, active_profiles);
}
// Collect the currently wanted profiles from all profile managers into
// `wanted_profiles` (output parameter). Returns true when they differ from
// the profiles that are actually streaming, i.e. a restart is needed.
bool RosSensor::getUpdatedProfiles(std::vector<stream_profile>& wanted_profiles)
{
    wanted_profiles.clear();
    std::vector<stream_profile> active_profiles = get_active_streams();
    for (auto profile_manager : _profile_managers)
    {
        profile_manager->addWantedProfiles(wanted_profiles);
    }
    ROS_DEBUG_STREAM(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME)) << ":" << "active_profiles.size() = " << active_profiles.size());
    for (auto& profile : active_profiles)
    {
        ROS_DEBUG_STREAM("Sensor profile: " << ProfilesManager::profile_string(profile));
    }
    ROS_DEBUG_STREAM(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME)) << ":" << "wanted_profiles");
    for (auto& profile : wanted_profiles)
    {
        ROS_DEBUG_STREAM("Sensor profile: " << ProfilesManager::profile_string(profile));
    }
    if (compare_profiles_lists(active_profiles, wanted_profiles))
    {
        return false;  // already streaming exactly the wanted set
    }
    return true;
}
// Push the current _auto_exposure_roi to the device. If the ROI exceeds the
// active stream resolution, it is clamped and the ROI ROS parameters are
// re-registered with the new bounds.
// NOTE(review): assumes _profile_managers[0] is the VideoProfilesManager —
// registerSensorParameters() inserts it first; confirm this invariant holds.
void RosSensor::set_sensor_auto_exposure_roi()
{
    try
    {
        rs2_stream stream_type = RS2_STREAM_ANY;
        if (this->rs2::sensor::is<rs2::depth_sensor>())
        {
            stream_type = RS2_STREAM_DEPTH;
        }
        else if (this->rs2::sensor::is<rs2::color_sensor>())
        {
            stream_type = RS2_STREAM_COLOR;
        }
        assert(stream_type != RS2_STREAM_ANY);
        int width = std::dynamic_pointer_cast<VideoProfilesManager>(_profile_managers[0])->getWidth(stream_type);
        int height = std::dynamic_pointer_cast<VideoProfilesManager>(_profile_managers[0])->getHeight(stream_type);
        bool update_roi_range(false);
        if (_auto_exposure_roi.max_x > width)
        {
            _params.getParameters()->setParamValue(_auto_exposure_roi.max_x, width-1);
            update_roi_range = true;
        }
        if (_auto_exposure_roi.max_y > height)
        {
            _params.getParameters()->setParamValue(_auto_exposure_roi.max_y, height-1);
            update_roi_range = true;
        }
        if (update_roi_range)
        {
            registerAutoExposureROIOptions();
        }
        this->as<rs2::roi_sensor>().set_region_of_interest(_auto_exposure_roi);
    }
    catch(const std::runtime_error& e)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << e.what());
    }
}
void RosSensor::registerAutoExposureROIOptions()
{
std::string module_base_name(rs2_to_ros(get_info(RS2_CAMERA_INFO_NAME)));
if (this->rs2::sensor::is<rs2::roi_sensor>())
{
rs2_stream stream_type = RS2_STREAM_ANY;
if (this->rs2::sensor::is<rs2::depth_sensor>())
{
stream_type = RS2_STREAM_DEPTH;
}
else if (this->rs2::sensor::is<rs2::color_sensor>())
{
stream_type = RS2_STREAM_COLOR;
}
assert(stream_type != RS2_STREAM_ANY);
int width = std::dynamic_pointer_cast<VideoProfilesManager>(_profile_managers[0])->getWidth(stream_type);
int height = std::dynamic_pointer_cast<VideoProfilesManager>(_profile_managers[0])->getHeight(stream_type);
int max_x(width-1);
int max_y(height-1);
std::string module_name = create_graph_resource_name(module_base_name) +".auto_exposure_roi";
_auto_exposure_roi = {0, 0, max_x, max_y};
ROS_DEBUG_STREAM("Publish roi for " << module_name);
std::string param_name(module_name + ".left");
_params.getParameters()->setParamT(param_name, _auto_exposure_roi.min_x, [this](const rclcpp::Parameter&){set_sensor_auto_exposure_roi();});
_parameters_names.push_back(param_name);
param_name = std::string(module_name + ".right");
_params.getParameters()->setParamT(module_name + ".right", _auto_exposure_roi.max_x, [this](const rclcpp::Parameter&){set_sensor_auto_exposure_roi();});
_parameters_names.push_back(param_name);
param_name = std::string(module_name + ".top");
_params.getParameters()->setParamT(module_name + ".top", _auto_exposure_roi.min_y, [this](const rclcpp::Parameter&){set_sensor_auto_exposure_roi();});
_parameters_names.push_back(param_name);
param_name = std::string(module_name + ".bottom");
_params.getParameters()->setParamT(module_name + ".bottom", _auto_exposure_roi.max_y, [this](const rclcpp::Parameter&){set_sensor_auto_exposure_roi();});
_parameters_names.push_back(param_name);
}
}
void RosSensor::clearParameters()
{
for (auto profile_manager : _profile_managers)
{
profile_manager->clearParameters();
}
_params.clearParameters();
while ( !_parameters_names.empty() )
{
auto name = _parameters_names.back();
_params.getParameters()->removeParam(name);
_parameters_names.pop_back();
}
}

View File

@@ -0,0 +1,157 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ros_utils.h>
#include <algorithm>
#include <map>
#include <cctype>
namespace realsense2_camera
{
    // Stream a std::map as "{ {key: value} {key: value} }" for debug logging.
    template <typename K, typename V>
    std::ostream& operator<<(std::ostream& os, const std::map<K, V>& m)
    {
        os << '{';
        for (const auto& kv : m)
        {
            os << " {" << kv.first << ": " << kv.second << '}';
        }
        os << " }";
        return os;
    }

    /**
     * Same as ros::names::isValidCharInName, but re-implemented here because it's not exposed.
     */
    bool isValidCharInName(char c)
    {
        // Cast to unsigned char first: calling std::isalnum with a plain char
        // holding a negative value (possible for non-ASCII bytes) is undefined
        // behavior per the C standard.
        return std::isalnum(static_cast<unsigned char>(c)) || c == '/' || c == '_';
    }

    /**
     * ROS Graph Resource names don't allow spaces and hyphens (see http://wiki.ros.org/Names),
     * so we replace them here with underscores.
     */
    std::string rs2_to_ros(std::string rs2_name)
    {
        // Translation table from librealsense module names to their ROS-facing names.
        static std::map<std::string, std::string> libname_to_rosname = {
            {"Infrared", "Infra"},
            {"Stereo Module", "Depth Module"},
            {"L500 Depth Sensor", "Depth Module"},
            {"Pointcloud (SSE3)", "Pointcloud"},
            {"Pointcloud (CUDA)", "Pointcloud"},
            {"Align (SSE3)", "Align Depth"},
            {"Align (CUDA)", "Align Depth"},
            {"Depth to Disparity", "disparity filter"},
            {"Depth Visualization", "colorizer"}
        };
        // Names without a mapping pass through unchanged.
        auto name_iter = libname_to_rosname.find(rs2_name);
        if (name_iter == libname_to_rosname.end())
            return rs2_name;
        else
            return name_iter->second;
    }

    // Convert a librealsense stream enum to its ROS-facing display name.
    std::string ros_stream_to_string(rs2_stream stream)
    {
        return rs2_to_ros(rs2_stream_to_string(stream));
    }

    // Lower-case the name and replace every character that is not valid in a
    // ROS graph resource name with '_'.
    std::string create_graph_resource_name(const std::string &original_name)
    {
        std::string fixed_name = original_name;
        std::transform(fixed_name.begin(), fixed_name.end(), fixed_name.begin(),
                       [](unsigned char c) { return std::tolower(c); });
        std::replace_if(fixed_name.begin(), fixed_name.end(),
                        [](const char c) { return !(realsense2_camera::isValidCharInName(c)); },
                        '_');
        return fixed_name;
    }

    // Parse a (case-insensitive) format name into its rs2_format value.
    // Returns RS2_FORMAT_ANY when no librealsense format name matches.
    rs2_format string_to_rs2_format(std::string str)
    {
        // Normalize the input once, before the loop: the previous version
        // re-upper-cased the same string on every iteration.
        std::transform(str.begin(), str.end(), str.begin(),
                       [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
        rs2_format format = RS2_FORMAT_ANY;
        for (int i = 0; i < RS2_FORMAT_COUNT; i++)
        {
            if (str.compare(rs2_format_to_string((rs2_format)i)) == 0)
            {
                format = (rs2_format)i;
                break;
            }
        }
        return format;
    }

    // Latched QoS profile: keep-last-1, reliable, transient-local, so a late
    // subscriber still receives the last published sample.
    static const rmw_qos_profile_t rmw_qos_profile_latched =
    {
        RMW_QOS_POLICY_HISTORY_KEEP_LAST,
        1,
        RMW_QOS_POLICY_RELIABILITY_RELIABLE,
        RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL,
        RMW_QOS_DEADLINE_DEFAULT,
        RMW_QOS_LIFESPAN_DEFAULT,
        RMW_QOS_POLICY_LIVELINESS_SYSTEM_DEFAULT,
        RMW_QOS_LIVELINESS_LEASE_DURATION_DEFAULT,
        false
    };

    // Map a QoS name (as used in launch/parameter strings) to the rmw profile.
    // Throws std::runtime_error for names not in list_available_qos_strings().
    const rmw_qos_profile_t qos_string_to_qos(std::string str)
    {
        if (str == "UNKNOWN")
            return rmw_qos_profile_unknown;
        if (str == "SYSTEM_DEFAULT")
            return rmw_qos_profile_system_default;
        if (str == "DEFAULT")
            return rmw_qos_profile_default;
        if (str == "PARAMETER_EVENTS")
            return rmw_qos_profile_parameter_events;
        if (str == "SERVICES_DEFAULT")
            return rmw_qos_profile_services_default;
        if (str == "PARAMETERS")
            return rmw_qos_profile_parameters;
        if (str == "SENSOR_DATA")
            return rmw_qos_profile_sensor_data;
        throw std::runtime_error("Unknown QoS string " + str);
    }

    // Newline-separated list of the QoS names accepted by qos_string_to_qos.
    const std::string list_available_qos_strings()
    {
        std::stringstream res;
        res << "UNKNOWN" << "\n"
            << "SYSTEM_DEFAULT" << "\n"
            << "DEFAULT" << "\n"
            << "PARAMETER_EVENTS" << "\n"
            << "SERVICES_DEFAULT" << "\n"
            << "PARAMETERS" << "\n"
            << "SENSOR_DATA";
        return res.str();
    }

    // Serialize a byte vector as a JSON array of integers, e.g. "[1,2,3]".
    std::string vectorToJsonString(const std::vector<uint8_t>& vec) {
        std::ostringstream oss;
        oss << "[";
        for (size_t i = 0; i < vec.size(); ++i) {
            oss << static_cast<int>(vec[i]);
            if (i < vec.size() - 1) {
                oss << ",";
            }
        }
        oss << "]";
        return oss.str();
    }
}

View File

@@ -0,0 +1,640 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../include/base_realsense_node.h"
#include <image_publisher.h>
#include <fstream>
#include <rclcpp/qos.hpp>
#include "pointcloud_filter.h"
#include "align_depth_filter.h"
using namespace realsense2_camera;
using namespace rs2;
// One-time node initialization: declare dynamic parameters, start the
// diagnostics updater, discover the device's sensors, pick the base stream,
// set up filters and frame callbacks, launch the profile-change monitor
// thread, apply the initial sensor configuration, and expose the node's
// services and actions.
void BaseRealSenseNode::setup()
{
#if defined (ACCELERATE_GPU_WITH_GLSL)
    // Bring up the GLSL processing pipeline before any filter/sensor setup.
    initOpenGLProcessing(_accelerate_gpu_with_glsl);
    _is_accelerate_gpu_with_glsl_changed = false;
#endif
    setDynamicParams();
    startDiagnosticsUpdater();
    setAvailableSensors();
    SetBaseStream();
    setupFilters();
    setCallbackFunctions();
    monitoringProfileChanges();
    updateSensors();
    publishServices();
    publishActions();
}
// Spawn a background thread that waits on _cv_mpc for one of the change
// flags (profile, align-depth, or GPU acceleration) and re-applies the
// sensor configuration via updateSensors(). The wait times out every
// time_interval ms so _is_running is re-checked and the thread can exit.
void BaseRealSenseNode::monitoringProfileChanges()
{
    int time_interval(10000);  // wait timeout in milliseconds
    std::function<void()> func = [this, time_interval](){
        std::unique_lock<std::mutex> lock(_profile_changes_mutex);
        while(_is_running) {
            // Wake on shutdown or when any change flag is raised; the timeout
            // guards against a missed notification.
            _cv_mpc.wait_for(lock, std::chrono::milliseconds(time_interval),
                [&]{return (!_is_running || _is_profile_changed
                            || _is_align_depth_changed
#if defined (ACCELERATE_GPU_WITH_GLSL)
                            || _is_accelerate_gpu_with_glsl_changed
#endif
                            );});
            if (_is_running && (_is_profile_changed
                                || _is_align_depth_changed
#if defined (ACCELERATE_GPU_WITH_GLSL)
                                || _is_accelerate_gpu_with_glsl_changed
#endif
                                ))
            {
                ROS_DEBUG("Profile has changed");
                try
                {
                    updateSensors();
                }
                catch(const std::exception& e)
                {
                    ROS_ERROR_STREAM("Error updating the sensors: " << e.what());
                }
                // Clear the flags only after the update attempt, so a failed
                // update is not retried in a tight loop.
                _is_profile_changed = false;
                _is_align_depth_changed = false;
#if defined (ACCELERATE_GPU_WITH_GLSL)
                _is_accelerate_gpu_with_glsl_changed = false;
#endif
            }
        }
    };
    _monitoring_pc = std::make_shared<std::thread>(func);
}
// Discover and wrap the device's sensors: optionally load an advanced-mode
// JSON preset, log the device identity, then create one RosSensor per
// librealsense sensor with the callback matching its kind — video frames go
// through the (possibly synced/filtered) frame path, motion frames through
// the IMU path. Sensors of any other kind are skipped with a warning.
void BaseRealSenseNode::setAvailableSensors()
{
    if (!_json_file_path.empty())
    {
        // Advanced-mode JSON presets are only supported on rs400 devices.
        if (_dev.is<rs400::advanced_mode>())
        {
            std::stringstream ss;
            std::ifstream in(_json_file_path);
            if (in.is_open())
            {
                ss << in.rdbuf();
                std::string json_file_content = ss.str();

                auto adv = _dev.as<rs400::advanced_mode>();
                adv.load_json(json_file_content);
                ROS_INFO_STREAM("JSON file is loaded! (" << _json_file_path << ")");
            }
            else
                ROS_WARN_STREAM("JSON file provided doesn't exist! (" << _json_file_path << ")");
        }
        else
            ROS_WARN("Device does not support advanced settings!");
    }
    else
        ROS_INFO("JSON file is not provided");

    // Log the identity of the device we are wrapping.
    auto device_name = _dev.get_info(RS2_CAMERA_INFO_NAME);
    ROS_INFO_STREAM("Device Name: " << device_name);

    auto serial_no = _dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER);
    ROS_INFO_STREAM("Device Serial No: " << serial_no);

    auto device_port_id = _dev.get_info(RS2_CAMERA_INFO_PHYSICAL_PORT);
    ROS_INFO_STREAM("Device physical port: " << device_port_id);

    auto fw_ver = _dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION);
    ROS_INFO_STREAM("Device FW version: " << fw_ver);

    auto pid = _dev.get_info(RS2_CAMERA_INFO_PRODUCT_ID);
    ROS_INFO_STREAM("Device Product ID: 0x" << pid);

    ROS_INFO_STREAM("Sync Mode: " << ((_sync_frames)?"On":"Off"));

    // Video frames: dispatch through the asynchronous worker when frame
    // syncing or any enabled filter needs it, otherwise handle inline.
    std::function<void(rs2::frame)> frame_callback_function = [this](rs2::frame frame){
        bool is_filter(_filters.end() != find_if(_filters.begin(), _filters.end(), [](std::shared_ptr<NamedFilter> f){return (f->is_enabled()); }));
        if (_sync_frames || is_filter)
            this->_asyncer.invoke(frame);
        else
            frame_callback(frame);
    };

    // IMU frames: always handled directly; additionally fed to the sync
    // combiner when an IMU sync method is configured.
    std::function<void(rs2::frame)> imu_callback_function = [this](rs2::frame frame){
        imu_callback(frame);
        if (_imu_sync_method != imu_sync_method::NONE)
            imu_callback_sync(frame);
    };

    std::function<void(rs2::frame)> multiple_message_callback_function = [this](rs2::frame frame){multiple_message_callback(frame, _imu_sync_method);};

    // Raising _is_profile_changed under the mutex wakes the monitor thread
    // started in monitoringProfileChanges().
    std::function<void()> update_sensor_func = [this](){
        {
            std::lock_guard<std::mutex> lock_guard(_profile_changes_mutex);
            _is_profile_changed = true;
        }
        _cv_mpc.notify_one();
    };

    std::function<void()> hardware_reset_func = [this](){hardwareResetRequest();};

    _dev_sensors = _dev.query_sensors();
    for(auto&& sensor : _dev_sensors)
    {
        const std::string module_name(rs2_to_ros(sensor.get_info(RS2_CAMERA_INFO_NAME)));
        std::unique_ptr<RosSensor> rosSensor;
        if (sensor.is<rs2::depth_sensor>() ||
            sensor.is<rs2::color_sensor>())
        {
            ROS_DEBUG_STREAM("Set " << module_name << " as VideoSensor.");
            rosSensor = std::make_unique<RosSensor>(sensor, _parameters, frame_callback_function, update_sensor_func, hardware_reset_func, _diagnostics_updater, _logger, _use_intra_process, _dev.is<playback>());
        }
        else if (sensor.is<rs2::motion_sensor>())
        {
            ROS_DEBUG_STREAM("Set " << module_name << " as ImuSensor.");
            // NOTE: intra-process is forced off for IMU sensors here.
            rosSensor = std::make_unique<RosSensor>(sensor, _parameters, imu_callback_function, update_sensor_func, hardware_reset_func, _diagnostics_updater, _logger, false, _dev.is<playback>());
        }
        else
        {
            ROS_WARN_STREAM("Module Name \"" << module_name << "\" does not define a callback.");
            continue;
        }

        _available_ros_sensors.push_back(std::move(rosSensor));
    }
}
void BaseRealSenseNode::setCallbackFunctions()
{
_asyncer.start([this](rs2::frame f)
{
frame_callback(f);
});
}
void BaseRealSenseNode::stopPublishers(const std::vector<stream_profile>& profiles)
{
for (auto& profile : profiles)
{
stream_index_pair sip(profile.stream_type(), profile.stream_index());
if (profile.is<rs2::video_stream_profile>())
{
_image_publishers.erase(sip);
_info_publishers.erase(sip);
_depth_aligned_image_publishers.erase(sip);
_depth_aligned_info_publisher.erase(sip);
}
else if (profile.is<rs2::motion_stream_profile>())
{
_is_accel_enabled = false;
_is_gyro_enabled = false;
_synced_imu_publisher.reset();
_imu_publishers.erase(sip);
_imu_info_publishers.erase(sip);
}
_metadata_publishers.erase(sip);
_extrinsics_publishers.erase(sip);
if (_publish_tf)
{
eraseTransformMsgs(sip, profile);
}
}
}
// Create the ROS publishers for the given stream profiles of one sensor:
// image + camera_info (and aligned-depth variants) for video streams, IMU
// sample + latched imu_info for motion streams, plus per-stream metadata and
// extrinsics publishers, and — when both accel and gyro are enabled with a
// sync method — the combined "~/imu" publisher.
void BaseRealSenseNode::startPublishers(const std::vector<stream_profile>& profiles, const RosSensor& sensor)
{
    const std::string module_name(create_graph_resource_name(rs2_to_ros(sensor.get_info(RS2_CAMERA_INFO_NAME))));
    for (auto& profile : profiles)
    {
        stream_index_pair sip(profile.stream_type(), profile.stream_index());
        std::string stream_name(STREAM_NAME(sip));

        rmw_qos_profile_t qos = sensor.getQOS(sip);
        rmw_qos_profile_t info_qos = sensor.getInfoQOS(sip);

        if (profile.is<rs2::video_stream_profile>())
        {
            if(profile.stream_type() == RS2_STREAM_COLOR)
                _is_color_enabled = true;
            else if (profile.stream_type() == RS2_STREAM_DEPTH)
                _is_depth_enabled = true;
            std::stringstream image_raw, camera_info;
            // Depth-sensor streams publish rectified images ("image_rect_raw").
            bool rectified_image = false;
            if (sensor.rs2::sensor::is<rs2::depth_sensor>())
                rectified_image = true;

            // adding "~/" to the topic name will add node namespace and node name to the topic
            // see "Private Namespace Substitution Character" section on https://design.ros2.org/articles/topic_and_service_names.html
            image_raw << "~/" << stream_name << "/image_" << ((rectified_image)?"rect_":"") << "raw";
            camera_info << "~/" << stream_name << "/camera_info";

            // We can use 2 types of publishers:
            // 1. Native RCL publisher (supports intra-process zero-copy communication)
            // 2. Image-transport package publisher (adds a compressed image topic if installed)
#ifdef USE_LIFECYCLE_NODE
            // Always use `image_rcl_publisher` when lifecycle nodes are enabled
            _image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, image_raw.str(), qos);
#else
            // Use intra-process if enabled, otherwise use image_transport
            if (_use_intra_process)
            {
                _image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, image_raw.str(), qos);
            }
            else
            {
                _image_publishers[sip] = std::make_shared<image_transport_publisher>(_node, image_raw.str(), qos);
                ROS_DEBUG_STREAM("image transport publisher was created for topic " << image_raw.str());
            }
#endif
            _info_publishers[sip] = _node.create_publisher<sensor_msgs::msg::CameraInfo>(camera_info.str(),
                                    rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(info_qos), info_qos));

            // Aligned-depth topics for every non-depth video stream when the
            // align-depth filter is on.
            if (_align_depth_filter->is_enabled() && (sip != DEPTH))
            {
                std::stringstream aligned_image_raw, aligned_camera_info;
                aligned_image_raw << "~/" << "aligned_depth_to_" << stream_name << "/image_raw";
                aligned_camera_info << "~/" << "aligned_depth_to_" << stream_name << "/camera_info";

                std::string aligned_stream_name = "aligned_depth_to_" + stream_name;

                // We can use 2 types of publishers:
                // Native RCL publisher that support intra-process zero-copy comunication
                // image-transport package publisher that add's a commpressed image topic if the package is installed
#ifdef USE_LIFECYCLE_NODE
                // Always use `image_rcl_publisher` when lifecycle nodes are enabled
                _depth_aligned_image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, aligned_image_raw.str(), qos);
#else
                // Use intra-process if enabled, otherwise use image_transport
                if (_use_intra_process)
                {
                    _depth_aligned_image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, aligned_image_raw.str(), qos);
                }
                else
                {
                    _depth_aligned_image_publishers[sip] = std::make_shared<image_transport_publisher>(_node, aligned_image_raw.str(), qos);
                    ROS_DEBUG_STREAM("image transport publisher was created for topic " << aligned_image_raw.str());
                }
#endif
                _depth_aligned_info_publisher[sip] = _node.create_publisher<sensor_msgs::msg::CameraInfo>(aligned_camera_info.str(),
                                                    rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(info_qos), info_qos));
            }
        }
        else if (profile.is<rs2::motion_stream_profile>())
        {
            if(profile.stream_type() == RS2_STREAM_ACCEL)
                _is_accel_enabled = true;
            else if (profile.stream_type() == RS2_STREAM_GYRO)
                _is_gyro_enabled = true;

            std::stringstream data_topic_name, info_topic_name;
            data_topic_name << "~/" << stream_name << "/sample";
            _imu_publishers[sip] = _node.create_publisher<sensor_msgs::msg::Imu>(data_topic_name.str(),
                                    rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(qos), qos));
            // Publish Intrinsics:
            info_topic_name << "~/" << stream_name << "/imu_info";
            // IMU Info will have latched QoS, and it will publish its data only once during the ROS Node lifetime.
            // intra-process do not support latched QoS, so we need to disable intra-process for this topic
            rclcpp::PublisherOptionsWithAllocator<std::allocator<void>> options;
            options.use_intra_process_comm = rclcpp::IntraProcessSetting::Disable;
            _imu_info_publishers[sip] = _node.create_publisher<IMUInfo>(info_topic_name.str(),
                                        rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(rmw_qos_profile_latched), rmw_qos_profile_latched),
                                        std::move(options));
            IMUInfo info_msg = getImuInfo(profile);
            _imu_info_publishers[sip]->publish(info_msg);
        }
        std::string topic_metadata("~/" + stream_name + "/metadata");
        _metadata_publishers[sip] = _node.create_publisher<realsense2_camera_msgs::msg::Metadata>(topic_metadata,
                                    rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(info_qos), info_qos));

        // Extrinsics are published for every stream except the base stream
        // itself (they describe base -> stream).
        if (!((rs2::stream_profile)profile==(rs2::stream_profile)_base_profile))
        {

            // intra-process do not support latched QoS, so we need to disable intra-process for this topic
            rclcpp::PublisherOptionsWithAllocator<std::allocator<void>> options;
            options.use_intra_process_comm = rclcpp::IntraProcessSetting::Disable;

            rmw_qos_profile_t extrinsics_qos = rmw_qos_profile_latched;

            std::string topic_extrinsics("~/extrinsics/" + create_graph_resource_name(ros_stream_to_string(_base_profile.stream_type()) + "_to_" + stream_name));
            _extrinsics_publishers[sip] = _node.create_publisher<realsense2_camera_msgs::msg::Extrinsics>(topic_extrinsics,
                                            rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(extrinsics_qos), extrinsics_qos), std::move(options));
        }
    }
    if (_is_accel_enabled && _is_gyro_enabled && (_imu_sync_method > imu_sync_method::NONE))
    {
        rmw_qos_profile_t qos = _use_intra_process ? qos_string_to_qos(DEFAULT_QOS) : qos_string_to_qos(HID_QOS);

        _synced_imu_publisher = std::make_shared<SyncedImuPublisher>(_node.create_publisher<sensor_msgs::msg::Imu>("~/imu",
                                    rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(qos), qos)));
    }
}
// (Re)create the combined RGBD publisher when the rgbd topic is enabled.
// Publishing RGBD requires synced frames, enabled color and depth streams,
// and the align-depth filter; otherwise an error listing the missing
// prerequisites is logged and no publisher is created.
void BaseRealSenseNode::startRGBDPublisherIfNeeded()
{
    // Always drop the previous publisher first; it is re-created below if
    // the prerequisites still hold.
    _rgbd_publisher.reset();
    if (_enable_rgbd)  // the original "!_rgbd_publisher" test was always true right after reset()
    {
        if (_sync_frames && _is_color_enabled && _is_depth_enabled && _align_depth_filter->is_enabled())
        {
            rmw_qos_profile_t qos = _use_intra_process ? qos_string_to_qos(DEFAULT_QOS) : qos_string_to_qos(IMAGE_QOS);

            // adding "~/" to the topic name will add node namespace and node name to the topic
            // see "Private Namespace Substitution Character" section on https://design.ros2.org/articles/topic_and_service_names.html
            _rgbd_publisher = _node.create_publisher<realsense2_camera_msgs::msg::RGBD>("~/rgbd",
                                rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(qos), qos));
        }
        else {
            ROS_ERROR("In order to get rgbd topic enabled, "\
                "you should enable: color stream, depth stream, sync_mode and align_depth");
        }
    }
}
// Re-apply the sensor configuration under _update_sensor_mutex: stop
// whatever must change, optionally rebuild the GPU pipeline while sensors
// are down, then restart with the updated profiles. Exceptions are logged
// with file/line context and re-thrown to the caller.
void BaseRealSenseNode::updateSensors()
{
    std::lock_guard<std::mutex> lock_guard(_update_sensor_mutex);
    try{
        stopRequiredSensors();

#if defined (ACCELERATE_GPU_WITH_GLSL)
        // GPU acceleration was toggled: rebuild the OpenGL processing
        // pipeline while the affected sensors are stopped.
        if (_is_accelerate_gpu_with_glsl_changed)
        {
            shutdownOpenGLProcessing();

            initOpenGLProcessing(_accelerate_gpu_with_glsl);
        }
#endif

        startUpdatedSensors();
    }
    catch(const std::exception& ex)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << "An exception has been thrown: " << ex.what());
        throw;
    }
    catch(...)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << "Unknown exception has occured!");
        throw;
    }
}
// First half of a configuration update: for every sensor whose wanted
// profiles differ from its active ones (or, for video sensors, when the
// align-depth / GPU-acceleration setting changed), stop the sensor if
// needed and remove the publishers of its previously active profiles.
void BaseRealSenseNode::stopRequiredSensors()
{
    try{
        for(auto&& sensor : _available_ros_sensors)
        {
            std::string module_name(rs2_to_ros(sensor->get_info(RS2_CAMERA_INFO_NAME)));
            // if active_profiles != wanted_profiles: stop sensor.
            std::vector<stream_profile> wanted_profiles;

            bool is_profile_changed(sensor->getUpdatedProfiles(wanted_profiles));
            bool is_video_sensor = (sensor->is<rs2::depth_sensor>() || sensor->is<rs2::color_sensor>());

            // do all updates if profile has been changed, or if the align depth filter or gpu acceleration status has been changed
            // and we are on a video sensor. TODO: explore better options to monitor and update changes
            // without resetting the whole sensors and topics.
            if (is_profile_changed || (is_video_sensor && (_is_align_depth_changed
#if defined (ACCELERATE_GPU_WITH_GLSL)
                                                            || _is_accelerate_gpu_with_glsl_changed
#endif
                                                            )))
            {
                std::vector<stream_profile> active_profiles = sensor->get_active_streams();
                if (is_profile_changed
#if defined (ACCELERATE_GPU_WITH_GLSL)
                    || _is_accelerate_gpu_with_glsl_changed
#endif
                    )
                {
                    // Start/stop sensors only if profile or gpu acceleration status was changed
                    // No need to start/stop sensors if align_depth was changed
                    ROS_INFO_STREAM("Stopping Sensor: " << module_name);
                    sensor->stop();
                }
                stopPublishers(active_profiles);
            }
        }
    }
    catch(const std::exception& ex)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << "An exception has been thrown: " << ex.what());
        throw;
    }
    catch(...)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << "Unknown exception has occured!");
        throw;
    }
}
// Second half of a configuration update: for every sensor whose profiles
// (or relevant settings) changed, create the publishers and calibration
// data for its wanted profiles, append its TF messages, restart the sensor
// if needed, and refresh the depth scale. Finally republish the static
// transforms and (re)create the RGBD publisher if enabled.
void BaseRealSenseNode::startUpdatedSensors()
{
    try{
        for(auto&& sensor : _available_ros_sensors)
        {
            std::string module_name(rs2_to_ros(sensor->get_info(RS2_CAMERA_INFO_NAME)));
            // if active_profiles != wanted_profiles: stop sensor.
            std::vector<stream_profile> wanted_profiles;

            bool is_profile_changed(sensor->getUpdatedProfiles(wanted_profiles));
            bool is_video_sensor = (sensor->is<rs2::depth_sensor>() || sensor->is<rs2::color_sensor>());

            if (is_profile_changed || (is_video_sensor && (_is_align_depth_changed
#if defined (ACCELERATE_GPU_WITH_GLSL)
                                                            || _is_accelerate_gpu_with_glsl_changed
#endif
                                                            )))
            {
                if (!wanted_profiles.empty())
                {
                    // Publishers and calibration info must exist before the
                    // sensor starts delivering frames.
                    startPublishers(wanted_profiles, *sensor);
                    updateProfilesStreamCalibData(wanted_profiles);
                    if (_publish_tf)
                    {
                        std::lock_guard<std::mutex> lock_guard(_publish_tf_mutex);
                        for (auto &profile : wanted_profiles)
                        {
                            calcAndAppendTransformMsgs(profile, _base_profile);
                        }
                    }

                    if (is_profile_changed
#if defined (ACCELERATE_GPU_WITH_GLSL)
                        || _is_accelerate_gpu_with_glsl_changed
#endif
                        )
                    {
                        // Start/stop sensors only if profile or gpu acceleration was changed
                        // No need to start/stop sensors if align_depth was changed
                        ROS_INFO_STREAM("Starting Sensor: " << module_name);
                        sensor->start(wanted_profiles);
                    }

                    if (sensor->rs2::sensor::is<rs2::depth_sensor>())
                    {
                        _depth_scale_meters = sensor->as<rs2::depth_sensor>().get_depth_scale();
                    }
                }
            }
        }
        if (_publish_tf)
        {
            std::lock_guard<std::mutex> lock_guard(_publish_tf_mutex);
            publishStaticTransforms();
        }

        startRGBDPublisherIfNeeded();
    }
    catch(const std::exception& ex)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << "An exception has been thrown: " << ex.what());
        throw;
    }
    catch(...)
    {
        ROS_ERROR_STREAM(__FILE__ << ":" << __LINE__ << ":" << "Unknown exception has occured!");
        throw;
    }
}
// Register the node's services: hardware reset, device info, and the
// calibration-config read/write pair. Each service handler is forwarded to
// the matching member function.
void BaseRealSenseNode::publishServices()
{
    // adding "~/" to the service name will add node namespace and node name to the service
    // see "Private Namespace Substitution Character" section on https://design.ros2.org/articles/topic_and_service_names.html
    _reset_srv = _node.create_service<std_srvs::srv::Empty>(
            "~/hw_reset",
            [&](const std_srvs::srv::Empty::Request::SharedPtr req,
                        std_srvs::srv::Empty::Response::SharedPtr res)
                        {handleHWReset(req, res);});

    _device_info_srv = _node.create_service<realsense2_camera_msgs::srv::DeviceInfo>(
            "~/device_info",
            [&](const realsense2_camera_msgs::srv::DeviceInfo::Request::SharedPtr req,
                        realsense2_camera_msgs::srv::DeviceInfo::Response::SharedPtr res)
                        {getDeviceInfo(req, res);});

    _calib_config_read_srv = _node.create_service<realsense2_camera_msgs::srv::CalibConfigRead>(
            "~/calib_config_read",
            [&](const realsense2_camera_msgs::srv::CalibConfigRead::Request::SharedPtr req,
                        realsense2_camera_msgs::srv::CalibConfigRead::Response::SharedPtr res)
                        {CalibConfigReadService(req, res);});

    _calib_config_write_srv = _node.create_service<realsense2_camera_msgs::srv::CalibConfigWrite>(
            "~/calib_config_write",
            [&](const realsense2_camera_msgs::srv::CalibConfigWrite::Request::SharedPtr req,
                        realsense2_camera_msgs::srv::CalibConfigWrite::Response::SharedPtr res)
                        {CalibConfigWriteService(req, res);});
}
// Register the triggered-calibration action server; the goal / cancel /
// accepted callbacks are bound to the TriggeredCalibrationHandle* members.
void BaseRealSenseNode::publishActions()
{
    using namespace std::placeholders;
    _triggered_calibration_action_server = rclcpp_action::create_server<TriggeredCalibration>(
                                                _node.get_node_base_interface(),
                                                _node.get_node_clock_interface(),
                                                _node.get_node_logging_interface(),
                                                _node.get_node_waitables_interface(),
                                                "~/triggered_calibration",
                                                std::bind(&BaseRealSenseNode::TriggeredCalibrationHandleGoal, this, _1, _2),
                                                std::bind(&BaseRealSenseNode::TriggeredCalibrationHandleCancel, this, _1),
                                                std::bind(&BaseRealSenseNode::TriggeredCalibrationHandleAccepted, this, _1));
}
// "~/hw_reset" service handler: stop every sensor, then ask the device to
// perform a hardware reset. Failures are logged but not reported back —
// std_srvs::Empty carries no payload.
void BaseRealSenseNode::handleHWReset(const std_srvs::srv::Empty::Request::SharedPtr req,
                                      const std_srvs::srv::Empty::Response::SharedPtr res)
{
    (void)req;
    (void)res;
    ROS_INFO_STREAM("Reset requested through service call");
    if (!_dev)
    {
        return;
    }
    try
    {
        // Stop all streaming before resetting the hardware.
        for (auto&& ros_sensor : _available_ros_sensors)
        {
            std::string module_name(rs2_to_ros(ros_sensor->get_info(RS2_CAMERA_INFO_NAME)));
            ROS_INFO_STREAM("Stopping Sensor: " << module_name);
            ros_sensor->stop();
        }
        ROS_INFO("Resetting device...");
        _dev.hardware_reset();
    }
    catch(const std::exception& ex)
    {
        ROS_WARN_STREAM("An exception has been thrown: " << __FILE__ << ":" << __LINE__ << ":" << ex.what());
    }
}
// "~/device_info" service handler: fill the response with the device's
// identity fields (empty string when a field is unsupported) and a
// comma-separated list of the available sensor names.
void BaseRealSenseNode::getDeviceInfo(const realsense2_camera_msgs::srv::DeviceInfo::Request::SharedPtr,
                                      realsense2_camera_msgs::srv::DeviceInfo::Response::SharedPtr res)
{
    res->device_name = _dev.supports(RS2_CAMERA_INFO_NAME) ? create_graph_resource_name(_dev.get_info(RS2_CAMERA_INFO_NAME)) : "";
    res->serial_number = _dev.supports(RS2_CAMERA_INFO_SERIAL_NUMBER) ? _dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) : "";
    res->firmware_version = _dev.supports(RS2_CAMERA_INFO_FIRMWARE_VERSION) ? _dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION) : "";
    res->usb_type_descriptor = _dev.supports(RS2_CAMERA_INFO_USB_TYPE_DESCRIPTOR) ? _dev.get_info(RS2_CAMERA_INFO_USB_TYPE_DESCRIPTOR) : "";
    res->firmware_update_id = _dev.supports(RS2_CAMERA_INFO_FIRMWARE_UPDATE_ID) ? _dev.get_info(RS2_CAMERA_INFO_FIRMWARE_UPDATE_ID) : "";

    // Join the sensor names with commas (no trailing separator).
    std::string sensors_list;
    for (auto& sensor : _available_ros_sensors)
    {
        if (!sensors_list.empty())
            sensors_list += ",";
        sensors_list += create_graph_resource_name(rs2_to_ros(sensor->get_info(RS2_CAMERA_INFO_NAME)));
    }
    res->sensors = sensors_list;
    res->physical_port = _dev.supports(RS2_CAMERA_INFO_PHYSICAL_PORT) ? _dev.get_info(RS2_CAMERA_INFO_PHYSICAL_PORT) : "";
}
// "~/calib_config_read" service handler: read the calibration config from
// the device. Failures are reported through success/error_message instead
// of letting the exception escape the service callback.
void BaseRealSenseNode::CalibConfigReadService(const realsense2_camera_msgs::srv::CalibConfigRead::Request::SharedPtr req,
                                               realsense2_camera_msgs::srv::CalibConfigRead::Response::SharedPtr res){
    (void)req; // the request carries no fields
    try
    {
        auto calibrated_device = _dev.as<rs2::auto_calibrated_device>();
        res->calib_config = calibrated_device.get_calibration_config();
        res->success = true;
    }
    catch (const std::exception &e)
    {
        res->success = false;
        res->error_message = std::string("Exception occurred: ") + e.what();
    }
}
// "~/calib_config_write" service handler: write the supplied calibration
// config to the device; any failure is reported through the response
// rather than thrown.
void BaseRealSenseNode::CalibConfigWriteService(const realsense2_camera_msgs::srv::CalibConfigWrite::Request::SharedPtr req,
                                                realsense2_camera_msgs::srv::CalibConfigWrite::Response::SharedPtr res){
    try
    {
        auto calibrated_device = _dev.as<rs2::auto_calibrated_device>();
        calibrated_device.set_calibration_config(req->calib_config);
        res->success = true;
    }
    catch (const std::exception &e)
    {
        res->success = false;
        res->error_message = std::string("Exception occurred: ") + e.what();
    }
}

View File

@@ -0,0 +1,295 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sensor_params.h>
#include <constants.h>
#include <iomanip>
namespace realsense2_camera
{
// An option maps to a boolean (checkbox) parameter when its range is
// exactly {0, 1} with step 1.
bool is_checkbox(rs2::options sensor, rs2_option option)
{
    const rs2::option_range range = sensor.get_option_range(option);
    return range.min == 0.0f && range.max == 1.0f && range.step == 1.0f;
}
// An option maps to an enum parameter when its step is (approximately) 1,
// its maximum is small, and at least one of its values has a textual
// description from the sensor.
bool is_enum_option(rs2::options sensor, rs2_option option)
{
    static const int MAX_ENUM_OPTION_VALUES(100);
    static const float EPSILON(0.05);

    rs2::option_range op_range = sensor.get_option_range(option);
    // std::fabs, not unqualified abs: the integer abs overload would
    // truncate the fractional step difference and mis-classify options
    // whose step is close to, but not exactly, 1.
    if (std::fabs(op_range.step - 1) > EPSILON || (op_range.max > MAX_ENUM_OPTION_VALUES)) return false;
    for (auto i = op_range.min; i <= op_range.max; i += op_range.step)
    {
        if (sensor.get_option_value_description(option, i) == nullptr)
            continue;
        return true;
    }
    return false;
}
// An option maps to an integer parameter when its step size is exactly 1.
bool is_int_option(rs2::options sensor, rs2_option option)
{
    return sensor.get_option_range(option).step == 1.0;
}
// Build the {description -> value} map for an enum option. Returns an
// empty map when the option is not an enum (see is_enum_option).
std::map<std::string, int> get_enum_method(rs2::options sensor, rs2_option option)
{
    std::map<std::string, int> dict; // An enum to set size
    if (is_enum_option(sensor, option))
    {
        rs2::option_range op_range = sensor.get_option_range(option);
        const auto op_range_min = int(op_range.min);
        const auto op_range_max = int(op_range.max);
        const auto op_range_step = int(op_range.step);
        for (auto val = op_range_min; val <= op_range_max; val += op_range_step)
        {
            // Query the description once per value (the previous version
            // called get_option_value_description twice per iteration).
            const char* description = sensor.get_option_value_description(option, val);
            if (description == nullptr)
                continue;
            dict[description] = val;
        }
    }
    return dict;
}
// Forward a ROS parameter update to the sensor as the librealsense option value.
template<class T>
void param_set_option(rs2::options sensor, rs2_option option, const rclcpp::Parameter& parameter)
{
    const T requested_value = parameter.get_value<T>();
    sensor.set_option(option, requested_value);
}
// Deregister every parameter this object declared, newest first, leaving
// the bookkeeping list empty.
void SensorParams::clearParameters()
{
    for (; !_parameters_names.empty(); _parameters_names.pop_back())
    {
        _parameters->removeParam(_parameters_names.back());
    }
}
// Destructor: ensure all ROS parameters declared by this object are removed.
SensorParams::~SensorParams()
{
    clearParameters();
}
// Cast a float option value to double while snapping it onto the option's
// step grid, so the result complies with the FloatingPointRange
// configuration and the rclcpp NodeParameters::declare_parameter call
// succeeds.
// For more info, see https://github.com/ros2/rclcpp/blob/rolling/rclcpp/src/rclcpp/node_interfaces/node_parameters.cpp
// (rcl_interfaces::msg::SetParametersResult and __are_doubles_equal methods).
double SensorParams::float_to_double(float val, rs2::option_range option_range)
{
    float range = option_range.max - option_range.min;
    int steps = static_cast<int>(range / option_range.step);
    // Degenerate range (step <= 0, or range smaller than one step): snapping
    // is impossible — the original divided by zero here. Fall back to a
    // plain cast.
    if (steps <= 0)
        return static_cast<double>(val);
    double step = static_cast<double>(range) / steps;
    double rounded_div = std::round((val - option_range.min) / step);
    return option_range.min + rounded_div * step;
}
// Build a ParameterDescriptor for a sensor option: set the rclcpp type
// (double / bool / int), attach the allowed range, and append the option's
// default value to the human-readable description.
// For more info, see https://docs.ros2.org/galactic/api/rcl_interfaces/msg/ParameterDescriptor.html
template<class T>
rcl_interfaces::msg::ParameterDescriptor SensorParams::get_parameter_descriptor(const std::string& option_name,
        rs2::option_range option_range, T option_value, const std::string& option_description, const std::string& description_addition)
{
    rcl_interfaces::msg::ParameterDescriptor parameter_descriptor;

    // set descriptor name to be the option name in order to get more detailed warnings/errors from rclcpp
    parameter_descriptor.name = option_name;

    // create description stream, and start filling it.
    std::stringstream description_stream;
    description_stream << option_description << std::endl << description_addition;
    if(description_addition.length() > 0)
        description_stream << std::endl;

    if (std::is_same<T, double>::value)
    {
        parameter_descriptor.type = rclcpp::PARAMETER_DOUBLE;

        // set option range values (floats)
        rcl_interfaces::msg::FloatingPointRange range;
        // float_to_double snaps the bounds onto the step grid so rclcpp's
        // range validation accepts the declared values.
        range.from_value = float_to_double(option_range.min, option_range);
        range.to_value = float_to_double(option_range.max, option_range);
        range.step = float_to_double(option_range.step, option_range);
        parameter_descriptor.floating_point_range.push_back(range);

        // add the default value of this option to the description stream
        description_stream << "Default value: " << static_cast<double>(option_range.def);

        ROS_DEBUG_STREAM("Declare: DOUBLE::" << option_name << "=" << option_value << " [" << option_range.min << ", " << option_range.max << "]");
    }
    else // T is bool or int
    {
        // set option range values (integers) (suitable for boolean ranges also: [0,1])
        rcl_interfaces::msg::IntegerRange range;
        range.from_value = static_cast<int64_t>(option_range.min);
        range.to_value = static_cast<int64_t>(option_range.max);
        range.step = static_cast<uint64_t>(option_range.step);
        parameter_descriptor.integer_range.push_back(range);

        if (std::is_same<T, bool>::value)
        {
            parameter_descriptor.type = rclcpp::PARAMETER_BOOL;

            // add the default value of this option to the description stream
            description_stream << "Default value: " << (option_range.def == 0 ? "False" : "True");

            ROS_DEBUG_STREAM("Declare: BOOL::" << option_name << "=" << (option_range.def == 0 ? "False" : "True") << " [" << option_range.min << ", " << option_range.max << "]");
        }
        else
        {
            parameter_descriptor.type = rclcpp::PARAMETER_INTEGER;

            // add the default value of this option to the description stream
            description_stream << "Default value: " << static_cast<int64_t>(option_range.def);

            ROS_DEBUG_STREAM("Declare: INT::" << option_name << "=" << option_value << " [" << option_range.min << ", " << option_range.max << "]");
        }
    }

    parameter_descriptor.description = description_stream.str();
    return parameter_descriptor;
}
// Declare one ROS parameter mirroring a sensor option: read the current
// value and range from the sensor, declare the parameter with a matching
// descriptor and a callback that writes changes back to the sensor, and —
// if the parameter server returned a different (overridden) value — push
// that value to the sensor immediately.
template<class T>
void SensorParams::set_parameter(rs2::options sensor, rs2_option option, const std::string& module_name, const std::string& description_addition)
{
    // set the option name, for example: depth_module.exposure
    const std::string option_name(module_name + "." + create_graph_resource_name(rs2_option_to_string(option)));

    // get option current value, and option range values from the sensor
    T option_value;
    rs2::option_range option_range;
    try
    {
        option_range = sensor.get_option_range(option);
        float current_val = sensor.get_option(option);
        if(std::is_same<T, double>::value)
        {
            // Snap onto the step grid so the declared value passes rclcpp's
            // floating-point range validation.
            option_value = float_to_double(current_val, option_range);
        }
        else
        {
            option_value = static_cast<T>(current_val);
        }
    }
    catch(const std::exception& ex)
    {
        ROS_ERROR_STREAM("An error has occurred while calling sensor for: " << option_name << ":" << ex.what());
        return;
    }

    // get parameter descriptor for this option
    rcl_interfaces::msg::ParameterDescriptor parameter_descriptor =
            get_parameter_descriptor(option_name, option_range, option_value, sensor.get_option_description(option), description_addition);

    T new_val;
    try
    {
        // setParam returns the effective value, which may differ from
        // option_value when a parameter override was supplied at startup.
        new_val = (_parameters->setParam<T>(option_name, option_value, [option, sensor](const rclcpp::Parameter& parameter)
                {
                    param_set_option<T>(sensor, option, parameter);
                }, parameter_descriptor));
        _parameters_names.push_back(option_name);
    }
    catch(const rclcpp::exceptions::InvalidParameterValueException& e)
    {
        ROS_WARN_STREAM("Failed to set parameter:" << option_name << " = " << option_value << "[" << option_range.min << ", " << option_range.max << "]\n" << e.what());
        return;
    }

    if (new_val != option_value)
    {
        // An override changed the value: propagate it to the sensor now.
        try
        {
            sensor.set_option(option, new_val);
        }
        catch(std::exception& e)
        {
            ROS_WARN_STREAM("Failed to set value to sensor: " << option_name << " = " << option_value << "[" << option_range.min << ", " << option_range.max << "]\n" << e.what());
        }
    }
}
// Declare a ROS parameter for every supported, writable option of the given
// sensor so options can be controlled at runtime through the parameter API.
// 'module_name' prefixes every parameter name (e.g. "depth_module.exposure").
void SensorParams::registerDynamicOptions(rs2::options sensor, const std::string& module_name)
{
    for (auto i = 0; i < RS2_OPTION_COUNT; i++)
    {
        rs2_option option = static_cast<rs2_option>(i);
        const std::string option_name(module_name + "." + create_graph_resource_name(rs2_option_to_string(option)));
        // Skip options the sensor does not expose, and read-only ones.
        if (!sensor.supports(option) || sensor.is_option_read_only(option))
        {
            continue;
        }
        // Checkbox-like options become bool parameters.
        if (is_checkbox(sensor, option))
        {
            set_parameter<bool>(sensor, option, module_name);
            continue;
        }
        const auto enum_dict = get_enum_method(sensor, option);
        if (enum_dict.empty())
        {
            if (is_int_option(sensor, option))
            {
                set_parameter<int>(sensor, option, module_name);
            }
            else
            {
                if (i == RS2_OPTION_DEPTH_UNITS)
                {
                    // Depth units are not exposed as a parameter: force the
                    // fixed ROS depth scale onto the sensor when it is within
                    // the supported range.
                    rs2::option_range op_range = sensor.get_option_range(option);
                    if (ROS_DEPTH_SCALE >= op_range.min && ROS_DEPTH_SCALE <= op_range.max)
                    {
                        try
                        {
                            sensor.set_option(option, ROS_DEPTH_SCALE);
                        }
                        catch(const std::exception& e)
                        {
                            std::cout << "Failed to set value: " << e.what() << std::endl;
                        }
                        // Collapse the range so the value cannot be changed.
                        op_range.min = ROS_DEPTH_SCALE;
                        op_range.max = ROS_DEPTH_SCALE;
                    }
                }
                else
                {
                    set_parameter<double>(sensor, option, module_name);
                }
            }
        }
        else
        {
            // Enum option: build a "<name> : <value>" legend sorted by value
            // and declare the option as an int parameter with that legend as
            // its description.
            std::vector<std::pair<std::string, int> > enum_vec;
            size_t longest_desc(0);
            for (auto enum_iter : enum_dict)
            {
                enum_vec.push_back(std::make_pair(enum_iter.first, enum_iter.second));
                longest_desc = std::max(longest_desc, enum_iter.first.size());
            }
            sort(enum_vec.begin(), enum_vec.end(), [](std::pair<std::string, int> e1, std::pair<std::string, int> e2){return (e1.second < e2.second);});
            std::stringstream description;
            for (auto vec_iter : enum_vec)
            {
                // Pad names so the values line up in the rendered description.
                description << std::setw(longest_desc+6) << std::left << vec_iter.first << " : " << vec_iter.second << std::endl;
            }
            set_parameter<int>(sensor, option, module_name, description.str());
        }
    }
}

View File

@@ -0,0 +1,306 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../include/base_realsense_node.h"
#include <geometry_msgs/msg/vector3_stamped.hpp>
using namespace realsense2_camera;
void BaseRealSenseNode::restartStaticTransformBroadcaster()
{
// Since the _static_tf_broadcaster is a latched topic, the old transforms will
// be alive even if the sensors are dynamically disabled. So, reset the
// broadcaster everytime and publish the transforms of enabled sensors alone.
if (_static_tf_broadcaster)
{
_static_tf_broadcaster.reset();
}
// intra-process do not support latched QoS, so we need to disable intra-process for this topic
rclcpp::PublisherOptionsWithAllocator<std::allocator<void>> options;
options.use_intra_process_comm = rclcpp::IntraProcessSetting::Disable;
_static_tf_broadcaster = std::make_shared<tf2_ros::StaticTransformBroadcaster>(_node,
tf2_ros::StaticBroadcasterQoS(),
std::move(options));
}
// Build a stamped transform from frame 'from' to frame 'to' and queue it in
// _static_tf_msgs for the static broadcaster.
void BaseRealSenseNode::append_static_tf_msg(const rclcpp::Time& t,
                                             const float3& trans,
                                             const tf2::Quaternion& q,
                                             const std::string& from,
                                             const std::string& to)
{
    geometry_msgs::msg::TransformStamped tf_msg;
    tf_msg.header.stamp = t;
    tf_msg.header.frame_id = from;
    tf_msg.child_frame_id = to;
    // The rotation quaternion is passed through unchanged.
    tf_msg.transform.rotation.x = q.getX();
    tf_msg.transform.rotation.y = q.getY();
    tf_msg.transform.rotation.z = q.getZ();
    tf_msg.transform.rotation.w = q.getW();
    // The translation (taken from camera extrinsics) is given in optical
    // coordinates; remap (x,y,z)_optical -> (z,-x,-y)_ros.
    tf_msg.transform.translation.x = trans.z;
    tf_msg.transform.translation.y = -trans.x;
    tf_msg.transform.translation.z = -trans.y;
    _static_tf_msgs.push_back(tf_msg);
}
// Unary predicate matching a TransformStamped whose parent ('frame_id') and
// child ('child_frame_id') equal the ids given at construction.
// Used with std::find_if to locate a specific static TF message.
struct compare_tf_ids
{
    // NOTE: reference members - the predicate must not outlive the strings
    // it was constructed with.
    const std::string& frame_id;
    const std::string& child_frame_id;
    // Struct Constructor
    compare_tf_ids(const std::string& frame_id, const std::string& child_frame_id) :
        frame_id(frame_id), child_frame_id(child_frame_id) {}
    bool operator()(const geometry_msgs::msg::TransformStamped &static_tf_msg) const
    {
        return (static_tf_msg.header.frame_id == frame_id &&
                static_tf_msg.child_frame_id == child_frame_id);
    }
};
void BaseRealSenseNode::erase_static_tf_msg(const std::string& frame_id,
const std::string& child_frame_id)
{
std::vector<geometry_msgs::msg::TransformStamped>::iterator it;
// Find whether there is any TF with given 'frame_id' and 'child_frame_id'
it = std::find_if(_static_tf_msgs.begin(), _static_tf_msgs.end(),
compare_tf_ids(frame_id, child_frame_id));
// If found, erase that specific TF
if (it != std::end(_static_tf_msgs))
{
_static_tf_msgs.erase(it);
}
}
void BaseRealSenseNode::publishExtrinsicsTopic(const stream_index_pair& sip, const rs2_extrinsics& ex){
// Publish extrinsics topic:
Extrinsics msg = rsExtrinsicsToMsg(ex);
if (_extrinsics_publishers.find(sip) != _extrinsics_publishers.end())
{
_extrinsics_publishers[sip]->publish(msg);
}
}
// Convert a librealsense 3x3 rotation matrix into a tf2 quaternion.
tf2::Quaternion BaseRealSenseNode::rotationMatrixToQuaternion(const float rotation[9]) const
{
    // librealsense stores the rotation column-major while Eigen's comma
    // initializer fills row-major, so the matrix is transposed while loading.
    Eigen::Matrix3f rot_mat;
    rot_mat << rotation[0], rotation[3], rotation[6],
               rotation[1], rotation[4], rotation[7],
               rotation[2], rotation[5], rotation[8];
    Eigen::Quaternionf eigen_quat(rot_mat);
    return tf2::Quaternion(eigen_quat.x(), eigen_quat.y(), eigen_quat.z(), eigen_quat.w());
}
// Compute and queue the static TF chain of one stream profile relative to the
// base profile: base_frame -> <stream>_frame -> <stream>_optical_frame, plus
// aligned-depth and IMU frames where applicable. Also publishes the raw
// extrinsics on the per-stream extrinsics topic.
void BaseRealSenseNode::calcAndAppendTransformMsgs(const rs2::stream_profile& profile,
                                                   const rs2::stream_profile& base_profile)
{
    // Transform base to stream
    stream_index_pair sip(profile.stream_type(), profile.stream_index());
    // rotation quaternion from ROS CS to Optical CS
    tf2::Quaternion quaternion_optical;
    quaternion_optical.setRPY(-M_PI / 2, 0, -M_PI/2); // R,P,Y rotations over the original axes
    // zero rotation quaternion, used for IMU
    tf2::Quaternion zero_rot_quaternions;
    zero_rot_quaternions.setRPY(0, 0, 0);
    // zero translation, used for moving from ROS frame to its corresponding Optical frame
    float3 zero_trans{0, 0, 0};
    rclcpp::Time transform_ts_ = _node.now();
    // extrinsic from A to B is the position of A relative to B
    // TF from A to B is the transformation to be done on A to get to B
    // so, we need to calculate extrinsics in two opposite ways, one for extrinsic topic
    // and the second is for transformation topic (TF)
    rs2_extrinsics normal_ex;  // used for the extrinsics topic
    rs2_extrinsics tf_ex;      // used for TF
    try
    {
        normal_ex = base_profile.get_extrinsics_to(profile);
        tf_ex = profile.get_extrinsics_to(base_profile);
    }
    catch (std::exception& e)
    {
        // Fall back to identity only for the specific "not available" error;
        // anything else is propagated to the caller.
        if (!strcmp(e.what(), "Requested extrinsics are not available!"))
        {
            ROS_WARN_STREAM("(" << rs2_stream_to_string(base_profile.stream_type()) << ", "
                                << base_profile.stream_index() << ") -> ("
                                << rs2_stream_to_string(profile.stream_type()) << ", "
                                << profile.stream_index() << "): "
                                << e.what()
                                << " : using unity as default.");
            normal_ex = rs2_extrinsics({{1, 0, 0, 0, 1, 0, 0, 0, 1}, {0,0,0}});
            tf_ex = normal_ex;
        }
        else
        {
            throw e;
        }
    }
    // publish normal extrinsics e.g. /camera/extrinsics/depth_to_color
    publishExtrinsicsTopic(sip, normal_ex);
    // Representing Rotations with Quaternions
    // see https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations for reference
    // Q defines a quaternion rotation from <base profile CS> to <current profile CS>
    // for example, rotation from depth to infra2
    auto Q = rotationMatrixToQuaternion(tf_ex.rotation);
    float3 trans{tf_ex.translation[0], tf_ex.translation[1], tf_ex.translation[2]};
    // Rotation order is important (start from left to right):
    // 1. quaternion_optical.inverse() [ROS -> Optical]
    // 2. Q [Optical -> Optical] (usually no rotation, but might be very small rotations between sensors, like from Depth to Color)
    // 3. quaternion_optical [Optical -> ROS]
    // We do all these products since we want to finish in ROS CS, while Q is a rotation from optical to optical,
    // and cant be used directly in ROS TF without this combination
    Q = quaternion_optical * Q * quaternion_optical.inverse();
    // The translation vector is in the Optical CS, and we convert it to ROS CS inside append_static_tf_msg
    append_static_tf_msg(transform_ts_, trans, Q, _base_frame_id, FRAME_ID(sip));
    // Transform stream frame from ROS CS to optical CS and publish it
    // We are using zero translation vector here, since no translation between frame and optical_frame, but only rotation
    append_static_tf_msg(transform_ts_, zero_trans, quaternion_optical, FRAME_ID(sip), OPTICAL_FRAME_ID(sip));
    // Non-depth video streams with index 1 also get an aligned-depth frame
    // chain, mirroring the main frame/optical_frame pair above.
    if (profile.is<rs2::video_stream_profile>() &&
        profile.stream_type() != RS2_STREAM_DEPTH &&
        profile.stream_index() == 1)
    {
        append_static_tf_msg(transform_ts_, trans, Q, _base_frame_id, ALIGNED_DEPTH_TO_FRAME_ID(sip));
        append_static_tf_msg(transform_ts_, zero_trans, quaternion_optical,
                             ALIGNED_DEPTH_TO_FRAME_ID(sip), OPTICAL_FRAME_ID(sip));
    }
    // When IMU streams are united, publish the combined IMU frames off the gyro stream.
    if ((_imu_sync_method > imu_sync_method::NONE) && (profile.stream_type() == RS2_STREAM_GYRO))
    {
        append_static_tf_msg(transform_ts_, zero_trans, zero_rot_quaternions, FRAME_ID(sip), IMU_FRAME_ID);
        append_static_tf_msg(transform_ts_, zero_trans, quaternion_optical, IMU_FRAME_ID, IMU_OPTICAL_FRAME_ID);
    }
}
// Remove the previously-appended static transforms of the given stream so a
// dynamically disabled sensor's frames stop being broadcast. Mirrors the
// append logic of calcAndAppendTransformMsgs().
void BaseRealSenseNode::eraseTransformMsgs(const stream_index_pair& sip, const rs2::stream_profile& profile)
{
    std::lock_guard<std::mutex> lock_guard(_publish_tf_mutex);
    erase_static_tf_msg(_base_frame_id, FRAME_ID(sip));
    erase_static_tf_msg(FRAME_ID(sip), OPTICAL_FRAME_ID(sip));
    // Aligned-depth frames exist only for non-depth video streams with index 1.
    if (profile.is<rs2::video_stream_profile>() &&
        profile.stream_type() != RS2_STREAM_DEPTH &&
        profile.stream_index() == 1)
    {
        erase_static_tf_msg(_base_frame_id, ALIGNED_DEPTH_TO_FRAME_ID(sip));
        erase_static_tf_msg(ALIGNED_DEPTH_TO_FRAME_ID(sip), OPTICAL_FRAME_ID(sip));
    }
    // IMU frames are only present when IMU sync is enabled (see the append side).
    if ((_imu_sync_method > imu_sync_method::NONE) && (profile.stream_type() == RS2_STREAM_GYRO))
    {
        erase_static_tf_msg(FRAME_ID(sip), IMU_FRAME_ID);
        erase_static_tf_msg(IMU_FRAME_ID, IMU_OPTICAL_FRAME_ID);
    }
}
void BaseRealSenseNode::publishStaticTransforms()
{
restartStaticTransformBroadcaster();
_static_tf_broadcaster->sendTransform(_static_tf_msgs);
}
// Thread body: periodically re-publish the transform list on /tf at
// _tf_publish_rate Hz until the node stops or the rate is set to <= 0.
void BaseRealSenseNode::publishDynamicTransforms()
{
    if (!_dynamic_tf_broadcaster)
    {
        _dynamic_tf_broadcaster = std::make_shared<tf2_ros::TransformBroadcaster>(_node);
    }
    // Publish transforms for the cameras
    std::unique_lock<std::mutex> lock(_publish_dynamic_tf_mutex);
    while (rclcpp::ok() && _is_running && _tf_publish_rate > 0)
    {
        // Sleep one publish period, but wake early if the node is shutting
        // down or the publish rate has been disabled.
        _cv_tf.wait_for(lock, std::chrono::milliseconds((int)(1000.0/_tf_publish_rate)),
                        [&]{return (!(_is_running && _tf_publish_rate > 0));});
        {
            std::lock_guard<std::mutex> lock_guard(_publish_tf_mutex);
            rclcpp::Time t = _node.now();
            try
            {
                // Refresh the stamps so TF consumers see the transforms as current.
                for(auto& msg : _static_tf_msgs)
                    msg.header.stamp = t;
                _dynamic_tf_broadcaster->sendTransform(_static_tf_msgs);
            }
            catch(const std::exception& e)
            {
                ROS_ERROR_STREAM("Error publishing dynamic transforms: " << e.what());
            }
        }
    }
}
// Start or stop the dynamic (/tf) publishing thread according to the current
// 'publish_tf' and 'tf_publish_rate' parameter values.
void BaseRealSenseNode::startDynamicTf()
{
    if (!_publish_tf)
    {
        ROS_WARN("Since the param 'publish_tf' is set to 'false',"
                 "the value set on the param 'tf_publish_rate' won't have any effect");
        return;
    }
    if (_tf_publish_rate > 0)
    {
        // Start publishing dynamic TF, if the param 'tf_publish_rate' is set to > 0.0 Hz
        ROS_WARN("Publishing dynamic camera transforms (/tf) at %g Hz", _tf_publish_rate);
        if (!_tf_t)
        {
            _tf_t = std::make_shared<std::thread>([this]()
                {
                    publishDynamicTransforms();
                });
        }
    }
    else
    {
        if (_tf_t && _tf_t->joinable())
        {
            // Stop publishing dynamic TF by resetting the '_tf_t' thread and '_dynamic_tf_broadcaster'.
            // The publish loop exits on its own because _tf_publish_rate is no longer > 0.
            _tf_t->join();
            _tf_t.reset();
            _dynamic_tf_broadcaster.reset();
            ROS_WARN("Stopped publishing dynamic camera transforms (/tf)");
        }
        else
        {
            // '_tf_t' thread is not running currently. i.e, dynamic tf is not getting broadcasted.
            ROS_WARN("Currently not publishing dynamic camera transforms (/tf). "
                     "To start publishing it, set the 'tf_publish_rate' param to > 0.0 Hz ");
        }
    }
}

View File

@@ -0,0 +1,189 @@
# Testing realsense2_camera
The test infra for realsense2_camera uses both gtest and pytest. gtest is typically used here for testing at the unit level and pytest for integration level testing. Please be aware that the README assumes that the ROS2 version used is Humble or later, as the launch_pytest package used here is not available in prior versions
## Test using gtest
The default folder for the test cpp files is realsense2_camera/test. A test template gtest_template.cpp is available in the same folder.
### Adding a new test
If the user wants to add a new test, use a copy of gtest_template.cpp as the starting point. Please name the file in the gtest_`testname`.cpp format so that CMake detects the file and adds it as a new test. Please be aware that multiple tests can be added into one file; the gtest_template.cpp has 2 tests, test1 and test2.
### Adding a new test folder
It is recommended to use the test folder itself for storing all the cpp tests. However, if the user wants to add a different folder for a set of tests, please ensure that the file name format mentioned above is followed. The folder path is added to realsense_camera/CMakeLists.txt as below for the build to detect the tests within.
```
find_package(ament_cmake_gtest REQUIRED)
set(_gtest_folders
test #<-- default folder for the gtest sources
new_folder_for_test_but_why #<-- new folder name is added
)
```
## Test using pytest
The default folder for the test py files is realsense2_camera/test. Two test template files test_launch_template.py and test_integration_template.py are available in the same folder for reference.
### Add a new test
To add a new test, the user can create a copy of the test_launch_template.py or test_integration_template.py and start from there. Please name the file in the format test_`testname`.py so that the CMake detects the file and add as a new test. Please be aware that, multiple tests can be added into one file, the test_integration_template.py itself has more than one test. The marker `@pytest.mark.launch` is used to specify the test entry point.
The test_launch_template.py uses the rs_launch.py to start the camera node, so this template can be used for testing the rs_launch.py together with the rs node.
The test_integration_template.py gives a better control for testing, it uses few util functions and base test class from pytest_rs_utils.py located at the same location. However, it doesn't use the rs_launch.py, it creates the node directly instead.
The test_integration_template.py has two types of tests, one has a function "test_using_function". If the user wants to have a better control over the launch context for any specific test scenarios, this can be used. Both the function based test and class based tests use a default launch configuration from the utils. It's recommended to modify the camera name to a unique one in the parameters itself so that there are not clashes between tests.
It is expected that the class based test is used as the test format for most of the usecases. The class based test inherits from `pytest_rs_utils.RsTestBaseClass` and it has three steps, namely: init, run_test and process_data. Unless for the basic tests, the user will have to override the process_data function and check if the data received from the topics are as expected. Also, if the user doesn't want the base class to modify the data, use 'store_raw_data':True in the theme definition. Please see the test_integration_template.py for reference.
An assert command can be used to indicate if the test failed or passed. Please see the template for more info.
### Adding a new test folder
It is recommended to use the test folder itself for storing all the pytests. However, if the user wants to add a different folder for a set of tests, please ensure that the file name format mentioned above is followed. The folder path should be added to realsense_camera/CMakeLists.txt as below for the infra to detect the new test folder and the tests within.
```
find_package(ament_cmake_pytest REQUIRED)
set(_pytest_folders
test #default test folder
test/templates
test/rosbag
new_folder_for_pytest #<-- new folder #but please be aware that the utils functions are in test/utils folder,
#so if the template is used, change the include path also accordingly
)
```
### Grouping of tests
The pytests can be grouped using markers. These markers can be used to run a group of tests. However, "colcon test" command doesn't pass a custom marker using (--pytest-args -m `marker_name`) to the pytest internally. This is because, the ament_cmake that works as a bridge between colcon and pytest doesn't pass the pytest arguments to pytest. So until this is fixed, pytest command has to be used directly for running a group of tests. Please see the next session for the commands to run a group of py tests.
The grouping is specified by adding a marker just before the test declaration. In the test_integration_template.py, `rosbag` is specified as a marker to identify tests that use a rosbag file. This is achieved by adding "@pytest.mark.rosbag" to the beginning of the test. So when pytest parses for tests, it detects the marker for the test. If this marker is selected or none of the markers are specified, the test will be added to the list, else it will be listed as a deselected test.
It is recommended to use markers such as d457, rosbag, or d435i etc to differentiate the tests so that it's easier to run a group of tests in a machine that has the required hardware.
## Building and running tests
### Build steps
The command used for building the tests along with the node:
colcon build
The test statements in CMakeLists.txt are protected by BUILD_TESTING macro. So in case, the tests are not being built, then it could be that the macro are disabled by default.
Note: The below command helps view the steps taken by the build command.
colcon build --event-handlers console_direct+
### Prerequisites for running the tests
1. The template tests require the rosbag files from librealsense.intel.com, the following commands download them:
```
bag_filename="https://librealsense.intel.com/rs-tests/TestData/outdoors_1color.bag";
wget $bag_filename -P "records/"
bag_filename="https://librealsense.intel.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag";
wget $bag_filename -P "records/"
```
2. The tests use the environment variable ROSBAG_FILE_PATH as the directory that contains the rosbag files
```
export ROSBAG_FILE_PATH=/path/to/directory/of/rosbag
```
3. Install launch_pytest package. For humble:
```
sudo apt install ros-$ROS_DISTRO-launch-pytest
```
4. As in the case of all the packages, the install script of realsense2_camera has to be run.
```
. install/local_setup.bash
```
5. If the tests are run on a machine that has the RS board connected or the tests are using rosbag files, then its better to let the ROS search for the nodes in the local machine, this will be faster and less prone to interference and hence unexpected errors. It can be achieved using the following environment variable.
```
export ROS_DOMAIN_ID=1
```
So, all put together:
```
sudo apt install ros-$ROS_DISTRO-launch-pytest
bag_filename="https://librealsense.intel.com/rs-tests/TestData/outdoors_1color.bag";
wget $bag_filename -P "records/"
bag_filename="https://librealsense.intel.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag";
wget $bag_filename -P "records/"
export ROSBAG_FILE_PATH=$PWD/records
. install/local_setup.bash
export ROS_DOMAIN_ID=1
```
### Running the tests using colcon
All the tests can be run using the below command:
colcon test --packages-select realsense2_camera
This command will invoke both gtest and pytest infra and run all the tests specified in the files mentioned above. Since the test results are stored in build/realsense2_camera/test_results folder, it's good to clean this up after running the tests with a new test added/removed.
The same command with console_direct can be used for more info on failing tests, as below:
colcon test --packages-select realsense2_camera --event-handlers console_direct+
The test results can be viewed using the command:
colcon test-result --all --test-result-base build/realsense2_camera/test_results/
The xml files mentioned by the command can be directly opened also.
### Running pytests directly
Note :
1. All the commands for test execution has to be executed from realsense-ros folder. For example: If the ROS2 workspace was created based on Step 3 [Option2] of [this](https://github.com/IntelRealSense/realsense-ros/blob/ros2-master/README.md#installation).
Then, the path to execute the tests would be ~/ros2_ws/src/realsense-ros.
cd ~/ros2_ws/src/realsense-ros
2. Please setup below required environment variables. If not, Please prefix them for every test execution.
export PYTHONPATH=$PYTHONPATH:$PWD/realsense2_camera/test/utils:$PWD/realsense2_camera//launch:$PWD/realsense2_camera//scripts
User can run all the tests in a pytest file directly using the below command:
pytest-3 -s realsense2_camera/test/test_integration_template.py
All the pytests in a test folder can be directly run using the below command:
pytest-3 -s realsense2_camera/test/
### Running a group of pytests
As mentioned above, a set of pytests that are grouped using markers can be run using the pytest command. The below command runs all the pytests in realsense2_camera/test folder that has the marker rosbag:
pytest-3 -s -m rosbag realsense2_camera/test/
### Running a single pytest
The below command finds the test with the name test_static_tf_1 in realsense2_camera/test folder run:
pytest-3 -s -k test_static_tf_1 realsense2_camera/test/
### Marking tests as regression tests
Some of the tests, especially the live tests with multiple runs, e.g., all profile tests (test_camera_all_profile_tests.py), take a long time. Such tests are marked as skipped with a condition so that "colcon test" skips them.
If a user wants to add a test to this conditional skip, user can add by adding a marker as below.
@pytest.mark.skipif (os.getenv('RS_ROS_REGRESSION', "not found") == "not found",reason="Regression is not enabled, define RS_ROS_REGRESSION")
### Running skipped regression tests
1. Set the environment variable RS_ROS_REGRESSION as 1 and run the "colcon test"
2. pytest example:
RS_ROS_REGRESSION=1 PYTHONPATH=$PYTHONPATH:$PWD/realsense2_camera/test/utils:$PWD/realsense2_camera//launch:$PWD/realsense2_camera//scripts pytest-3 -s realsense2_camera/test/live_camera/test_camera_aligned_tests.py -k test_camera_align_depth_color_all -m d415
## Points to be noted while writing pytests
The tests that are in one file are normally run in parallel; there could also be changes in the pytest plugin. So if there are multiple tests in one file, the system capacity can influence the test execution. It's recommended to have 3-4 tests in a file; more than that can affect the test results due to delays.
### Passing/changing parameters
The parameters passed while creating the node can be initialized individually for each test, please see the test_parameterized_template example for reference. The default values are taken from rs_launch.py and the passed parameters are used for overriding the default values. The parameters that can be dynamically modified can be changed using the param interface provided. However, the function `create_service_client_ifs()` has to be called to create this interface. Please see the test_d455_basic_tests.py for reference. There are specific functions to change the string, integer and bool parameters, the utils can be extended if any more types are needed.
### Difference in setting the bool parameters
There is a difference between setting the default values of bool parameters and setting them dynamically.
The bool test params are assigned within quotes as below.
test_params_all_profiles_d455 = {
'camera_name': 'D455',
'device_type': 'D455',
'enable_accel':"True",
'enable_gyro':"True",
'unite_imu_method':1,
}
However the function that implements the setting of bool parameter dynamically takes the python bool datatype. For example:
self.set_bool_param('enable_accel', False)
### Adding 'service call' client interface
1. Create a client for respective service call in method `create_service_client_ifs()`, refer client creation for service call `device_info` with service type `DeviceInfo`.
2. Create a `get_` or `set_` method for each service call, refer `get_deviceinfo` in pytest_rs_utils.py
3. Use the `get_`, `set_` service calls as used in test_camera_service_call.py

View File

@@ -0,0 +1,33 @@
// Copyright 2023 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
// Template sanity test #1: a trivial always-passing arithmetic assertion.
TEST(realsense2_camera, test1)
{
    std::cout << "Running test1...";
    ASSERT_EQ(2 + 2, 4);
}
// Template sanity test #2: shows that one file may hold several TESTs.
TEST(realsense2_camera, test2)
{
    std::cout << "Running test2...";
    ASSERT_EQ(2 + 2, 4);
}
// Standard gtest entry point: initialize the framework and run every TEST
// registered in this binary.
int main(int argc, char** argv)
{
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

View File

@@ -0,0 +1,266 @@
# Copyright 2024 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, re, getopt, time
# Wall-clock start of the whole run.
start_time = time.time()
# True when executing under the ROS-CI Jenkins job (detected via WORKSPACE).
running_on_ci = False
if 'WORKSPACE' in os.environ:
    #Path for ROS-CI on Jenkins
    ws_rosci = os.environ['WORKSPACE']
    sys.path.append( os.path.join( ws_rosci, 'lrs/unit-tests/py' ))
    running_on_ci = True
else:
    #For running this script locally
    #Extract the root where both realsense-ros and librealsense are cloned
    ws_local = '/'.join(os.path.abspath( __file__ ).split( os.path.sep )[0:-5])
    #expected to have 'librealsense' repo in parallel to 'realsense-ros'
    assert os.path.exists( os.path.join(ws_local, 'librealsense')), f" 'librealsense' doesn't exist at {ws_local} "
    sys.path.append( os.path.join( ws_local, 'librealsense/unit-tests/py' ))
#logs are stored @ ./realsense2_camera/test/logs
logdir = os.path.join( '/'.join(os.path.abspath( __file__ ).split( os.path.sep )[0:-2]), 'logs')
# Directory holding the live pytest files (this script's own directory).
dir_live_tests = os.path.dirname(__file__)
from rspy import log, file
# CLI state, filled in by the option parsing below.
regex = None         # -r/--regex: run only tests whose name matches
hub_reset = False    # whether to reset the YKUSH hub before querying devices
handle = None
test_ran = False
device_set = list()  # --device: restrict the run to these device names
def usage():
    """Print command-line help for this test runner and exit with status 2."""
    ourname = os.path.basename(sys.argv[0])
    print('Syntax: ' + ourname + ' [options] ')
    print('Options:')
    print(' -h, --help Usage help')
    print(' -r, --regex Run all tests whose name matches the following regular expression')
    print(' e.g.: --regex test_camera_imu; -r d415_basic')
    # Double-quoted so the literal ',' stays inside one string instead of
    # accidentally splitting the line into three print() arguments.
    print(" --device <> Run only on the specified device(s); list of devices separated by ',', No white spaces")
    print(' e.g.: --device=d455,d435i,d585 (or) --device d455,d435i,d585 ')
    print(' Note: if --device option not used, tests run on all connected devices ')
    sys.exit(2)
def command(dev_name, test=None):
    """Build the pytest-3 command line for one device.

    dev_name -- device marker name (e.g. 'd455'); used both as the pytest
                marker (-m) and for naming the junit XML report.
    test     -- optional -k expression to run a single test by name.

    Returns the argv list for subprocess.run.
    """
    # The original wrapped string arguments in no-op ''.join(...) calls;
    # dev_name and test are plain strings, so they are used directly.
    cmd = ['pytest-3', '-s', '-m', dev_name]
    if test:
        cmd += ['-k', test]
    cmd += [dir_live_tests]
    cmd += ['--debug']
    cmd += [f'--junit-xml={logdir}/{dev_name.upper()}_pytest.xml']
    return cmd
def run_test(cmd, test=None, dev_name=None, stdout=None, append=False):
    """Execute one pytest command, capturing output to a per-device log file.

    cmd      -- argv list produced by command().
    test     -- optional test name, used for the log file name.
    dev_name -- device name, used for log and junit-XML file names.
    stdout   -- log directory; None runs with inherited stdout.
    append   -- append to an existing log instead of truncating it.

    Always post-processes the junit XML report via junit_xml_parsing().
    """
    handle = None
    try:
        # Bug fix: the original built 'stdout + os.sep + ...' before checking
        # for None, so the intended inherited-stdout branch was unreachable
        # (a TypeError was raised and swallowed instead).
        if stdout is None:
            sys.stdout.flush()
        elif stdout != subprocess.PIPE:
            log_name = str(dev_name.upper()) + '_' + (test if test else 'full') + '.log'
            stdout = stdout + os.sep + log_name
            if append:
                handle = open(stdout, "a")
                handle.write(
                    "\n----------TEST-SEPARATOR----------\n\n")
                handle.flush()
            else:
                handle = open(stdout, "w")
        result = subprocess.run(cmd,
                                stdout=handle,
                                stderr=subprocess.STDOUT,
                                universal_newlines=True,
                                timeout=200,
                                check=True)
        # check=True raises on failure, so reaching here means success.
        if not result.returncode:
            log.i("---Test Passed---")
    except Exception as e:
        log.e("---Test Failed---")
        log.w("Error Exception:\n ", e)
    finally:
        if handle:
            handle.close()
        junit_xml_parsing(f'{dev_name.upper()}_pytest.xml')
def junit_xml_parsing(xml_file):
    '''
    Refine a pytest junit XML report for a better reporting structure in Jenkins:
    - remove redundant hierarchy from testcase 'classname' and 'name' attributes
    - running pytest-3 w/ --junit-xml={logdir}/{dev_name}_pytest.xml results in a
      classname with a too-long path which is redundant
    The refined tree is written next to the original as <stem>_refined.xml.
    '''
    import xml.etree.ElementTree as ET
    # Note: 'global logdir' was removed - it is only read here, never assigned.
    if not os.path.isfile(os.path.join(logdir, f'{xml_file}')):
        log.e(f'{xml_file} not found, test results can\'t be generated')
        return
    tree = ET.parse(os.path.join(logdir, xml_file))
    root = tree.getroot()
    for testsuite in root.findall('testsuite'):
        for testcase in testsuite.findall('testcase'):
            # Keep only the class name, dropping the dotted module path.
            testcase.set('classname', testcase.attrib['classname'].split('.')[-2])
            # Strip the parameterized-launch prefix from the test name.
            testcase.set('name', re.sub('launch_.*parameters', '', testcase.attrib['name']))
    new_xml = xml_file.split('.')[0]
    tree.write(f'{logdir}/{new_xml}_refined.xml')
def build_device_port_mapping(possible_ports):
    """
    Build a mapping of devices to YKUSH hub ports by enabling each port and querying connected devices.

    possible_ports -- iterable of YKUSH port numbers to probe.
    Returns a dict of {DEVICE_NAME_UPPERCASE: port_number}.
    """
    from rspy import devices
    mapping = {}
    # Turn off all ports first
    for port in possible_ports:
        subprocess.run(f'ykushcmd ykush3 -d {port}', shell=True)
    time.sleep(2.5)  # let the OS unregister the devices
    for port in possible_ports:
        log.i(f"Checking YKUSH port {port}...")
        # Enable only this port, then see which device(s) appear.
        subprocess.run(f'ykushcmd ykush3 -u {port}', shell=True)
        time.sleep(5.0)  # wait for USB re-enumeration
        devices.query(hub_reset=True)
        # NOTE(review): reaches into rspy's private cache (_device_by_sn / _sn)
        # - confirm this stays a stable interface.
        for device in devices._device_by_sn.values():
            key = device.name.upper()
            if key not in mapping:
                # First port a device shows up on wins.
                mapping[key] = port
            log.i(f"Detected device: {device.name} ({device._sn}) on port {port}")
        # Disable the port again before probing the next one.
        subprocess.run(f'ykushcmd ykush3 -d {port}', shell=True)
    return mapping
def disable_all_ports():
    """
    Disable all ports on the YKUSH hub.
    """
    log.i("Disabling all ports...")
    # 'a' addresses all ports at once; plain string - the original f-prefix
    # had no placeholders.
    subprocess.run('ykushcmd ykush3 -d a', shell=True)
    time.sleep(2.5)  # Wait for the system to unregister devices
def enable_port_for_device(device, port):
    """Enable the YKUSH hub port mapped to *device* and wait for USB re-enumeration."""
    if not port:
        # No mapping was found for this device during the probe phase.
        log.e(f"No port mapping found for device {device.upper()}")
        return
    log.i(f"Enabling port {port} for device {device.upper()}")
    subprocess.run(f'ykushcmd ykush3 -u {port}', shell=True)
    time.sleep(5.0)  # Wait for re-enumeration
def run_tests_for_device(device, testname):
    """
    Run tests for a specific device by enabling its port and executing the test command.

    :param device: device name as given on the command line (any case).
    :param testname: test-name regex to run, or None to run everything.
    """
    # Define which ports are connected to YKUSH (e.g., 1, 2, 3...)
    possible_ports = [1, 2, 3]
    # NOTE(review): the full (slow) port scan is repeated on every call; when
    # looping over several devices, caching the mapping may be worthwhile —
    # confirm the hub topology is stable for the run before changing.
    device_port_mapping = build_device_port_mapping(possible_ports)
    log.i("Device to port mapping:", device_port_mapping)
    disable_all_ports()  # Disable all ports first
    port = device_port_mapping.get(device.upper())
    enable_port_for_device(device, port)  # Enable the port for the target device
    cmd = command(device.lower(), testname)
    run_test(cmd, testname, device, stdout=logdir, append=False)
def find_devices_run_tests():
    """
    Main function to find devices and run tests on them.

    Resets the YKUSH hub (retrying a few times) until at least one camera
    enumerates, then runs the configured tests on either the user-specified
    device set or every connected device.  Always disables the hub ports and
    reports the log location on exit.
    """
    from rspy import devices
    global logdir, device_set, _device_by_sn
    max_retry = 3
    try:
        os.makedirs(logdir, exist_ok=True)
        # Update dict '_device_by_sn' from devices module of rspy.
        # Retry the hub reset until at least one device is discovered.
        while max_retry and not devices._device_by_sn:
            subprocess.run('ykushcmd ykush3 --reset', shell=True)
            time.sleep(2.0)
            devices.query(hub_reset=hub_reset)
            max_retry -= 1
        if not devices._device_by_sn:
            # NOTE(review): 'assert' is stripped under `python -O`; a raised
            # exception would be more robust for this check — confirm.
            assert False, 'No Camera device detected!'
        else:
            connected_devices = [device.name for device in devices._device_by_sn.values()]
            log.i('Connected devices:', connected_devices)
            testname = regex if regex else None
            if device_set:
                # Loop through user-specified devices and run tests only on them
                devices_not_found = []
                for device in device_set:
                    if device.upper() in connected_devices:
                        log.i('Running tests on device:', device)
                        run_tests_for_device(device, testname)
                    else:
                        log.e('Skipping test run on device:', device, ', -- NOT found')
                        devices_not_found.append(device)
                # Fail the whole run if any requested device was absent.
                assert len(devices_not_found) == 0, f'Devices not found: {devices_not_found}'
            else:
                # Loop through all connected devices and run all tests
                for device in connected_devices:
                    log.i('Running tests on device:', device)
                    run_tests_for_device(device, testname)
    finally:
        # Leave the hub with all ports disabled regardless of test outcome.
        if devices.hub and devices.hub.is_connected():
            devices.hub.disable_ports()
            devices.wait_until_all_ports_disabled()
            devices.hub.disconnect()
        if running_on_ci:
            log.i("Log path- \"Build Artifacts\":/ros2/realsense_camera/test/logs ")
        else:
            log.i("log path:", logdir)
        run_time = time.time() - start_time
        log.d("server took", run_time, "seconds")
if __name__ == '__main__':
    # Command-line interface: -h/--help, -r/--regex <test pattern>, --device <csv list>
    try:
        opts, args = getopt.getopt( sys.argv[1:], 'hr:', longopts=['help', 'regex=', 'device=' ] )
    except getopt.GetoptError as err:
        log.e( err )
        usage()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-r', '--regex'):
            regex = arg
        elif opt == '--device':
            # Comma-separated device names, e.g. --device D455,D415
            device_set = arg.split(',')
    find_devices_run_tests()
    sys.exit( 0 )

View File

@@ -0,0 +1,281 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import delayed_launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
import pytest_live_camera_utils
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
from pytest_live_camera_utils import debug_print
# Launch parameters: align-depth enabled with deliberately mismatched
# depth (848x480) and color (640x480) resolutions.
test_params_align_depth_color_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'enable_color':'true',
    'enable_depth':'true',
    'depth_module.depth_profile':'848x480x30',
    'rgb_camera.color_profile':'640x480x30',
    'align_depth.enable':'true'
    }
# Same scenario on a D415 device.
test_params_align_depth_color_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    'enable_color':'true',
    'enable_depth':'true',
    'depth_module.depth_profile':'848x480x30',
    'rgb_camera.color_profile':'640x480x30',
    'align_depth.enable':'true'
    }
'''
This test was implemented as a template to set the parameters and run the test.
This directory is not added to the CMakeLists.txt so as to avoid the colcon failure in the
machines that don't have the D455 connected.
1. Only a subset of parameter types are implemented in py_rs_utils, it has to be extended for others
2. After setting the param, rclpy.spin_once may be needed.Test passes even without this though.
'''
@pytest.mark.parametrize("launch_descr_with_parameters",[
    pytest.param(test_params_align_depth_color_d455, marks=pytest.mark.d455),
    pytest.param(test_params_align_depth_color_d415, marks=pytest.mark.d415),
    #pytest.param(test_params_align_depth_color_d435, marks=pytest.mark.d435),
    ]
    ,indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_AlignDepthColor(pytest_rs_utils.RsTestBaseClass):
    """
    Live-camera test of the color, depth and aligned-depth-to-color topics,
    first at the launch-time 640x480 color profile and then after switching
    the color profile to 1280x720 at runtime.
    """
    def test_camera_align_depth_color(self,launch_descr_with_parameters):
        # launch_descr_with_parameters is (launch_description, params_dict)
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        # Expected per-topic image sizes for the initial profiles; the aligned
        # topic follows the color resolution.
        themes = [
            {'topic':get_node_heirarchy(params)+'/color/image_raw',
             'msg_type':msg_Image,
             'expected_data_chunks':1,
             'width':640,
             'height':480,
            },
            {'topic':get_node_heirarchy(params)+'/depth/image_rect_raw',
             'msg_type':msg_Image,
             'expected_data_chunks':1,
             'width':848,
             'height':480,
            },
            {'topic':get_node_heirarchy(params)+'/aligned_depth_to_color/image_raw',
             'msg_type':msg_Image,
             'expected_data_chunks':1,
             'width':640,
             'height':480,
            },
        ]
        try:
            '''
            initialize, run and check the data
            '''
            print("Starting camera test...")
            self.init_test("RsTest"+params['camera_name'])
            self.wait_for_node(params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            ret = self.run_test(themes)
            assert ret[0], ret[1]
            assert self.process_data(themes)
            # Switch the color profile at runtime and verify the streams again.
            self.set_string_param('rgb_camera.color_profile', '1280x720x30')
            self.set_bool_param('enable_color', True)
            themes[0]['width'] = 1280
            themes[0]['height'] = 720
            themes[2]['width'] = 1280
            themes[2]['height'] = 720
            ret = self.run_test(themes)
            assert ret[0], ret[1]
            assert self.process_data(themes)
        finally:
            #this step is important because the test will fail next time
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()
# Per-device launch parameters for the exhaustive align-depth regression below.
test_params_all_profiles_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'enable_color':'true',
    'enable_depth':'true',
    'depth_module.depth_profile':'848x480x30',
    'rgb_camera.color_profile':'640x480x30',
    'align_depth.enable':'true'
    }
test_params_all_profiles_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    'enable_color':'true',
    'enable_depth':'true',
    'depth_module.depth_profile':'848x480x30',
    'rgb_camera.color_profile':'640x480x30',
    'align_depth.enable':'true'
    }
test_params_all_profiles_d435i = {
    'camera_name': 'D435I',
    'device_type': 'D435I',
    'enable_color':'true',
    'enable_depth':'true',
    'depth_module.depth_profile':'848x480x30',
    'rgb_camera.color_profile':'640x480x30',
    'align_depth.enable':'true'
    }
'''
This test was implemented as a template to set the parameters and run the test.
This directory is not added to the CMakeLists.txt so as to avoid the colcon failure in the
machines that don't have the D455 connected.
1. Only a subset of parameter types are implemented in py_rs_utils, it has to be extended for others
2. After setting the param, rclpy.spin_once may be needed.Test passes even without this though.
'''
@pytest.mark.skipif (os.getenv('RS_ROS_REGRESSION', "not found") == "not found",reason="Regression is not enabled, define RS_ROS_REGRESSION")
@pytest.mark.parametrize("launch_descr_with_parameters", [
    pytest.param(test_params_all_profiles_d455, marks=pytest.mark.d455),
    pytest.param(test_params_all_profiles_d415, marks=pytest.mark.d415),
    pytest.param(test_params_all_profiles_d435i, marks=pytest.mark.d435i),]
    ,indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_AllAlignDepthColor(pytest_rs_utils.RsTestBaseClass):
    """
    Exhaustive live-camera check of depth-to-color alignment: every RGB8
    color profile is combined with every depth profile the device advertises,
    streams are reconfigured at runtime, and the three image topics are
    verified for each combination.
    """
    def test_camera_all_align_depth_color(self,launch_descr_with_parameters):
        skipped_tests = []
        failed_tests = []
        num_passed = 0
        num_failed = 0
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        # Widths/heights are overwritten per combination inside the loop.
        themes = [
            {'topic':get_node_heirarchy(params)+'/color/image_raw',
             'msg_type':msg_Image,
             'expected_data_chunks':1,
             'width':640,
             'height':480,
            },
            {'topic':get_node_heirarchy(params)+'/depth/image_rect_raw',
             'msg_type':msg_Image,
             'expected_data_chunks':1,
             'width':848,
             'height':480,
            },
            {'topic':get_node_heirarchy(params)+'/aligned_depth_to_color/image_raw',
             'msg_type':msg_Image,
             'expected_data_chunks':1,
             'width':640,
             'height':480,
            },
        ]
        try:
            '''
            initialize, run and check the data
            '''
            self.init_test("RsTest"+params['camera_name'])
            self.spin_for_time(wait_time=1.0)
            cap = pytest_live_camera_utils.get_camera_capabilities(params['device_type'])
            if cap == None:
                debug_print("Device not found? : " + params['device_type'])
                return
            self.create_service_client_ifs(get_node_heirarchy(params))
            color_profiles = set([i[1] for i in cap["color_profile"] if i[2] == "RGB8"])
            depth_profiles = set([i[1] for i in cap["depth_profile"] if i[0] == "Depth"])
            for color_profile in color_profiles:
                for depth_profile in depth_profiles:
                    if depth_profile == color_profile:
                        continue
                    print("Testing the alignment of Depth:", depth_profile, " and Color:", color_profile)
                    # Disable both streams and alignment before reconfiguring.
                    # BUGFIX: the second call previously disabled 'enable_color'
                    # twice and never touched the depth stream.
                    self.set_bool_param('enable_color', False)
                    self.set_bool_param('enable_depth', False)
                    self.set_bool_param('align_depth.enable', False)
                    # Aligned-depth output follows the color resolution.
                    themes[0]['width'] = themes[2]['width'] = int(color_profile.split('x')[0])
                    themes[0]['height'] = themes[2]['height'] = int(color_profile.split('x')[1])
                    themes[1]['width'] = int(depth_profile.split('x')[0])
                    themes[1]['height'] = int(depth_profile.split('x')[1])
                    dfps = int(depth_profile.split('x')[2])
                    cfps = int(color_profile.split('x')[2])
                    # Scale the timeout by the slower of the two stream rates.
                    if dfps > cfps:
                        fps = cfps
                    else:
                        fps = dfps
                    timeout=100.0/fps
                    #for the changes to take effect
                    self.spin_for_time(wait_time=timeout/20)
                    self.set_string_param('rgb_camera.color_profile', color_profile)
                    self.set_string_param('depth_module.depth_profile', depth_profile)
                    # BUGFIX: as above — re-enable both streams, not color twice.
                    self.set_bool_param('enable_color', True)
                    self.set_bool_param('enable_depth', True)
                    self.set_bool_param('align_depth.enable', True)
                    #for the changes to take effect
                    self.spin_for_time(wait_time=timeout/20)
                    try:
                        ret = self.run_test(themes, timeout=timeout)
                        assert ret[0], ret[1]
                        assert self.process_data(themes), "".join(str(depth_profile) + " " + str(color_profile)) + " failed"
                        num_passed += 1
                    except Exception as e:
                        exc_type, exc_obj, exc_tb = sys.exc_info()
                        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                        print("Test failed")
                        print("Tested the alignment of Depth:", depth_profile, " and Color:", color_profile, " with timeout ", timeout)
                        print(e)
                        print(exc_type, fname, exc_tb.tb_lineno)
                        num_failed += 1
                        failed_tests.append("".join(str(depth_profile) + " " + str(color_profile)))
        finally:
            #this step is important because the test will fail next time
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()
            # Summarize results after cleanup so they always print.
            print("Tests passed " + str(num_passed))
            print("Tests skipped " + str(len(skipped_tests)))
            if len(skipped_tests):
                debug_print("\nSkipped tests:" + params['device_type'])
                debug_print("\n".join(skipped_tests))
            print("Tests failed " + str(num_failed))
            if len(failed_tests):
                print("\nFailed tests:" + params['device_type'])
                print("\n".join(failed_tests))
    def disable_all_params(self):
        """Turn off every stream so a single one can be enabled in isolation."""
        self.set_bool_param('enable_color', False)
        self.set_bool_param('enable_depth', False)
        self.set_bool_param('enable_infra', False)
        self.set_bool_param('enable_infra1', False)
        self.set_bool_param('enable_infra2', False)

View File

@@ -0,0 +1,269 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import delayed_launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
import pytest_live_camera_utils
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
from pytest_live_camera_utils import debug_print
def check_if_skip_test(profile, format):
    """
    Return True when the given stream profile/format combination should be
    skipped by the resolution-change regression.

    Only the infrared streams in their Y8/Y16 formats are skipped; every
    other combination is tested.  (An earlier, much broader skip list was
    kept here as a commented-out block; it was dead code and has been
    removed.)

    :param profile: stream name, e.g. 'Color', 'Depth', 'Infrared1'.
        (The parameter named 'format' shadows the builtin but is kept for
        backward compatibility with keyword callers.)
    :param format: pixel format string, e.g. 'Y8', 'RGB8'.
    :return: True to skip this combination, False to test it.
    """
    infrared_profiles = ('Infrared', 'Infrared1', 'Infrared2')
    skipped_formats = ('Y8', 'Y16')
    return profile in infrared_profiles and format in skipped_formats
# Minimal launch parameters (device identity only) for the resolution test below.
test_params_all_profiles_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    }
test_params_all_profiles_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    }
test_params_all_profiles_d435i = {
    'camera_name': 'D435I',
    'device_type': 'D435I',
    }
'''
This test was implemented as a template to set the parameters and run the test.
This directory is not added to the CMakeLists.txt so as to avoid the colcon failure in the
machines that don't have the D455 connected.
1. Only a subset of parameter types are implemented in py_rs_utils, it has to be extended for others
2. After setting the param, rclpy.spin_once may be needed.Test passes even without this though.
'''
@pytest.mark.skipif (os.getenv('RS_ROS_REGRESSION', "not found") == "not found",reason="Regression is not enabled, define RS_ROS_REGRESSION")
@pytest.mark.parametrize("launch_descr_with_parameters", [
    pytest.param(test_params_all_profiles_d455, marks=pytest.mark.d455),
    pytest.param(test_params_all_profiles_d415, marks=pytest.mark.d415),
    pytest.param(test_params_all_profiles_d435i, marks=pytest.mark.d435i),]
    ,indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestLiveCamera_Change_Resolution(pytest_rs_utils.RsTestBaseClass):
    """
    Iterate every advertised color and depth profile/format, switch the live
    camera to it at runtime, and verify a frame arrives at the expected size.
    """
    def test_LiveCamera_Change_Resolution(self,launch_descr_with_parameters):
        skipped_tests = []
        failed_tests = []
        num_passed = 0
        num_failed = 0
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        # Single theme, retargeted per profile inside the loops.
        themes = [{'topic':get_node_heirarchy(params)+'/color/image_raw', 'msg_type':msg_Image,'expected_data_chunks':1}]
        config = pytest_live_camera_utils.get_profile_config(get_node_heirarchy(params))
        try:
            '''
            initialize, run and check the data
            '''
            self.init_test("RsTest"+params['camera_name'])
            self.spin_for_time(wait_time=1.0)
            cap = pytest_live_camera_utils.get_camera_capabilities(params['device_type'])
            if cap == None:
                debug_print("Device not found? : " + params['device_type'])
                return
            self.create_service_client_ifs(get_node_heirarchy(params))
            # Two distinct fallback profiles per stream, so switching to the
            # profile under test is always a real parameter change.
            config["Color"]["default_profile1"],config["Color"]["default_profile2"] = pytest_live_camera_utils.get_default_profiles(cap["color_profile"], "Color")
            config["Depth"]["default_profile1"],config["Depth"]["default_profile2"] = pytest_live_camera_utils.get_default_profiles(cap["depth_profile"], "Depth")
            config["Infrared"]["default_profile1"],config["Infrared"]["default_profile2"] = pytest_live_camera_utils.get_default_profiles(cap["depth_profile"], "Infrared")
            config["Infrared1"]["default_profile1"],config["Infrared1"]["default_profile2"] = pytest_live_camera_utils.get_default_profiles(cap["depth_profile"], "Infrared1")
            config["Infrared2"]["default_profile1"],config["Infrared2"]["default_profile2"] = pytest_live_camera_utils.get_default_profiles(cap["depth_profile"], "Infrared2")
            # First pass: every advertised color profile/format.
            for key in cap["color_profile"]:
                profile_type = key[0]
                profile = key[1]
                format = key[2]
                if check_if_skip_test(profile_type, format):
                    skipped_tests.append(" ".join(key))
                    continue
                print("Testing " + " ".join(key))
                themes[0]['topic'] = config[profile_type]['topic']
                themes[0]['width'] = int(profile.split('x')[0])
                themes[0]['height'] = int(profile.split('x')[1])
                # Park on whichever default profile differs from the target.
                if themes[0]['width'] == int(config[profile_type]["default_profile2"].split('x')[0]):
                    self.set_string_param(config[profile_type]["profile"], config[profile_type]["default_profile1"])
                else:
                    self.set_string_param(config[profile_type]["profile"], config[profile_type]["default_profile2"])
                self.set_bool_param(config[profile_type]["param"], True)
                self.disable_all_params()
                self.spin_for_time(wait_time=0.2)
                self.set_string_param(config[profile_type]["profile"], profile)
                self.set_string_param(config[profile_type]["format"], format)
                self.set_bool_param(config[profile_type]["param"], True)
                try:
                    ret = self.run_test(themes)
                    assert ret[0], ret[1]
                    assert self.process_data(themes), " ".join(key) + " failed"
                    num_passed += 1
                except Exception as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    print("Test failed")
                    print(e)
                    print(exc_type, fname, exc_tb.tb_lineno)
                    num_failed += 1
                    failed_tests.append(" ".join(key))
            debug_print("Color tests completed")
            # Second pass: every advertised depth profile/format.
            for key in cap["depth_profile"]:
                profile_type = key[0]
                profile = key[1]
                format = key[2]
                if check_if_skip_test(profile_type, format):
                    skipped_tests.append(" ".join(key))
                    continue
                print("Testing " + " ".join(key))
                themes[0]['topic'] = config[profile_type]['topic']
                themes[0]['width'] = int(profile.split('x')[0])
                themes[0]['height'] = int(profile.split('x')[1])
                if themes[0]['width'] == int(config[profile_type]["default_profile2"].split('x')[0]):
                    self.set_string_param(config[profile_type]["profile"], config[profile_type]["default_profile1"])
                else:
                    self.set_string_param(config[profile_type]["profile"], config[profile_type]["default_profile2"])
                self.set_bool_param(config[profile_type]["param"], True)
                self.disable_all_params()
                self.spin_for_time(wait_time=0.2)
                self.set_string_param(config[profile_type]["profile"], profile)
                self.set_string_param(config[profile_type]["format"], format)
                self.set_bool_param(config[profile_type]["param"], True)
                try:
                    ret = self.run_test(themes)
                    assert ret[0], ret[1]
                    assert self.process_data(themes), " ".join(key) + " failed"
                    num_passed += 1
                except Exception as e:
                    print("Test failed")
                    print(e)
                    num_failed += 1
                    failed_tests.append(" ".join(key))
            debug_print("Depth tests completed")
        finally:
            #this step is important because the test will fail next time
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()
            # Result summary (printed even when the loop raised).
            print("Tests passed " + str(num_passed))
            print("Tests skipped " + str(len(skipped_tests)))
            if len(skipped_tests):
                debug_print("\nSkipped tests:" + params['device_type'])
                debug_print("\n".join(skipped_tests))
            print("Tests failed " + str(num_failed))
            if len(failed_tests):
                print("\nFailed tests:" + params['device_type'])
                print("\n".join(failed_tests))
    def disable_all_params(self):
        # Turn off every stream so exactly one can be enabled per iteration.
        self.set_bool_param('enable_color', False)
        self.set_bool_param('enable_depth', False)
        self.set_bool_param('enable_infra', False)
        self.set_bool_param('enable_infra1', False)
        self.set_bool_param('enable_infra2', False)

View File

@@ -0,0 +1,136 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
import pytest_live_camera_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
test_params_test_fps_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    }
test_params_test_fps_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    }
# Depth/color profiles (WxHxFPS) exercised per device by the FPS test below.
test_profiles ={
    'D455':{
        'depth_test_profiles':['640x480x5','640x480x15', '640x480x30', '640x480x90'],
        'color_test_profiles':['640x480x5','640x480x15', '640x480x30', '1280x720x30']
    },
    'D415':{
        'depth_test_profiles':['640x480x6','640x480x15', '640x480x30', '640x480x90'],
        'color_test_profiles':['640x480x6','640x480x15', '640x480x30', '1280x720x30']
    }
}
'''
The test was implemented to check the fps of Depth and Color frames. The RosTopicHz had to be
modified to make it work, see py_rs_utils for more details.
To check the fps, a value 'expected_fps_in_hz' has to be added to the corresponding theme
'''
@pytest.mark.skipif (os.getenv('RS_ROS_REGRESSION', "not found") == "not found",reason="Regression is not enabled, define RS_ROS_REGRESSION")
@pytest.mark.parametrize("launch_descr_with_parameters", [
    pytest.param(test_params_test_fps_d455, marks=pytest.mark.d455),
    pytest.param(test_params_test_fps_d415, marks=pytest.mark.d415),
    #pytest.param(test_params_test_fps_d435, marks=pytest.mark.d435),
    ],indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_TestFPS(pytest_rs_utils.RsTestBaseClass):
    """
    Measure the publish rate of the depth and color image topics for each
    profile in 'test_profiles' and compare it against the profile's FPS.
    """
    def test_camera_test_fps(self,launch_descr_with_parameters):
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        try:
            '''
            initialize, run and check the data
            '''
            print("Starting camera test...")
            self.init_test("RsTest"+params['camera_name'])
            self.wait_for_node(params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            # Depth pass: color disabled so only depth traffic is measured.
            themes = [
                {'topic':get_node_heirarchy(params)+'/depth/image_rect_raw',
                 'msg_type':msg_Image,
                 'expected_data_chunks':100,
                }
            ]
            profiles = test_profiles[params['camera_name']]['depth_test_profiles']
            self.spin_for_time(wait_time=0.5)
            assert self.set_bool_param('enable_color', False)
            self.spin_for_time(wait_time=0.5)
            for profile in profiles:
                print("Testing profile: ", profile)
                # Expected rate is the FPS field of the 'WxHxFPS' profile string.
                themes[0]['expected_fps_in_hz'] = float(profile.split('x')[2])
                assert self.set_string_param('depth_module.depth_profile', profile)
                self.spin_for_time(wait_time=0.5)
                assert self.set_bool_param('enable_depth', True)
                self.spin_for_time(wait_time=0.5)
                ret = self.run_test(themes, timeout=25.0)
                assert ret[0], ret[1]
                assert self.process_data(themes)
            # Color pass: depth disabled, same procedure on the color topic.
            themes = [
                {'topic':get_node_heirarchy(params)+'/color/image_raw',
                 'msg_type':msg_Image,
                 'expected_data_chunks':100,
                }
            ]
            assert self.set_bool_param('enable_depth', False)
            self.spin_for_time(wait_time=0.5)
            profiles = test_profiles[params['camera_name']]['color_test_profiles']
            for profile in profiles:
                print("Testing profile: ", profile)
                themes[0]['expected_fps_in_hz'] = float(profile.split('x')[2])
                assert self.set_string_param('rgb_camera.color_profile', profile)
                self.spin_for_time(wait_time=0.5)
                assert self.set_bool_param('enable_color', True)
                self.spin_for_time(wait_time=0.5)
                ret = self.run_test(themes, timeout=25.0)
                assert ret[0], ret[1]
                assert self.process_data(themes)
        finally:
            #this step is important because the test will fail next time
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()

View File

@@ -0,0 +1,121 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import delayed_launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
import pytest_live_camera_utils
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
from pytest_live_camera_utils import debug_print
# D455 with both IMU streams enabled; a non-zero unite_imu_method makes the
# combined /imu topic publish (see the motion-sensor test below).
test_params_all_profiles_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'enable_accel':"True",
    'enable_gyro':"True",
    'unite_imu_method':1,
    }
@pytest.mark.parametrize("launch_descr_with_parameters", [
    pytest.param(test_params_all_profiles_d455, marks=pytest.mark.d455)
    ],indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestLiveCamera_TestMotionSensor(pytest_rs_utils.RsTestBaseClass):
    """
    Verify /imu, /gyro/sample and /accel/sample publication as the
    enable_accel / enable_gyro / unite_imu_method parameters are toggled.
    """
    def test_LiveCamera_check_motion_sensor(self,launch_descr_with_parameters):
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        themes = [{'topic':get_node_heirarchy(params)+'/imu', 'msg_type':msg_Imu,'expected_data_chunks':1},
                  {'topic':get_node_heirarchy(params)+'/gyro/sample', 'msg_type':msg_Imu,'expected_data_chunks':1},
                  {'topic':get_node_heirarchy(params)+'/accel/sample', 'msg_type':msg_Imu,'expected_data_chunks':1}]
        # Readable indices into 'themes'.
        IMU_TOPIC = 0
        GYRO_TOPIC = 1
        ACCEL_TOPIC = 2
        # NOTE(review): params['unite_imu_method'] is the int 1 in the dict
        # above, so this string comparison can never be True — confirm whether
        # the launch fixture stringifies parameters or the intended comparison
        # is against 0.
        if params['unite_imu_method'] == '0':
            themes[IMU_TOPIC]['expected_data_chunks'] = 0
        try:
            #initialize
            self.init_test("RsTest"+params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            #run with default params and check the data
            msg = "Test with the default params "
            print(msg)
            ret = self.run_test(themes, timeout=50)
            assert ret[0], msg + str(ret[1])
            assert self.process_data(themes), msg + " failed"
            # Accel off, gyro on: neither /imu nor /accel should publish.
            msg = "Test with the accel false "
            self.set_integer_param('unite_imu_method', 0)
            self.set_bool_param('enable_accel', False)
            self.set_bool_param('enable_gyro', True)
            themes[ACCEL_TOPIC]['expected_data_chunks'] = 0
            themes[IMU_TOPIC]['expected_data_chunks'] = 0
            print(msg)
            ret = self.run_test(themes)
            assert ret[0], msg + str(ret[1])
            assert self.process_data(themes), msg + " failed"
            # Gyro off, accel on: only /accel should publish.
            msg = "Test with the gyro false "
            self.set_bool_param('enable_accel', True)
            self.set_bool_param('enable_gyro', False)
            themes[IMU_TOPIC]['expected_data_chunks'] = 0
            themes[ACCEL_TOPIC]['expected_data_chunks'] = 1
            themes[GYRO_TOPIC]['expected_data_chunks'] = 0
            print(msg)
            self.spin_for_time(wait_time=1.0)
            ret = self.run_test(themes)
            assert ret[0], msg + str(ret[1])
            assert self.process_data(themes), msg + " failed"
            # Both off: nothing should publish even with unite_imu_method set.
            msg = "Test with both gyro and accel false "
            self.set_bool_param('enable_accel', False)
            self.set_bool_param('enable_gyro', False)
            self.set_integer_param('unite_imu_method', 1)
            self.spin_for_time(wait_time=1.0)
            themes[ACCEL_TOPIC]['expected_data_chunks'] = 0
            themes[GYRO_TOPIC]['expected_data_chunks'] = 0
            themes[IMU_TOPIC]['expected_data_chunks'] = 0
            print(msg)
            ret = self.run_test(themes, initial_wait_time=1.0)
            assert ret[0], msg + " failed"
            assert self.process_data(themes), msg +" failed"
        finally:
            #this step is important because the test will fail next time
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()

View File

@@ -0,0 +1,115 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from rclpy.qos import DurabilityPolicy
from rclpy.qos import HistoryPolicy
from rclpy.qos import QoSProfile
import tf2_ros
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
from tf2_msgs.msg import TFMessage as msg_TFMessage
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
import pytest_live_camera_utils
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
# NOTE(review): the note below mentions tf_static/infra1 but the tests in this
# file exercise the point-cloud topic — the text looks copied from another
# test file; confirm and update.
'''
The test was implemented to check the tf_static before and after infra1 was enabled. There shouldn't be any
related data before enabling.
'''
test_params_points_cloud_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'pointcloud.enable': 'true'
    }
test_params_points_cloud_d435i = {
    'camera_name': 'D435I',
    'device_type': 'D435I',
    'pointcloud.enable': 'true'
    }
test_params_points_cloud_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    'enable_color':'true',
    'enable_depth':'true',
    'depth_module.profile':'848x480x30',
    'pointcloud.enable': 'true'
    }
'''
This test was ported from rs2_test.py
the command used to run is "python3 realsense2_camera/scripts/rs2_test.py points_cloud_1"
'''
@pytest.mark.parametrize("launch_descr_with_parameters", [
    pytest.param(test_params_points_cloud_d455, marks=pytest.mark.d455),
    pytest.param(test_params_points_cloud_d435i, marks=pytest.mark.d435i),
    pytest.param(test_params_points_cloud_d415, marks=pytest.mark.d415),
    ], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_TestPointCloud(pytest_rs_utils.RsTestBaseClass):
    def test_camera_test_point_cloud(self, launch_descr_with_parameters):
        """Launch the camera with pointcloud enabled and wait for point-cloud chunks."""
        self.params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(self.params['device_type']) == False:
            print("Device not found? : " + self.params['device_type'])
            assert False
            return
        themes = [
            {
                'topic': get_node_heirarchy(self.params) + '/depth/color/points',
                'msg_type': msg_PointCloud2,
                'expected_data_chunks': 5,
            },
        ]
        try:
            # initialize, run and check the data
            self.init_test("RsTest" + self.params['camera_name'])
            self.wait_for_node(self.params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(self.params))
            result = self.run_test(themes, timeout=10)
            assert result[0], result[1]
            result = self.process_data(themes, False)
            assert result[0], result[1]
        finally:
            self.shutdown()

    def process_data(self, themes, enable_infra1):
        """Drain and print every buffered point-cloud chunk; always reports success."""
        topic = get_node_heirarchy(self.params) + '/depth/color/points'
        for _ in range(self.node.get_num_chunks(topic)):
            # the frame counter starts with zero, jumps to 2 and continues. To be checked
            print(self.node.pop_first_chunk(topic))
        return True, ""

View File

@@ -0,0 +1,73 @@
# Copyright 2024 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import pytest
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
import pytest_live_camera_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import get_node_heirarchy
# Per-device launch parameters for the device_info service-call test.
test_params_test_srv_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    }
test_params_test_srv_d435i = {
    'camera_name': 'D435i',
    'device_type': 'D435i',
    }
test_params_test_srv_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    }
'''
The test checks service call device_info with type DeviceInfo
device info includes - device name, FW version, serial number, etc
'''
@pytest.mark.parametrize("launch_descr_with_parameters", [
    pytest.param(test_params_test_srv_d455, marks=pytest.mark.d455),
    pytest.param(test_params_test_srv_d435i, marks=pytest.mark.d435i),
    pytest.param(test_params_test_srv_d415, marks=pytest.mark.d415),
    ], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_ServiceCall_DeviceInfo(pytest_rs_utils.RsTestBaseClass):
    def test_camera_service_call_device_info(self, launch_descr_with_parameters):
        """Call the device_info service and validate the reported device name.

        The response's device_name must contain the device type (e.g. "D455")
        as one of its '_'-separated tokens, compared case-insensitively.
        """
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
        try:
            print("Starting camera test...")
            self.init_test("RsTest"+params['camera_name'])
            self.wait_for_node(params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            # No need to call run_test() as no frame integrity check is required
            response = self.get_deviceinfo()
            if response is not None:
                print(f"device_info service response:\n{response}\n")
                assert params['device_type'].casefold() in response.device_name.casefold().split('_')
            else:
                assert False, "No device_info response received"
        except Exception as e:
            # Log for diagnostics, but re-raise: the original bare print()
            # swallowed AssertionError and let a failing test pass.
            print(e)
            raise
        finally:
            # Kill the node explicitly; a leftover node would make the next run fail.
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()

View File

@@ -0,0 +1,223 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from rclpy.qos import DurabilityPolicy
from rclpy.qos import HistoryPolicy
from rclpy.qos import QoSProfile
import tf2_ros
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
from tf2_msgs.msg import TFMessage as msg_TFMessage
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
import pytest_live_camera_utils
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
'''
The test was implemented to check the tf_static before and after infra1 was enabled. There shouldn't be any
related data before enabling.
'''
# Launch parameters: infra1 is off at startup (the test enables it at runtime),
# while infra2 and the IMU streams are on from the start.
test_params_tf_static_change_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'enable_infra1': 'false',
    'enable_infra2': 'true',
    'enable_accel': 'true',
    'enable_gyro': 'true',
    }
test_params_tf_static_change_d435i = {
    'camera_name': 'D435I',
    'device_type': 'D435I',
    'enable_infra1': 'false',
    'enable_infra2': 'true',
    'enable_accel': 'true',
    'enable_gyro': 'true',
    }
test_params_tf_static_change_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    'enable_infra1': 'false',
    'enable_infra2': 'true',
    'enable_accel': 'true',
    'enable_gyro': 'true',
    }
@pytest.mark.parametrize("launch_descr_with_parameters", [
    # LRS-1181 [ROS2] To debug inconsistent TF (transform) test that fails on Jenkins 219 NUC on D455
    #pytest.param(test_params_tf_static_change_d455, marks=pytest.mark.d455),
    pytest.param(test_params_tf_static_change_d435i, marks=pytest.mark.d435i),
    pytest.param(test_params_tf_static_change_d415, marks=pytest.mark.d415),
    ],indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_TestTF_Static_change(pytest_rs_utils.RsTestBaseClass):
    def test_camera_test_tf_static_change(self,launch_descr_with_parameters):
        """Check /tf_static content before and after enabling infra1.

        The camera starts with enable_infra1=false, so the first /tf_static
        chunk must not contain infra1 transforms.  After setting enable_infra1
        to true, a fresh /tf_static chunk must contain them.
        """
        self.params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(self.params['device_type']) == False:
            print("Device not found? : " + self.params['device_type'])
            assert False
            return
        themes = [
        {'topic':'/tf_static',
         'msg_type':msg_TFMessage,
         'expected_data_chunks':1,
         # /tf_static is latched, so subscribe with TRANSIENT_LOCAL durability
         'qos' : QoSProfile(depth=100,durability=DurabilityPolicy.TRANSIENT_LOCAL,history=HistoryPolicy.KEEP_LAST,)#static
        }
        ]
        try:
            '''
            initialize, run and check the data
            '''
            self.init_test("RsTest"+self.params['camera_name'])
            self.wait_for_node(self.params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(self.params))
            # first pass: infra1 disabled, its transforms must be absent
            ret = self.run_test(themes, timeout=10)
            assert ret[0], ret[1]
            ret = self.process_data(themes, False)
            assert ret[0], ret[1]
            # second pass: enable infra1 and expect its transforms to appear
            assert self.set_bool_param('enable_infra1', True)
            ret = self.run_test(themes, timeout=10)
            assert ret[0], ret[1]
            ret = self.process_data(themes, True)
            assert ret[0], ret[1]
        finally:
            self.shutdown()
    def process_data(self, themes, enable_infra1):
        """Validate one /tf_static chunk against the expected frame pairs.

        Returns (True, "") on success, or (False, reason) naming the first
        frame pair whose transform presence did not match expectations.
        """
        frame_ids = [self.params['camera_name']+'_link',
                     self.params['camera_name']+'_depth_frame',
                     self.params['camera_name']+'_infra2_frame',
                     self.params['camera_name']+'_color_frame']
        # D455 publishes IMU frames as well
        if self.params['device_type'] == 'D455':
            frame_ids.append(self.params['camera_name']+'_gyro_frame')
            frame_ids.append(self.params['camera_name']+'_accel_frame')
        # infra1 is always checked; whether its transforms should exist
        # depends on the enable_infra1 argument below
        frame_ids.append(self.params['camera_name']+'_infra1_frame')
        data = self.node.pop_first_chunk('/tf_static')
        coupled_frame_ids = [xx for xx in itertools.combinations(frame_ids, 2)]
        res = self.get_transform_data(data, coupled_frame_ids, is_static=True)
        for couple in coupled_frame_ids:
            if self.params['camera_name']+'_infra1_frame' in couple:
                # infra1 pairs: transform must exist iff infra1 is enabled
                if enable_infra1 == True and res[couple] != None:
                    continue
                if enable_infra1 == False and res[couple] == None:
                    continue
                return False, str(couple) + ": tf_data not as expected"
            # every other frame pair must always have a transform
            if res[couple] == None:
                return False, str(couple) + ": didn't get any tf data"
        return True,""
# Launch parameters: publish TF dynamically at a low fixed rate (1.1 Hz).
test_params_tf_d455 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'publish_tf': 'true',
    'tf_publish_rate': '1.1',
    }
test_params_tf_d435i = {
    'camera_name': 'D435I',
    'device_type': 'D435I',
    'publish_tf': 'true',
    'tf_publish_rate': '1.1',
    }
test_params_tf_d415 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    'publish_tf': 'true',
    'tf_publish_rate': '1.1',
    }
@pytest.mark.parametrize("launch_descr_with_parameters", [
    # LRS-1181 [ROS2] To debug inconsistent TF (transform) test that fails on Jenkins 219 NUC on D455
    #pytest.param(test_params_tf_d455, marks=pytest.mark.d455),
    pytest.param(test_params_tf_d435i, marks=pytest.mark.d435i),
    pytest.param(test_params_tf_d415, marks=pytest.mark.d415),
    ], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestCamera_TestTF_DYN(pytest_rs_utils.RsTestBaseClass):
    def test_camera_test_tf_dyn(self, launch_descr_with_parameters):
        """Verify dynamic TF publishing: one latched static chunk plus repeated /tf chunks."""
        self.params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(self.params['device_type']) == False:
            print("Device not found? : " + self.params['device_type'])
            assert False
            return
        themes = [
            {
                'topic': '/tf',
                'msg_type': msg_TFMessage,
                'expected_data_chunks': 3,
                'qos': QoSProfile(depth=100, durability=DurabilityPolicy.VOLATILE, history=HistoryPolicy.KEEP_LAST,),
            },
            {
                'topic': '/tf_static',
                'msg_type': msg_TFMessage,
                'expected_data_chunks': 1,
                # static transforms are latched, hence TRANSIENT_LOCAL
                'qos': QoSProfile(depth=100, durability=DurabilityPolicy.TRANSIENT_LOCAL, history=HistoryPolicy.KEEP_LAST,),
            },
        ]
        try:
            # initialize, run and check the data
            self.init_test("RsTest" + self.params['camera_name'])
            self.wait_for_node(self.params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(self.params))
            result = self.run_test(themes, timeout=10)
            assert result[0], result[1]
            result = self.process_data(themes, False)
            assert result[0], result[1]
        finally:
            self.shutdown()

    def process_data(self, themes, enable_infra1):
        """Check the latched /tf_static chunk, then each of the three /tf chunks."""
        name = self.params['camera_name']
        frame_ids = [name + '_link',
                     name + '_depth_frame',
                     name + '_color_frame']
        result = self.check_transform_data(self.node.pop_first_chunk('/tf_static'), frame_ids, True)
        assert result[0], result[1]
        for _ in range(3):
            result = self.check_transform_data(self.node.pop_first_chunk('/tf'), frame_ids)
            assert result[0], result[1]
        return True, ""

View File

@@ -0,0 +1,167 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
import pytest_live_camera_utils
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
# Minimal launch parameters: device selection only.
test_params_depth_avg_1 = {
    'camera_name': 'D415',
    'device_type': 'D415',
    }
'''
This test was implemented as a template to set the parameters and run the test.
This directory is not added to the CMakeLists.txt so as to avoid the colcon failure in the
machines that don't have the D415 connected.
1. Only a subset of parameter types are implemented in py_rs_utils, it has to be extended for others
2. After setting the param, rclpy.spin_once may be needed.Test passes even without this though.
'''
@pytest.mark.d415
@pytest.mark.parametrize("launch_descr_with_parameters", [test_params_depth_avg_1], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestD415_Change_Resolution(pytest_rs_utils.RsTestBaseClass):
    def test_D415_Change_Resolution(self, launch_descr_with_parameters):
        """Cycle the D415 through a list of (stream, profile, format) capabilities.

        For each capability the stream is first moved to a default profile that
        differs from the target resolution, then switched to the target
        profile/format, and one image chunk of the expected size must arrive.
        Failures are collected and reported together at the end.
        """
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        failed_tests = []
        num_passed = 0
        num_failed = 0
        themes = [{'topic':get_node_heirarchy(params)+'/color/image_raw', 'msg_type':msg_Image,'expected_data_chunks':1}]
        config = pytest_live_camera_utils.get_profile_config(get_node_heirarchy(params))
        # Same pair of fallback profiles for every stream type (was five copies
        # of the same two assignments).
        for stream in ("Color", "Depth", "Infrared", "Infrared1", "Infrared2"):
            config[stream]["default_profile1"] = "640x480x6"
            config[stream]["default_profile2"] = "1280x720x6"
        cap = [
            ['Infrared', '848x100x100', 'BGRA8'],
            ['Infrared', '848x480x60', 'RGBA8'],
            ['Infrared', '640x480x60', 'RGBA8'],
            ['Infrared', '640x480x60', 'BGR8'],
            ['Infrared', '640x360x60', 'BGRA8'],
            ['Infrared', '640x360x60', 'BGR8'],
            ['Infrared', '640x360x30', 'UYVY'],
            ['Infrared', '480x270x60', 'BGRA8'],
            ['Infrared', '480x270x60', 'RGB8'],
            ['Infrared', '424x240x60', 'BGRA8'],
            ['Infrared1', '848x100x100', 'Y8'],
            ['Infrared1', '848x480x6', 'Y8'],
            ['Infrared1', '1920x1080x25', 'Y16'],
            ['Infrared2', '848x100x100', 'Y8'],
            ['Infrared2', '848x480x6', 'Y8'],
            ['Infrared2', '1920x1080x25', 'Y16'],
        ]
        try:
            # initialize, run and check the data
            self.init_test("RsTest"+params['camera_name'])
            self.spin_for_time(wait_time=1.0)
            self.create_service_client_ifs(get_node_heirarchy(params))
            self.spin_for_time(wait_time=1.0)
            for key in cap:
                # `image_format` avoids shadowing the builtin `format`
                profile_type, profile, image_format = key
                print("Testing " + " ".join(key))
                themes[0]['topic'] = config[profile_type]['topic']
                themes[0]['width'] = int(profile.split('x')[0])
                themes[0]['height'] = int(profile.split('x')[1])
                self.disable_all_streams()
                # Move to whichever default profile differs from the target
                # width, so setting the target profile is a real change.
                if themes[0]['width'] == int(config[profile_type]["default_profile2"].split('x')[0]):
                    self.set_string_param(config[profile_type]["profile"], config[profile_type]["default_profile1"])
                else:
                    self.set_string_param(config[profile_type]["profile"], config[profile_type]["default_profile2"])
                self.set_bool_param(config[profile_type]["param"], True)
                self.spin_for_time(wait_time=1.0)
                self.set_string_param(config[profile_type]["profile"], profile)
                self.spin_for_time(wait_time=1.0)
                self.set_string_param(config[profile_type]["format"], image_format)
                self.spin_for_time(wait_time=1.0)
                self.set_bool_param(config[profile_type]["param"], True)
                self.spin_for_time(wait_time=1.0)
                try:
                    ret = self.run_test(themes, timeout=10.0)
                    assert ret[0], ret[1]
                    assert self.process_data(themes), " ".join(key) + " failed"
                    num_passed += 1
                except Exception as e:
                    exc_type, _, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    print("Test failed")
                    print(e)
                    print(exc_type, fname, exc_tb.tb_lineno)
                    num_failed += 1
                    failed_tests.append(" ".join(key))
        finally:
            # kill the node explicitly; a leftover node would make the next run fail
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()
        print(f"Capabilities passed: {num_passed}, failed: {num_failed}")
        if num_failed != 0:
            print("Failed tests:")
            print("\n".join(failed_tests))
            assert False, " Tests failed"

    def disable_all_streams(self):
        """Turn off every stream so each capability starts from a clean state."""
        for stream_param in ('enable_color', 'enable_depth', 'enable_infra',
                             'enable_infra1', 'enable_infra2'):
            self.set_bool_param(stream_param, False)
            self.spin_for_time(wait_time=1.0)

View File

@@ -0,0 +1,231 @@
# Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
import pytest_live_camera_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.srv import SetParameters, GetParameters, ListParameters
# Minimal launch parameters: device selection only.
test_params_depth_avg_1 = {
    'camera_name': 'D455',
    'device_type': 'D455',
    }
'''
This test was implemented as a template to set the parameters and run the test.
This directory is not added to the CMakeLists.txt so as to avoid the colcon failure in the
machines that don't have the D455 connected.
1. Only a subset of parameter types are implemented in py_rs_utils, it has to be extended for others
2. After setting the param, rclpy.spin_once may be needed.Test passes even without this though.
'''
@pytest.mark.d455
@pytest.mark.parametrize("launch_descr_with_parameters", [test_params_depth_avg_1], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestD455_Change_Resolution(pytest_rs_utils.RsTestBaseClass):
    def test_D455_Change_Resolution(self, launch_descr_with_parameters):
        """Change the RGB profile at runtime and verify frames at both resolutions."""
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        themes = [
            {
                'topic': get_node_heirarchy(params) + '/color/image_raw',
                'msg_type': msg_Image,
                'expected_data_chunks': 1,
                'width': 640,
                'height': 480,
            }
        ]
        try:
            # initialize, run and check the data
            print("Starting camera test...")
            self.init_test("RsTest" + params['camera_name'])
            self.wait_for_node(params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            self.spin_for_time(0.5)
            # restart the color stream on the 640x480 profile
            assert self.set_bool_param('enable_color', False)
            self.spin_for_time(0.5)
            assert self.set_string_param('rgb_camera.color_profile', '640x480x30')
            self.spin_for_time(0.5)
            assert self.set_bool_param('enable_color', True)
            self.spin_for_time(0.5)
            result = self.run_test(themes)
            assert result[0], result[1]
            assert self.process_data(themes)
            # switch to the high-resolution profile and check again
            self.set_string_param('rgb_camera.color_profile', '1280x800x5')
            self.set_bool_param('enable_color', True)
            themes[0]['width'], themes[0]['height'] = 1280, 800
            result = self.run_test(themes)
            assert result[0], result[1]
            assert self.process_data(themes)
        finally:
            # kill the node explicitly; a leftover node would make the next run fail
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()
# Launch parameters plus the per-sequence exposure/gain values the test programs
# into HDR sequence IDs 1 and 2 and then reads back.
test_params_seq_id_update = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'exposure_1' : 2000,
    'gain_1' : 20,
    'exposure_2' : 3000,
    'gain_2' : 30,
    }
'''
This test sets the sequence ID param and validates the corresponding Exposure & Gain values.
'''
@pytest.mark.d455
@pytest.mark.parametrize("launch_descr_with_parameters", [test_params_seq_id_update], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class Test_D455_Seq_ID_Update(pytest_rs_utils.RsTestBaseClass):
    def test_D455_Seq_ID_update(self, launch_descr_with_parameters):
        """Program exposure/gain for each HDR sequence ID, then read them back."""
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        try:
            print("Starting camera test...")
            self.init_test("RsTest" + params['camera_name'])
            self.wait_for_node(params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            # disable HDR before programming the per-sequence values
            assert self.set_bool_param('depth_module.hdr_enabled', False)
            # program exposure/gain for each sequence ID
            for seq_id in (1, 2):
                assert self.set_integer_param('depth_module.sequence_id', seq_id)
                assert self.set_integer_param('depth_module.exposure', params[f'exposure_{seq_id}'])
                assert self.set_integer_param('depth_module.gain', params[f'gain_{seq_id}'])
            # re-select each sequence and confirm the stored values survived
            for seq_id in (1, 2):
                assert self.set_integer_param('depth_module.sequence_id', seq_id)
                assert self.get_integer_param('depth_module.exposure') == params[f'exposure_{seq_id}']
                assert self.get_integer_param('depth_module.gain') == params[f'gain_{seq_id}']
        finally:
            # kill the node explicitly; a leftover node would make the next run fail
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()
# Launch parameters; the color profile here is what the stream is expected to
# come back with after reset_device().
test_params_reset_device = {
    'camera_name': 'D455',
    'device_type': 'D455',
    'rgb_camera.color_profile': '640x480x30',
    }
'''
This test changes the color profile at runtime, resets the device and verifies
that the color stream comes back with the profile given in the launch parameters.
This directory is not added to the CMakeLists.txt so as to avoid the colcon failure in the
machines that don't have the D455 connected.
'''
@pytest.mark.d455
@pytest.mark.parametrize("launch_descr_with_parameters", [test_params_reset_device], indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestD455_reset_device(pytest_rs_utils.RsTestBaseClass):
    def test_D455_Reset_Device(self, launch_descr_with_parameters):
        """Switch color profiles at runtime, reset the device, and verify the
        stream returns at the launch-parameter profile."""
        params = launch_descr_with_parameters[1]
        if pytest_live_camera_utils.check_if_camera_connected(params['device_type']) == False:
            print("Device not found? : " + params['device_type'])
            assert False
            return
        themes = [
            {
                'topic': get_node_heirarchy(params) + '/color/image_raw',
                'msg_type': msg_Image,
                'expected_data_chunks': 1,
                'width': 640,
                'height': 480,
            }
        ]

        def expect_frames(width, height):
            # run one capture cycle and validate the incoming image size
            themes[0]['width'] = width
            themes[0]['height'] = height
            outcome = self.run_test(themes)
            assert outcome[0], outcome[1]
            assert self.process_data(themes)

        try:
            # initialize, run and check the data
            print("Starting camera test...")
            self.init_test("RsTest" + params['camera_name'])
            self.wait_for_node(params['camera_name'])
            self.create_service_client_ifs(get_node_heirarchy(params))
            self.spin_for_time(0.5)
            # restart the color stream on the 640x480 profile
            assert self.set_bool_param('enable_color', False)
            self.spin_for_time(0.5)
            assert self.set_string_param('rgb_camera.color_profile', '640x480x30')
            self.spin_for_time(0.5)
            assert self.set_bool_param('enable_color', True)
            self.spin_for_time(0.5)
            expect_frames(640, 480)
            # switch to the high-resolution profile and check again
            self.set_string_param('rgb_camera.color_profile', '1280x800x5')
            self.set_bool_param('enable_color', True)
            expect_frames(1280, 800)
            # after a reset the device should come back with the launch profile
            self.reset_device()
            default_width = int(params['rgb_camera.color_profile'].split('x')[0])
            default_height = int(params['rgb_camera.color_profile'].split('x')[1])
            expect_frames(default_width, default_height)
        finally:
            # kill the node explicitly; a leftover node would make the next run fail
            pytest_rs_utils.kill_realsense2_camera_node()
            self.shutdown()

Some files were not shown because too many files have changed in this diff Show More