Update BT configs and remove unused scripts

This commit is contained in:
NuoDaJia02
2026-02-04 16:17:41 +08:00
parent 7d7f602a16
commit 742e5ec67e
20 changed files with 420 additions and 2644 deletions

View File

@@ -15,89 +15,35 @@
- name: s1_left_hand_gripper_open
params: *gripper_open
- name: s1_right_arm_initial
params: &arm_inittial_right 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 80, -5, -112, -12, -62, 175]
'
params: &arm_inittial_right "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [80, -5, -112, -12, -62, 175]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_left_arm_initial
params: &arm_inittial_left 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [100, 5, 112, 12, 62, 5, 0, 0, 0, 0, 0, 0]
'
params: &arm_inittial_left "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [100, 5, 112, 12, 62, 5]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_right_arm_pre_cam_take_photo
params: 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 14.29, -34.07, -122.53, -76.89, 84.40, 112.89]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [14.29, -34.07, -122.53, -76.89, 84.40, 112.89]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_left_arm_pre_cam_take_photo
params: 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [165.80, 34.07, 122.53, 76.89, -84.40, 67.20, 0, 0, 0, 0, 0, 0]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [165.80, 34.07, 122.53, 76.89, -84.40, 67.20]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_right_arm_cam_take_photo
params: &right_arm_cam_take_photo 'body_id: 1
data_type: 100
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &right_arm_cam_take_photo "arm_motion_params:\n- arm_id: 1\n motion_type: 100\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_left_arm_cam_take_photo
params: &left_arm_cam_take_photo 'body_id: 0
data_type: 100
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &left_arm_cam_take_photo "arm_motion_params:\n- arm_id: 0\n motion_type: 100\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_top_camera_vision_recg
params: 'camera_position: top
@@ -111,61 +57,25 @@
'
- name: s3_right_arm_vision_pre_grasp
params: &pre_grasp_right 'body_id: 1
data_type: 101
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &pre_grasp_right "arm_motion_params:\n- arm_id: 1\n motion_type: 101\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s3_left_arm_vision_pre_grasp
params: &pre_grasp_left 'body_id: 0
data_type: 101
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &pre_grasp_left "arm_motion_params:\n- arm_id: 0\n motion_type: 101\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_right_arm_vision_grasp
params: 'body_id: 1
data_type: 102
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 102\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_left_arm_vision_grasp
params: 'body_id: 0
data_type: 102
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 102\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_right_hand_gripper_close
params: &gripper_close 'loc: 200
@@ -179,117 +89,45 @@
- name: s4_left_hand_gripper_close
params: *gripper_close
- name: s4_right_arm_take_box
params: 'body_id: 1
data_type: 110
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 110\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_left_arm_take_box
params: 'body_id: 0
data_type: 110
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 110\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_right_arm_take_box_off
params: 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 96.31, -103.06, 17.69, -53.40, -7.81, 143.19]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [96.31, -103.06, 17.69, -53.40, -7.81, 143.19]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_left_arm_take_box_off
params: 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90, 0, 0, 0, 0, 0, 0]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_grasp_box
params: &grasp_box_s7_s9_right 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 96.31, -103.06, 17.69, -53.40, -7.81, 143.19]
'
params: &grasp_box_s7_s9_right "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [96.31, -103.06, 17.69, -53.40, -7.81, 143.19]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_left_arm_grasp_box
params: &grasp_box_s7_s9_left 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90, 0, 0, 0, 0, 0, 0]
'
params: &grasp_box_s7_s9_left "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_put_down_box
params: 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 85.28, -103.86, 19.46, 40.34, -7.37, 49.94]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85.28, -103.86, 19.46, 40.34, -7.37, 49.94]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_left_arm_put_down_box
params: 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [94.81, 103.86, -19.46, -40.33, 7.37, 130.15, 0, 0, 0, 0, 0, 0]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [94.81, 103.86, -19.46, -40.33, 7.37, 130.15]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s8_right_hand_gripper_open
params: *gripper_open
- name: s8_left_hand_gripper_open

View File

@@ -33,8 +33,8 @@
<!-- Phase 5: Dual Arm Hand Vision -->
<Parallel name="dual_arm_init_action">
<Arm_H name="s11_right_arm_initial" />
<Arm_H name="s11_left_arm_initial" />
<DualArm_H name="s11_right_arm_initial" />
<DualArm_H name="s11_left_arm_initial" />
</Parallel>
</Sequence>
@@ -55,14 +55,14 @@
<Parallel name="parallel_arm_init_action">
<GripperCmd1_H name="s0_right_hand_gripper_open" />
<GripperCmd0_H name="s0_left_hand_gripper_open" />
<Arm_H name="s1_right_arm_initial" />
<Arm_H name="s1_left_arm_initial" />
<DualArm_H name="s1_right_arm_initial" />
<DualArm_H name="s1_left_arm_initial" />
</Parallel>
<Parallel name="parallel_init_action">
<GripperCmd1_H name="s1_right_hand_gripper_open" />
<GripperCmd0_H name="s1_left_hand_gripper_open" />
<!-- <Arm_H name="s1_right_arm_pre_cam_take_photo" />
<Arm_H name="s1_left_arm_pre_cam_take_photo" /> -->
<!-- <DualArm_H name="s1_right_arm_pre_cam_take_photo" />
<DualArm_H name="s1_left_arm_pre_cam_take_photo" /> -->
</Parallel>
<RetryUntilSuccessful name="retry_top_cam_vision" num_attempts="5">
<VisionObjectRecognition_H name="s2_top_camera_vision_recg" />
@@ -71,13 +71,13 @@
</BehaviorTree>
<!-- Right Arm Specific Subtrees -->
<BehaviorTree ID="right_arm_hand_vision_subtree">
<BehaviorTree ID="right_DualArm_Hand_vision_subtree">
<Fallback name="right_hand_vision_fallback">
<RetryUntilSuccessful name="retry_right_hand_cam_vision" num_attempts="5">
<VisionObjectRecognition_H name="s3_right_camera_vision_recg" />
</RetryUntilSuccessful>
<Sequence>
<Arm_H name="s15_right_arm_initial_vision" />
<DualArm_H name="s15_right_arm_initial_vision" />
<AlwaysFailure />
</Sequence>
</Fallback>
@@ -87,11 +87,11 @@
<Sequence name="right_arm_grasp_root">
<RetryUntilSuccessful name="retry_right_arm_grasp_action" num_attempts="1">
<Sequence>
<Arm_H name="s3_right_arm_vision_pre_grasp" />
<Arm_H name="s4_right_arm_vision_grasp" />
<DualArm_H name="s3_right_arm_vision_pre_grasp" />
<DualArm_H name="s4_right_arm_vision_grasp" />
<GripperCmd1_H name="s4_right_hand_gripper_close" />
<Arm_H name="s4_right_arm_take_box" />
<Arm_H name="s5_right_arm_take_box_off" />
<DualArm_H name="s4_right_arm_take_box" />
<DualArm_H name="s5_right_arm_take_box_off" />
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -101,24 +101,24 @@
<Sequence name="right_arm_putdown_root">
<RetryUntilSuccessful name="retry_right_arm_putdown" num_attempts="1">
<Sequence>
<!-- <Arm_H name="s7_right_arm_grasp_box" /> -->
<Arm_H name="s7_right_arm_put_down_box" />
<!-- <DualArm_H name="s7_right_arm_grasp_box" /> -->
<DualArm_H name="s7_right_arm_put_down_box" />
<GripperCmd1_H name="s8_right_hand_gripper_open" />
<Arm_H name="s9_right_arm_grasp_box" />
<!-- <Arm_H name="s11_right_arm_initial" /> -->
<DualArm_H name="s9_right_arm_grasp_box" />
<!-- <DualArm_H name="s11_right_arm_initial" /> -->
</Sequence>
</RetryUntilSuccessful>
</Sequence>
</BehaviorTree>
<!-- Left Arm Specific Subtrees -->
<BehaviorTree ID="left_arm_hand_vision_subtree">
<BehaviorTree ID="left_DualArm_Hand_vision_subtree">
<Fallback name="left_hand_vision_fallback">
<RetryUntilSuccessful name="retry_left_hand_cam_vision" num_attempts="5">
<VisionObjectRecognition_H name="s3_left_camera_vision_recg" />
</RetryUntilSuccessful>
<Sequence>
<Arm_H name="s15_left_arm_initial_vision" />
<DualArm_H name="s15_left_arm_initial_vision" />
<AlwaysFailure />
</Sequence>
</Fallback>
@@ -128,11 +128,11 @@
<Sequence name="left_arm_grasp_root">
<RetryUntilSuccessful name="retry_left_arm_grasp_action" num_attempts="1">
<Sequence>
<Arm_H name="s3_left_arm_vision_pre_grasp" />
<Arm_H name="s4_left_arm_vision_grasp" />
<DualArm_H name="s3_left_arm_vision_pre_grasp" />
<DualArm_H name="s4_left_arm_vision_grasp" />
<GripperCmd0_H name="s4_left_hand_gripper_close" />
<Arm_H name="s4_left_arm_take_box" />
<Arm_H name="s5_left_arm_take_box_off" />
<DualArm_H name="s4_left_arm_take_box" />
<DualArm_H name="s5_left_arm_take_box_off" />
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -142,11 +142,11 @@
<Sequence name="left_arm_putdown_root">
<RetryUntilSuccessful name="retry_left_arm_putdown" num_attempts="1">
<Sequence>
<!-- <Arm_H name="s7_left_arm_grasp_box" /> -->
<Arm_H name="s7_left_arm_put_down_box" />
<!-- <DualArm_H name="s7_left_arm_grasp_box" /> -->
<DualArm_H name="s7_left_arm_put_down_box" />
<GripperCmd0_H name="s8_left_hand_gripper_open" />
<Arm_H name="s9_left_arm_grasp_box" />
<!-- <Arm_H name="s11_left_arm_initial" /> -->
<DualArm_H name="s9_left_arm_grasp_box" />
<!-- <DualArm_H name="s11_left_arm_initial" /> -->
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -158,8 +158,8 @@
<Parallel name="overall_recovery_parallel">
<GripperCmd1_H name="s12_right_hand_gripper_open" />
<GripperCmd0_H name="s12_left_hand_gripper_open" />
<Arm_H name="s13_right_arm_initial" />
<Arm_H name="s13_left_arm_initial" />
<DualArm_H name="s13_right_arm_initial" />
<DualArm_H name="s13_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_overall" />
</Sequence>
@@ -168,8 +168,8 @@
<BehaviorTree ID="vision_recovery_subtree">
<Sequence name="vision_recovery_root">
<Parallel name="arm_initial2">
<Arm_H name="s14_right_arm_initial" />
<Arm_H name="s14_left_arm_initial" />
<DualArm_H name="s14_right_arm_initial" />
<DualArm_H name="s14_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_vision" />
</Sequence>
@@ -178,9 +178,9 @@
<BehaviorTree ID="right_arm_grasp_recovery_subtree">
<Sequence name="right_grasp_recovery_root">
<GripperCmd1_H name="s15_right_hand_gripper_open" />
<Arm_H name="s15_right_arm_pre_grasp" />
<Arm_H name="s15_right_arm_cam_take_photo" />
<Arm_H name="s15_right_arm_initial_grasp" />
<DualArm_H name="s15_right_arm_pre_grasp" />
<DualArm_H name="s15_right_arm_cam_take_photo" />
<DualArm_H name="s15_right_arm_initial_grasp" />
<ClearDoneInstances_H name="clear_done_instances_right_grasp" match="right" />
</Sequence>
</BehaviorTree>
@@ -188,9 +188,9 @@
<BehaviorTree ID="left_arm_grasp_recovery_subtree">
<Sequence name="left_grasp_recovery_root">
<GripperCmd0_H name="s15_left_hand_gripper_open" />
<Arm_H name="s15_left_arm_pre_grasp" />
<Arm_H name="s15_left_arm_cam_take_photo" />
<Arm_H name="s15_left_arm_initial_grasp" />
<DualArm_H name="s15_left_arm_pre_grasp" />
<DualArm_H name="s15_left_arm_cam_take_photo" />
<DualArm_H name="s15_left_arm_initial_grasp" />
<ClearDoneInstances_H name="clear_done_instances_left_grasp" match="left" />
</Sequence>
</BehaviorTree>
@@ -198,8 +198,8 @@
<BehaviorTree ID="arm_vision_recovery_subtree">
<Sequence name="arm_vision_recovery_root">
<Parallel name="arm_initial3">
<Arm_H name="s16_right_arm_initial" />
<Arm_H name="s16_left_arm_initial" />
<DualArm_H name="s16_right_arm_initial" />
<DualArm_H name="s16_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_arm_vision" />
</Sequence>
@@ -208,7 +208,7 @@
<BehaviorTree ID="left_arm_vision_recovery_subtree">
<Sequence name="left_arm_vision_recovery_root">
<Parallel name="left_arm_initial">
<Arm_H name="s17_left_arm_initial" />
<DualArm_H name="s17_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_left_arm_vision" match="left" />
</Sequence>
@@ -217,7 +217,7 @@
<BehaviorTree ID="right_arm_vision_recovery_subtree">
<Sequence name="right_arm_vision_recovery_root">
<Parallel name="right_arm_initial">
<Arm_H name="s17_right_arm_initial" />
<DualArm_H name="s17_right_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_right_arm_vision" match="right" />
</Sequence>
@@ -232,19 +232,19 @@
<!-- 右臂 -->
<Fallback>
<Sequence>
<Arm_H name="s2_right_arm_cam_take_photo" />
<DualArm_H name="s2_right_arm_cam_take_photo" />
<Script code="right_cam_status:=true" />
</Sequence>
<Arm_H name="s15_right_arm_initial_cam" />
<DualArm_H name="s15_right_arm_initial_cam" />
<AlwaysSuccess/>
</Fallback>
<!-- 左臂 -->
<Fallback>
<Sequence>
<Arm_H name="s2_left_arm_cam_take_photo" />
<DualArm_H name="s2_left_arm_cam_take_photo" />
<Script code="left_cam_status:=true" />
</Sequence>
<Arm_H name="s15_left_arm_initial_cam" />
<DualArm_H name="s15_left_arm_initial_cam" />
<AlwaysSuccess/>
</Fallback>
</Parallel>
@@ -262,7 +262,7 @@
<ScriptCondition code="right_done==false &amp;&amp; right_cam_ok==true" />
<Fallback>
<Sequence>
<SubTree ID="right_arm_hand_vision_subtree"/>
<SubTree ID="right_DualArm_Hand_vision_subtree"/>
<Script code="right_v_ok:=true" />
</Sequence>
<AlwaysFailure/>
@@ -276,7 +276,7 @@
<ScriptCondition code="left_done==false &amp;&amp; left_cam_ok==true" />
<Fallback>
<Sequence>
<SubTree ID="left_arm_hand_vision_subtree"/>
<SubTree ID="left_DualArm_Hand_vision_subtree"/>
<Script code="left_v_ok:=true" />
</Sequence>
<AlwaysFailure/>

View File

@@ -15,89 +15,35 @@
- name: s1_left_hand_gripper_open
params: *gripper_open
- name: s1_right_arm_initial
params: &arm_inittial_right 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 80, -5, -112, -12, -62, 175]
'
params: &arm_inittial_right "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [80, -5, -112, -12, -62, 175]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_left_arm_initial
params: &arm_inittial_left 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [100, 5, 112, 12, 62, 5, 0, 0, 0, 0, 0, 0]
'
params: &arm_inittial_left "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [100, 5, 112, 12, 62, 5]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_right_arm_pre_cam_take_photo
params: 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 14.29, -34.07, -122.53, -76.89, 84.40, 112.89]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [14.29, -34.07, -122.53, -76.89, 84.40, 112.89]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_left_arm_pre_cam_take_photo
params: 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [165.80, 34.07, 122.53, 76.89, -84.40, 67.20, 0, 0, 0, 0, 0, 0]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [165.80, 34.07, 122.53, 76.89, -84.40, 67.20]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_right_arm_cam_take_photo
params: &right_arm_cam_take_photo 'body_id: 1
data_type: 100
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &right_arm_cam_take_photo "arm_motion_params:\n- arm_id: 1\n motion_type: 100\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_left_arm_cam_take_photo
params: &left_arm_cam_take_photo 'body_id: 0
data_type: 100
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &left_arm_cam_take_photo "arm_motion_params:\n- arm_id: 0\n motion_type: 100\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_top_camera_vision_recg
params: 'camera_position: top
@@ -111,61 +57,25 @@
'
- name: s3_right_arm_vision_pre_grasp
params: &pre_grasp_right 'body_id: 1
data_type: 101
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &pre_grasp_right "arm_motion_params:\n- arm_id: 1\n motion_type: 101\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s3_left_arm_vision_pre_grasp
params: &pre_grasp_left 'body_id: 0
data_type: 101
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &pre_grasp_left "arm_motion_params:\n- arm_id: 0\n motion_type: 101\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_right_arm_vision_grasp
params: 'body_id: 1
data_type: 102
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 102\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_left_arm_vision_grasp
params: 'body_id: 0
data_type: 102
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 102\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_right_hand_gripper_close
params: &gripper_close 'loc: 200
@@ -179,117 +89,45 @@
- name: s4_left_hand_gripper_close
params: *gripper_close
- name: s4_right_arm_take_box
params: 'body_id: 1
data_type: 110
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 110\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_left_arm_take_box
params: 'body_id: 0
data_type: 110
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 110\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_right_arm_take_box_off
params: 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 96.31, -103.06, 17.69, -53.40, -7.81, 143.19]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [96.31, -103.06, 17.69, -53.40, -7.81, 143.19]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_left_arm_take_box_off
params: 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90, 0, 0, 0, 0, 0, 0]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_grasp_box
params: &grasp_box_s7_s9_right 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 96.31, -103.06, 17.69, -53.40, -7.81, 143.19]
'
params: &grasp_box_s7_s9_right "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [96.31, -103.06, 17.69, -53.40, -7.81, 143.19]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_left_arm_grasp_box
params: &grasp_box_s7_s9_left 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90, 0, 0, 0, 0, 0, 0]
'
params: &grasp_box_s7_s9_left "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [83.78, 103.06, -17.69, 53.40, 7.81, 36.90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_put_down_box
params: 'body_id: 1
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [0, 0, 0, 0, 0, 0, 85.28, -103.86, 19.46, 40.34, -7.37, 49.94]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85.28, -103.86, 19.46, 40.34, -7.37, 49.94]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_left_arm_put_down_box
params: 'body_id: 0
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [94.81, 103.86, -19.46, -40.33, 7.37, 130.15, 0, 0, 0, 0, 0, 0]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [94.81, 103.86, -19.46, -40.33, 7.37, 130.15]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s8_right_hand_gripper_open
params: *gripper_open
- name: s8_left_hand_gripper_open

View File

@@ -44,8 +44,8 @@
<!-- Phase 5: Dual Arm Initialization -->
<Parallel name="dual_arm_init_action">
<MoveWheel_H name="s10_wheel_move_to_origin_pickup_position" />
<Arm_H name="s11_right_arm_initial" />
<Arm_H name="s11_left_arm_initial" />
<DualArm_H name="s11_right_arm_initial" />
<DualArm_H name="s11_left_arm_initial" />
</Parallel>
</Sequence>
@@ -66,14 +66,14 @@
<Parallel name="parallel_arm_init_action">
<GripperCmd1_H name="s0_right_hand_gripper_open" />
<GripperCmd0_H name="s0_left_hand_gripper_open" />
<Arm_H name="s1_right_arm_initial" />
<Arm_H name="s1_left_arm_initial" />
<DualArm_H name="s1_right_arm_initial" />
<DualArm_H name="s1_left_arm_initial" />
</Parallel>
<Parallel name="parallel_init_action">
<GripperCmd1_H name="s1_right_hand_gripper_open" />
<GripperCmd0_H name="s1_left_hand_gripper_open" />
<!-- <Arm_H name="s1_right_arm_pre_cam_take_photo" />
<Arm_H name="s1_left_arm_pre_cam_take_photo" /> -->
<!-- <DualArm_H name="s1_right_arm_pre_cam_take_photo" />
<DualArm_H name="s1_left_arm_pre_cam_take_photo" /> -->
</Parallel>
<RetryUntilSuccessful name="retry_top_cam_vision" num_attempts="5">
<VisionObjectRecognition_H name="s2_top_camera_vision_recg" />
@@ -82,7 +82,7 @@
</BehaviorTree>
<!-- Right Arm Specific Subtrees -->
<BehaviorTree ID="right_arm_hand_vision_subtree">
<BehaviorTree ID="right_DualArm_Hand_vision_subtree">
<Sequence name="right_hand_vision_root">
<RetryUntilSuccessful name="retry_right_hand_cam_vision" num_attempts="5">
<VisionObjectRecognition_H name="s3_right_camera_vision_recg" />
@@ -94,11 +94,11 @@
<Sequence name="right_arm_grasp_root">
<RetryUntilSuccessful name="retry_right_arm_grasp_action" num_attempts="1">
<Sequence>
<Arm_H name="s3_right_arm_vision_pre_grasp" />
<Arm_H name="s4_right_arm_vision_grasp" />
<DualArm_H name="s3_right_arm_vision_pre_grasp" />
<DualArm_H name="s4_right_arm_vision_grasp" />
<GripperCmd1_H name="s4_right_hand_gripper_close" />
<Arm_H name="s4_right_arm_take_box" />
<Arm_H name="s5_right_arm_take_box_off" />
<DualArm_H name="s4_right_arm_take_box" />
<DualArm_H name="s5_right_arm_take_box_off" />
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -108,18 +108,18 @@
<Sequence name="right_arm_putdown_root">
<RetryUntilSuccessful name="retry_right_arm_putdown" num_attempts="1">
<Sequence>
<!-- <Arm_H name="s7_right_arm_grasp_box" /> -->
<Arm_H name="s7_right_arm_put_down_box" />
<!-- <DualArm_H name="s7_right_arm_grasp_box" /> -->
<DualArm_H name="s7_right_arm_put_down_box" />
<GripperCmd1_H name="s8_right_hand_gripper_open" />
<Arm_H name="s9_right_arm_grasp_box" />
<!-- <Arm_H name="s11_right_arm_initial" /> -->
<DualArm_H name="s9_right_arm_grasp_box" />
<!-- <DualArm_H name="s11_right_arm_initial" /> -->
</Sequence>
</RetryUntilSuccessful>
</Sequence>
</BehaviorTree>
<!-- Left Arm Specific Subtrees -->
<BehaviorTree ID="left_arm_hand_vision_subtree">
<BehaviorTree ID="left_DualArm_Hand_vision_subtree">
<Sequence name="left_hand_vision_root">
<RetryUntilSuccessful name="retry_left_hand_cam_vision" num_attempts="5">
<VisionObjectRecognition_H name="s3_left_camera_vision_recg" />
@@ -131,11 +131,11 @@
<Sequence name="left_arm_grasp_root">
<RetryUntilSuccessful name="retry_left_arm_grasp_action" num_attempts="1">
<Sequence>
<Arm_H name="s3_left_arm_vision_pre_grasp" />
<Arm_H name="s4_left_arm_vision_grasp" />
<DualArm_H name="s3_left_arm_vision_pre_grasp" />
<DualArm_H name="s4_left_arm_vision_grasp" />
<GripperCmd0_H name="s4_left_hand_gripper_close" />
<Arm_H name="s4_left_arm_take_box" />
<Arm_H name="s5_left_arm_take_box_off" />
<DualArm_H name="s4_left_arm_take_box" />
<DualArm_H name="s5_left_arm_take_box_off" />
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -145,11 +145,11 @@
<Sequence name="left_arm_putdown_root">
<RetryUntilSuccessful name="retry_left_arm_putdown" num_attempts="1">
<Sequence>
<!-- <Arm_H name="s7_left_arm_grasp_box" /> -->
<Arm_H name="s7_left_arm_put_down_box" />
<!-- <DualArm_H name="s7_left_arm_grasp_box" /> -->
<DualArm_H name="s7_left_arm_put_down_box" />
<GripperCmd0_H name="s8_left_hand_gripper_open" />
<Arm_H name="s9_left_arm_grasp_box" />
<!-- <Arm_H name="s11_left_arm_initial" /> -->
<DualArm_H name="s9_left_arm_grasp_box" />
<!-- <DualArm_H name="s11_left_arm_initial" /> -->
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -161,8 +161,8 @@
<Parallel name="overall_recovery_parallel">
<GripperCmd1_H name="s12_right_hand_gripper_open" />
<GripperCmd0_H name="s12_left_hand_gripper_open" />
<Arm_H name="s13_right_arm_initial" />
<Arm_H name="s13_left_arm_initial" />
<DualArm_H name="s13_right_arm_initial" />
<DualArm_H name="s13_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_overall" />
</Sequence>
@@ -171,8 +171,8 @@
<BehaviorTree ID="vision_recovery_subtree">
<Sequence name="vision_recovery_root">
<Parallel name="arm_initial2">
<Arm_H name="s14_right_arm_initial" />
<Arm_H name="s14_left_arm_initial" />
<DualArm_H name="s14_right_arm_initial" />
<DualArm_H name="s14_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_vision" />
</Sequence>
@@ -181,9 +181,9 @@
<BehaviorTree ID="right_arm_grasp_recovery_subtree">
<Sequence name="right_grasp_recovery_root">
<GripperCmd1_H name="s15_right_hand_gripper_open" />
<Arm_H name="s15_right_arm_pre_grasp" />
<Arm_H name="s15_right_arm_cam_take_photo" />
<Arm_H name="s15_right_arm_initial" />
<DualArm_H name="s15_right_arm_pre_grasp" />
<DualArm_H name="s15_right_arm_cam_take_photo" />
<DualArm_H name="s15_right_arm_initial" />
<ClearDoneInstances_H name="clear_done_instances_right_grasp" />
</Sequence>
</BehaviorTree>
@@ -191,9 +191,9 @@
<BehaviorTree ID="left_arm_grasp_recovery_subtree">
<Sequence name="left_grasp_recovery_root">
<GripperCmd0_H name="s15_left_hand_gripper_open" />
<Arm_H name="s15_left_arm_pre_grasp" />
<Arm_H name="s15_left_arm_cam_take_photo" />
<Arm_H name="s15_left_arm_initial" />
<DualArm_H name="s15_left_arm_pre_grasp" />
<DualArm_H name="s15_left_arm_cam_take_photo" />
<DualArm_H name="s15_left_arm_initial" />
<ClearDoneInstances_H name="clear_done_instances_left_grasp" />
</Sequence>
</BehaviorTree>
@@ -201,8 +201,8 @@
<BehaviorTree ID="arm_vision_recovery_subtree">
<Sequence name="arm_vision_recovery_root">
<Parallel name="arm_initial3">
<Arm_H name="s16_right_arm_initial" />
<Arm_H name="s16_left_arm_initial" />
<DualArm_H name="s16_right_arm_initial" />
<DualArm_H name="s16_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_arm_vision" />
</Sequence>
@@ -211,7 +211,7 @@
<BehaviorTree ID="left_arm_vision_recovery_subtree">
<Sequence name="left_arm_vision_recovery_root">
<Parallel name="left_arm_initial">
<Arm_H name="s17_left_arm_initial" />
<DualArm_H name="s17_left_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_left_arm_vision" />
</Sequence>
@@ -220,7 +220,7 @@
<BehaviorTree ID="right_arm_vision_recovery_subtree">
<Sequence name="right_arm_vision_recovery_root">
<Parallel name="right_arm_initial">
<Arm_H name="s17_right_arm_initial" />
<DualArm_H name="s17_right_arm_initial" />
</Parallel>
<ClearDoneInstances_H name="clear_done_instances_right_arm_vision" />
</Sequence>
@@ -235,19 +235,19 @@
<!-- 右臂 -->
<Fallback>
<Sequence>
<Arm_H name="s2_right_arm_cam_take_photo" />
<DualArm_H name="s2_right_arm_cam_take_photo" />
<Script code="right_cam_status:=true" />
</Sequence>
<Arm_H name="s15_right_arm_initial" />
<DualArm_H name="s15_right_arm_initial" />
<AlwaysSuccess/>
</Fallback>
<!-- 左臂 -->
<Fallback>
<Sequence>
<Arm_H name="s2_left_arm_cam_take_photo" />
<DualArm_H name="s2_left_arm_cam_take_photo" />
<Script code="left_cam_status:=true" />
</Sequence>
<Arm_H name="s15_left_arm_initial" />
<DualArm_H name="s15_left_arm_initial" />
<AlwaysSuccess/>
</Fallback>
</Parallel>
@@ -265,7 +265,7 @@
<ScriptCondition code="right_done==false &amp;&amp; right_cam_ok==true" />
<Fallback>
<Sequence>
<SubTree ID="right_arm_hand_vision_subtree"/>
<SubTree ID="right_DualArm_Hand_vision_subtree"/>
<Script code="right_v_ok:=true" />
</Sequence>
<AlwaysFailure/>
@@ -279,7 +279,7 @@
<ScriptCondition code="left_done==false &amp;&amp; left_cam_ok==true" />
<Fallback>
<Sequence>
<SubTree ID="left_arm_hand_vision_subtree"/>
<SubTree ID="left_DualArm_Hand_vision_subtree"/>
<Script code="left_v_ok:=true" />
</Sequence>
<AlwaysFailure/>

View File

@@ -23,19 +23,15 @@
'
- name: s1_left_arm_pre_cam_take_photo
params: &pre_cam_take_photo 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [85, 25, 90, 0, 68, 90, 95, -25, -90, 0, -68, 90]
'
params: &pre_cam_take_photo "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85, 25, 90, 0, 68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_right_arm_pre_cam_take_photo
params: &pre_cam_take_photo "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [95, -25, -90, 0, -68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_wheel_move_to_origin_pickup_position
params: 'move_distance: 0.5
@@ -43,69 +39,38 @@
'
- name: s2_left_arm_pre_cam_take_photo
params: 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [120, 20, 80, 0, 80, 120, 60, -20, -80, 0, -80, 60]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [120, 20, 80, 0, 80, 120]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_right_arm_pre_cam_take_photo
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [60, -20, -80, 0, -80, 60]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_top_camera_vision_recg
params: 'camera_position: top
'
- name: s2_left_arm_cam_take_photo
params: &arm_cam_take_photo 'body_id: 0
data_type: 100
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &arm_cam_take_photo "arm_motion_params:\n- arm_id: 0\n motion_type: 100\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s3_left_camera_vision_recg
params: 'camera_position: left
'
- name: s3_left_arm_vision_pre_grasp
params: &pre_grasp 'body_id: 0
data_type: 101
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &pre_grasp "arm_motion_params:\n- arm_id: 0\n motion_type: 101\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_left_arm_vision_grasp
params: 'body_id: 0
data_type: 102
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 102\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_gripper_close
params: 'loc: 200
@@ -117,33 +82,20 @@
'
- name: s4_left_arm_take_box
params: 'body_id: 0
data_type: 110
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 110\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_left_arm_grasp_box
params: 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [84, 88, 45, 22, 16, 68, 95, -25, -90, 0, -68, 90]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [84, 88, 45, 22, 16, 68]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_right_arm_grasp_box
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [95, -25, -90, 0, -68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s6_move_waist_turn_left_90
params: 'move_pitch_degree: 0.0
@@ -157,33 +109,25 @@
'
- name: s7_left_arm_grasp_box
params: &grasp_box_s7_s9 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [85, 88, -14, 22, 16, 68, 95, -25, -90, 0, -68, 90]
'
params: &grasp_box_s7_s9 "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85, 88, -14, 22, 16, 68]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_grasp_box
params: &grasp_box_s7_s9 "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [95, -25, -90, 0, -68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_left_arm_put_down_box
params: 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [95, 88, -14, -22, 16, 112, 95, -25, -90, 0, -68, 90]
'
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [95, 88, -14, -22, 16, 112]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_put_down_box
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [95, -25, -90, 0, -68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s8_gripper_open
params: *gripper_open
- name: s9_left_arm_grasp_box

View File

@@ -50,15 +50,19 @@
<Sequence name="vision_root">
<RetryUntilSuccessful name="retry_vision" num_attempts="1">
<Sequence>
<Arm_H name="s1_left_arm_pre_cam_take_photo" />
<Parallel name="parallel_action_1">
<DualArm_H name="s1_left_arm_pre_cam_take_photo" />
<DualArm_H name="s1_right_arm_pre_cam_take_photo" />
</Parallel>
<Parallel name="parallel_action_2">
<!-- <MoveWheel_H name="s1_wheel_move_to_origin_pickup_position" /> -->
<GripperCmd0_H name="s1_gripper_open" />
<Arm_H name="s2_left_arm_pre_cam_take_photo" />
<DualArm_H name="s2_left_arm_pre_cam_take_photo" />
<DualArm_H name="s2_right_arm_pre_cam_take_photo" />
</Parallel>
<SubTree ID="vision_top_cam_subtree"/>
<Arm_H name="s2_left_arm_cam_take_photo" />
<DualArm_H name="s2_left_arm_cam_take_photo" />
<SubTree ID="vision_hand_cam_subtree"/>
</Sequence>
</RetryUntilSuccessful>
@@ -85,11 +89,14 @@
<Sequence name="grasp_root">
<RetryUntilSuccessful name="retry_grasp" num_attempts="1">
<Sequence>
<Arm_H name="s3_left_arm_vision_pre_grasp" />
<Arm_H name="s4_left_arm_vision_grasp" />
<DualArm_H name="s3_left_arm_vision_pre_grasp" />
<DualArm_H name="s4_left_arm_vision_grasp" />
<GripperCmd0_H name="s4_gripper_close" />
<Arm_H name="s4_left_arm_take_box" />
<Arm_H name="s5_left_arm_grasp_box" />
<DualArm_H name="s4_left_arm_take_box" />
<Parallel name="parallel_action_grasp_box">
<DualArm_H name="s5_left_arm_grasp_box" />
<DualArm_H name="s5_right_arm_grasp_box" />
</Parallel>
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -103,14 +110,20 @@
<MoveWheel_H name="s6_wheel_move_to_putdown_position" />
<MoveWaist_H name="s6_move_waist_turn_left_90" />
</Parallel> -->
<Arm_H name="s7_left_arm_grasp_box" />
<Arm_H name="s7_left_arm_put_down_box" />
<Parallel name="parallel_action_s7_grasp_box">
<DualArm_H name="s7_left_arm_grasp_box" />
<DualArm_H name="s7_right_arm_grasp_box" />
</Parallel>
<Parallel name="parallel_action_s7_put_down_box">
<DualArm_H name="s7_left_arm_put_down_box" />
<DualArm_H name="s7_right_arm_put_down_box" />
</Parallel>
<GripperCmd0_H name="s8_gripper_open" />
<Arm_H name="s9_left_arm_grasp_box" />
<DualArm_H name="s9_left_arm_grasp_box" />
<Parallel name="parallel_action_3">
<!-- <MoveWaist_H name="s10_move_waist_turn_right_90" /> -->
<Arm_H name="s11_left_arm_pre_cam_take_photo" />
<DualArm_H name="s11_left_arm_pre_cam_take_photo" />
</Parallel>
</Sequence>
</RetryUntilSuccessful>
@@ -122,7 +135,7 @@
<RetryUntilSuccessful name="retry_recovery" num_attempts="1">
<Sequence>
<GripperCmd0_H name="s12_gripper_open" />
<Arm_H name="s13_left_arm_pre_cam_take_photo" />
<DualArm_H name="s13_left_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_overall" />
</Sequence>
</RetryUntilSuccessful>
@@ -134,7 +147,7 @@
<RetryUntilSuccessful name="retry_vision_recovery" num_attempts="1">
<Sequence>
<!-- <GripperCmd0_H name="s14_gripper_open" /> -->
<Arm_H name="s14_left_arm_pre_cam_take_photo" />
<DualArm_H name="s14_left_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_vision" />
</Sequence>
</RetryUntilSuccessful>
@@ -146,9 +159,9 @@
<RetryUntilSuccessful name="retry_grasp_recovery" num_attempts="1">
<Sequence>
<GripperCmd0_H name="s15_gripper_open" />
<Arm_H name="s15_arm_pre_grasp" />
<Arm_H name="s15_left_arm_cam_take_photo" />
<Arm_H name="s15_left_arm_pre_cam_take_photo" />
<DualArm_H name="s15_arm_pre_grasp" />
<DualArm_H name="s15_left_arm_cam_take_photo" />
<DualArm_H name="s15_left_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_grasp" />
</Sequence>
</RetryUntilSuccessful>
@@ -160,9 +173,9 @@
<RetryUntilSuccessful name="retry_putdown_recovery" num_attempts="1">
<Sequence>
<GripperCmd0_H name="s16_gripper_open" />
<Arm_H name="s16_arm_pre_grasp" />
<Arm_H name="s16_left_arm_cam_take_photo" />
<Arm_H name="s16_left_arm_pre_cam_take_photo" />
<DualArm_H name="s16_arm_pre_grasp" />
<DualArm_H name="s16_left_arm_cam_take_photo" />
<DualArm_H name="s16_left_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_putdown" />
</Sequence>
</RetryUntilSuccessful>

View File

@@ -22,90 +22,55 @@
mode: 2
'
- name: s1_left_arm_pre_cam_take_photo
params: &pre_cam_take_photo "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85, 25, 90, 0, 68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_right_arm_pre_cam_take_photo
params: &pre_cam_take_photo 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [85, 25, 90, 0, 68, 90, 95, -25, -90, 0, -68, 90]
'
params: &pre_cam_take_photo "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [95, -25, -90, 0, -68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s1_wheel_move_to_origin_pickup_position
params: 'move_distance: 0.5
move_angle: 0.0
'
- name: s2_left_arm_pre_cam_take_photo
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [120, 20, 80, 0, 80, 120]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_right_arm_pre_cam_take_photo
params: 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [120, 20, 80, 0, 80, 120, 60, -20, -80, 0, -80, 60]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [60, -20, -80, 0, -80, 60]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s2_top_camera_vision_recg
params: 'camera_position: top
'
- name: s2_right_arm_cam_take_photo
params: &arm_cam_take_photo 'body_id: 1
data_type: 100
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &arm_cam_take_photo "arm_motion_params:\n- arm_id: 1\n motion_type: 100\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s3_right_camera_vision_recg
params: 'camera_position: right
'
- name: s3_right_arm_vision_pre_grasp
params: &pre_grasp 'body_id: 1
data_type: 101
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: &pre_grasp "arm_motion_params:\n- arm_id: 1\n motion_type: 101\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_right_arm_vision_grasp
params: 'body_id: 1
data_type: 102
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 102\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s4_gripper_close
params: 'loc: 200
@@ -117,33 +82,20 @@
'
- name: s4_right_arm_take_box
params: 'body_id: 1
data_type: 110
data_length: 0
command_id: 0
frame_time_stamp: 0
data_array: []
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 110\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [0, 0, 0, 0, 0, 0]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_left_arm_grasp_box
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85, 25, 90, 0, 68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s5_right_arm_grasp_box
params: 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [85, 25, 90, 0, 68, 90, 96.30, -88.95, -45, -22.88, -16.30, 112.10]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [96.30, -88.95, -45, -22.88, -16.30, 112.10]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s6_move_waist_turn_left_90
params: 'move_pitch_degree: 0.0
@@ -156,34 +108,26 @@
move_angle: 0.0
'
- name: s7_left_arm_grasp_box
params: &grasp_box_s7_s9 "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85, 25, 90, 0, 68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_grasp_box
params: &grasp_box_s7_s9 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [85, 25, 90, 0, 68, 90, 96.30, -88.95, 13.98, -22.88, -16.30, 112.10]
'
params: &grasp_box_s7_s9 "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [96.30, -88.95, 13.98, -22.88, -16.30, 112.10]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_left_arm_put_down_box
params: "arm_motion_params:\n- arm_id: 0\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85, 25, 90, 0, 68, 90]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s7_right_arm_put_down_box
params: 'body_id: 2
data_type: 2
data_length: 12
command_id: 0
frame_time_stamp: 0
data_array: [85, 25, 90, 0, 68, 90, 85.78, -88.95, 13.97, 22.88, -16.29, 67.99]
'
params: "arm_motion_params:\n- arm_id: 1\n motion_type: 0\n target_pose:\n \
\ position:\n x: 0.0\n y: 0.0\n z: 0.0\n orientation:\n \
\ x: 0.0\n y: 0.0\n z: 0.0\n w: 1.0\n target_joints: [85.78, -88.95, 13.97, 22.88, -16.29, 67.99]\n blend_radius:\
\ 0\nvelocity_scaling: 50\n"
- name: s8_gripper_open
params: *gripper_open
- name: s9_right_arm_grasp_box

View File

@@ -50,15 +50,19 @@
<Sequence name="vision_root">
<RetryUntilSuccessful name="retry_vision" num_attempts="1">
<Sequence>
<Arm_H name="s1_right_arm_pre_cam_take_photo" />
<Parallel name="parallel_action_1">
<DualArm_H name="s1_left_arm_pre_cam_take_photo" />
<DualArm_H name="s1_right_arm_pre_cam_take_photo" />
</Parallel>
<Parallel name="parallel_action_2">
<!-- <MoveWheel_H name="s1_wheel_move_to_origin_pickup_position" /> -->
<GripperCmd1_H name="s1_gripper_open" />
<Arm_H name="s2_right_arm_pre_cam_take_photo" />
<DualArm_H name="s2_left_arm_pre_cam_take_photo" />
<DualArm_H name="s2_right_arm_pre_cam_take_photo" />
</Parallel>
<SubTree ID="vision_top_cam_subtree"/>
<Arm_H name="s2_right_arm_cam_take_photo" />
<DualArm_H name="s2_right_arm_cam_take_photo" />
<SubTree ID="vision_hand_cam_subtree"/>
</Sequence>
</RetryUntilSuccessful>
@@ -85,11 +89,14 @@
<Sequence name="grasp_root">
<RetryUntilSuccessful name="retry_grasp" num_attempts="1">
<Sequence>
<Arm_H name="s3_right_arm_vision_pre_grasp" />
<Arm_H name="s4_right_arm_vision_grasp" />
<DualArm_H name="s3_right_arm_vision_pre_grasp" />
<DualArm_H name="s4_right_arm_vision_grasp" />
<GripperCmd1_H name="s4_gripper_close" />
<Arm_H name="s4_right_arm_take_box" />
<Arm_H name="s5_right_arm_grasp_box" />
<DualArm_H name="s4_right_arm_take_box" />
<Parallel name="parallel_action_grasp_box">
<DualArm_H name="s5_left_arm_grasp_box" />
<DualArm_H name="s5_right_arm_grasp_box" />
</Parallel>
</Sequence>
</RetryUntilSuccessful>
</Sequence>
@@ -103,14 +110,20 @@
<MoveWheel_H name="s6_wheel_move_to_putdown_position" />
<MoveWaist_H name="s6_move_waist_turn_left_90" />
</Parallel> -->
<Arm_H name="s7_right_arm_grasp_box" />
<Arm_H name="s7_right_arm_put_down_box" />
<Parallel name="parallel_action_s7_grasp_box">
<DualArm_H name="s7_left_arm_grasp_box" />
<DualArm_H name="s7_right_arm_grasp_box" />
</Parallel>
<Parallel name="parallel_action_s7_put_down_box">
<DualArm_H name="s7_left_arm_put_down_box" />
<DualArm_H name="s7_right_arm_put_down_box" />
</Parallel>
<GripperCmd1_H name="s8_gripper_open" />
<Arm_H name="s9_right_arm_grasp_box" />
<DualArm_H name="s9_right_arm_grasp_box" />
<Parallel name="parallel_action_3">
<!-- <MoveWaist_H name="s10_move_waist_turn_right_90" /> -->
<Arm_H name="s11_right_arm_pre_cam_take_photo" />
<DualArm_H name="s11_right_arm_pre_cam_take_photo" />
</Parallel>
</Sequence>
</RetryUntilSuccessful>
@@ -122,7 +135,7 @@
<RetryUntilSuccessful name="retry_recovery" num_attempts="1">
<Sequence>
<GripperCmd1_H name="s12_gripper_open" />
<Arm_H name="s13_right_arm_pre_cam_take_photo" />
<DualArm_H name="s13_right_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_overall" />
</Sequence>
</RetryUntilSuccessful>
@@ -134,7 +147,7 @@
<RetryUntilSuccessful name="retry_vision_recovery" num_attempts="1">
<Sequence>
<!-- <GripperCmd1_H name="s14_gripper_open" /> -->
<Arm_H name="s14_right_arm_pre_cam_take_photo" />
<DualArm_H name="s14_right_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_vision" />
</Sequence>
</RetryUntilSuccessful>
@@ -146,9 +159,9 @@
<RetryUntilSuccessful name="retry_grasp_recovery" num_attempts="1">
<Sequence>
<GripperCmd1_H name="s15_gripper_open" />
<Arm_H name="s15_arm_pre_grasp" />
<Arm_H name="s15_right_arm_cam_take_photo" />
<Arm_H name="s15_right_arm_pre_cam_take_photo" />
<DualArm_H name="s15_arm_pre_grasp" />
<DualArm_H name="s15_right_arm_cam_take_photo" />
<DualArm_H name="s15_right_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_grasp" />
</Sequence>
</RetryUntilSuccessful>
@@ -160,9 +173,9 @@
<RetryUntilSuccessful name="retry_putdown_recovery" num_attempts="1">
<Sequence>
<GripperCmd1_H name="s16_gripper_open" />
<Arm_H name="s16_arm_pre_grasp" />
<Arm_H name="s16_right_arm_cam_take_photo" />
<Arm_H name="s16_right_arm_pre_cam_take_photo" />
<DualArm_H name="s16_arm_pre_grasp" />
<DualArm_H name="s16_right_arm_cam_take_photo" />
<DualArm_H name="s16_right_arm_pre_cam_take_photo" />
<ClearDoneInstances_H name="clear_done_instances_putdown" />
</Sequence>
</RetryUntilSuccessful>

View File

@@ -1,55 +0,0 @@
# Example configuration file showing how to configure skill-specific parameters
# This can be used as an additional parameters file in addition to the main brain_node_params.yaml
cerebrum_node:
ros__parameters:
# Skill-specific parameters
cerebrum:
bt:
VisionObjectRecognition:
topic: "vision_object_recognition"
payload_yaml: ""
timeout_ms: "10000"
Arm:
topic: "arm_control"
payload_yaml: ""
timeout_ms: "30000"
CameraTakePhoto:
topic: "camera_take_photo"
payload_yaml: ""
timeout_ms: "5000"
HandControl:
topic: "hand_control"
payload_yaml: ""
timeout_ms: "5000"
MoveWaist:
topic: "move_waist"
payload_yaml: ""
timeout_ms: "10000"
MoveLeg:
topic: "move_leg"
payload_yaml: ""
timeout_ms: "15000"
MoveWheel:
topic: "move_wheel"
payload_yaml: ""
timeout_ms: "20000"
MoveHome:
topic: "move_home"
payload_yaml: ""
timeout_ms: "30000"
GripperCmd:
topic: "gripper_cmd0"
payload_yaml: ""
timeout_ms: "5000"
# Example instance-specific overrides.
# NOTE(review): kept as comments — an uncommented second `Arm:` mapping here
# would duplicate the `Arm:` key already defined above under `bt:`, which is
# invalid YAML (most loaders silently keep only one of the two mappings).
# Merge instance overrides into the single `Arm:` block instead, e.g.:
# Arm:
#   s1_arm_move_to_object:
#     topic: "arm_control_specific"
#     payload_yaml: "{target_pose: {x: 0.1, y: 0.2, z: 0.3}}"
#     timeout_ms: "45000"
#   s2_arm_grasp_object:
#     topic: "arm_control_grasp"
#     payload_yaml: "{target_pose: {x: 0.15, y: 0.25, z: 0.35}, grip_force: 10.0}"
#     timeout_ms: "50000"

View File

@@ -1,4 +0,0 @@
#!/bin/bash
# Build the core behavior-tree packages (smacc2 + brain).
# Strict mode: abort on the first failed command or unset variable so a
# broken build is not silently ignored by callers of this script.
set -euo pipefail
colcon build --packages-select smacc2_msgs smacc2 brain
#colcon build --packages-select smacc2_msgs smacc2 brain arm_control leg_control vision_object_recg hand_control

View File

@@ -1,46 +0,0 @@
import numpy as np
from scipy.spatial.transform import Rotation as R

# One-off helper: convert a single Euler-angle triplet (radians, scipy 'xyz'
# order) into a quaternion and print it in (x, y, z, w) layout.
rpy_rad = [-1.468, 0.199, -0.361]

# Build the rotation and extract the quaternion in one chained expression.
quaternion = R.from_euler('xyz', rpy_rad, degrees=False).as_quat()

print(f"四元数 (x, y, z, w): [{quaternion[0]:.8f}, {quaternion[1]:.8f}, {quaternion[2]:.8f}, {quaternion[3]:.8f}]")

View File

@@ -1,207 +0,0 @@
#!/usr/bin/env python3
"""
Convert poses to quaternions (qx, qy, qz, qw) from either:
- Euler angles (rx, ry, rz)
- Rotation vector (Rodrigues) (rx, ry, rz) where |r| is angle in rad
Supports:
- CSV input/output with configurable column names
- Unit options: radians/degrees for angles, mm->m for position
- Euler order: xyz (default) or zyx
Examples:
python src/scripts/euler_to_quaternion.py \
--in poses.csv --out poses_quat.csv --mode euler \
--rx rx --ry ry --rz rz --order xyz \
--x x --y y --z z --mm-to-m
# Single value
python src/scripts/euler_to_quaternion.py --mode euler --single 0.1 0.2 -0.3 --order zyx
# Rotation vector (e.g., from many robots):
python src/scripts/euler_to_quaternion.py --mode rotvec --single -1.433 0.114 -0.430
"""
from __future__ import annotations
import argparse
import csv
import math
from typing import Tuple, List
def euler_to_quaternion(rx: float, ry: float, rz: float, order: str = "xyz", degrees: bool = False) -> Tuple[float, float, float, float]:
    """Convert Euler angles to a unit quaternion in (x, y, z, w) layout.

    Args:
        rx, ry, rz: Rotation angles about x, y and z. Interpreted as degrees
            when ``degrees`` is True, radians otherwise.
        order: Intrinsic rotation order, either 'xyz' (default) or 'zyx'.
        degrees: Whether the input angles are given in degrees.

    Returns:
        Normalized quaternion (qx, qy, qz, qw).

    Raises:
        ValueError: If ``order`` is not a supported value.
    """
    if degrees:
        rx, ry, rz = (math.radians(a) for a in (rx, ry, rz))

    def axis_quat(axis: str, theta: float) -> Tuple[float, float, float, float]:
        # Unit quaternion for a rotation of `theta` about one principal axis.
        half_sin = math.sin(theta / 2.0)
        half_cos = math.cos(theta / 2.0)
        components = {
            'x': (half_sin, 0.0, 0.0),
            'y': (0.0, half_sin, 0.0),
            'z': (0.0, 0.0, half_sin),
        }
        if axis not in components:
            raise ValueError('Invalid axis')
        qx, qy, qz = components[axis]
        return (qx, qy, qz, half_cos)

    def hamilton(a: Tuple[float, float, float, float], b: Tuple[float, float, float, float]) -> Tuple[float, float, float, float]:
        # Hamilton product a ⊗ b, both operands in (x, y, z, w) layout.
        ax, ay, az, aw = a
        bx, by, bz, bw = b
        return (
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by - ax * bz + ay * bw + az * bx,
            aw * bz + ax * by - ay * bx + az * bw,
            aw * bw - ax * bx - ay * by - az * bz,
        )

    if order == 'xyz':
        combined = hamilton(hamilton(axis_quat('x', rx), axis_quat('y', ry)), axis_quat('z', rz))
    elif order == 'zyx':
        combined = hamilton(hamilton(axis_quat('z', rz), axis_quat('y', ry)), axis_quat('x', rx))
    else:
        raise ValueError("Unsupported Euler order. Use 'xyz' or 'zyx'.")

    # Re-normalize; the tiny epsilon guards against division by zero.
    magnitude = math.sqrt(sum(c * c for c in combined))
    return tuple(c / (magnitude + 1e-12) for c in combined)  # type: ignore
def rotvec_to_quaternion(rx: float, ry: float, rz: float) -> Tuple[float, float, float, float]:
    """Convert a rotation vector (Rodrigues) to a quaternion (x, y, z, w).

    The direction of (rx, ry, rz) is the rotation axis and its Euclidean norm
    is the rotation angle in radians. A near-zero vector maps to the identity
    quaternion.
    """
    theta = math.sqrt(rx * rx + ry * ry + rz * rz)
    if theta < 1e-12:
        # Degenerate (no rotation): identity quaternion.
        return (0.0, 0.0, 0.0, 1.0)
    # sin(θ/2) / θ scales the (un-normalized) axis directly; equivalent to
    # normalizing the axis first and multiplying by sin(θ/2).
    scale = math.sin(theta / 2.0) / theta
    raw = (rx * scale, ry * scale, rz * scale, math.cos(theta / 2.0))
    magnitude = math.sqrt(sum(c * c for c in raw))
    return tuple(c / (magnitude + 1e-12) for c in raw)  # type: ignore
def convert_csv(in_path: str, out_path: str,
                rx_col: str = 'rx', ry_col: str = 'ry', rz_col: str = 'rz',
                x_col: str | None = None, y_col: str | None = None, z_col: str | None = None,
                order: str = 'xyz', degrees: bool = False, mm_to_m: bool = False,
                mode: str = 'euler') -> None:
    """Read CSV, append quaternion columns (qx,qy,qz,qw) and write to output.
    If x/y/z columns are provided, optionally convert mm->m.

    Args:
        in_path: Input CSV path; each row must contain the rotation columns.
        out_path: Output CSV path; input columns are preserved, quaternion
            columns appended (formatted with 8 decimal places).
        rx_col, ry_col, rz_col: Names of the rotation-triplet columns.
        x_col, y_col, z_col: Optional position column names; only used for the
            mm->m conversion, and only when all three are provided.
        order: Euler order ('xyz' or 'zyx'); ignored when mode='rotvec'.
        degrees: Whether Euler angles are in degrees (radians otherwise).
        mm_to_m: When True and x/y/z are given, add x_m/y_m/z_m columns
            holding the positions converted from millimetres to metres.
        mode: 'euler' or 'rotvec' — how to interpret (rx, ry, rz).

    Raises:
        KeyError: If a rotation column is missing from the input.
        ValueError: If ``mode`` is neither 'euler' nor 'rotvec'.
    """
    with open(in_path, 'r', newline='') as f_in:
        reader = csv.DictReader(f_in)
        fieldnames: List[str] = list(reader.fieldnames or [])
        # Ensure quaternion columns at the end
        for c in ['qx', 'qy', 'qz', 'qw']:
            if c not in fieldnames:
                fieldnames.append(c)
        # If converting position units, overwrite or add x_m/y_m/z_m
        if x_col and y_col and z_col and mm_to_m:
            for c in ['x_m', 'y_m', 'z_m']:
                if c not in fieldnames:
                    fieldnames.append(c)
        with open(out_path, 'w', newline='') as f_out:
            writer = csv.DictWriter(f_out, fieldnames=fieldnames)
            writer.writeheader()
            for row in reader:
                try:
                    rx = float(row[rx_col])
                    ry = float(row[ry_col])
                    rz = float(row[rz_col])
                except KeyError as e:
                    # Re-raise with a message naming the missing column.
                    raise KeyError(f"Missing rotation columns in CSV: {e}")
                if mode == 'euler':
                    qx, qy, qz, qw = euler_to_quaternion(rx, ry, rz, order=order, degrees=degrees)
                elif mode == 'rotvec':
                    qx, qy, qz, qw = rotvec_to_quaternion(rx, ry, rz)
                else:
                    raise ValueError("mode must be 'euler' or 'rotvec'")
                row.update({'qx': f"{qx:.8f}", 'qy': f"{qy:.8f}", 'qz': f"{qz:.8f}", 'qw': f"{qw:.8f}"})
                # Optional position handling: silently skip rows whose x/y/z
                # values are not numeric (deliberate best-effort behaviour).
                if x_col and y_col and z_col and (x_col in row and y_col in row and z_col in row):
                    try:
                        x = float(row[x_col])
                        y = float(row[y_col])
                        z = float(row[z_col])
                        if mm_to_m:
                            row['x_m'] = f"{x / 1000.0:.6f}"
                            row['y_m'] = f"{y / 1000.0:.6f}"
                            row['z_m'] = f"{z / 1000.0:.6f}"
                    except ValueError:
                        pass
                writer.writerow(row)
def main():
    """CLI entry point: convert one triplet (--single) or a whole CSV."""
    parser = argparse.ArgumentParser(description='Convert pose angles to quaternion (qx,qy,qz,qw) from Euler or rotation vector.')
    parser.add_argument('--mode', type=str, default='euler', choices=['euler', 'rotvec'], help='Input angle format')
    parser.add_argument('--in', dest='in_path', type=str, help='Input CSV file path')
    parser.add_argument('--out', dest='out_path', type=str, help='Output CSV file path')
    parser.add_argument('--rx', type=str, default='rx', help='Column name for rx')
    parser.add_argument('--ry', type=str, default='ry', help='Column name for ry')
    parser.add_argument('--rz', type=str, default='rz', help='Column name for rz')
    parser.add_argument('--x', type=str, default=None, help='Column name for x (position)')
    parser.add_argument('--y', type=str, default=None, help='Column name for y (position)')
    parser.add_argument('--z', type=str, default=None, help='Column name for z (position)')
    parser.add_argument('--order', type=str, default='xyz', choices=['xyz', 'zyx'], help='Euler order')
    parser.add_argument('--degrees', action='store_true', help='Input angles are in degrees (default radians)')
    parser.add_argument('--mm-to-m', action='store_true', help='Convert position units from mm to m when x/y/z provided')
    # single conversion mode
    parser.add_argument('--single', nargs=3, type=float, metavar=('RX', 'RY', 'RZ'),
                        help='Convert a single Euler triplet to quaternion (prints to stdout)')
    args = parser.parse_args()
    # Single-triplet mode: print the quaternion to stdout and exit early.
    if args.single is not None:
        rx, ry, rz = args.single
        if args.mode == 'euler':
            q = euler_to_quaternion(rx, ry, rz, order=args.order, degrees=args.degrees)
        else:
            q = rotvec_to_quaternion(rx, ry, rz)
        print(f"qx,qy,qz,qw = {q[0]:.8f}, {q[1]:.8f}, {q[2]:.8f}, {q[3]:.8f}")
        return
    # CSV mode requires both an input and an output path.
    if not args.in_path or not args.out_path:
        parser.error('CSV mode requires --in and --out')
    convert_csv(
        in_path=args.in_path,
        out_path=args.out_path,
        rx_col=args.rx,
        ry_col=args.ry,
        rz_col=args.rz,
        x_col=args.x,
        y_col=args.y,
        z_col=args.z,
        order=args.order,
        degrees=args.degrees,
        mm_to_m=args.mm_to_m,
        mode=args.mode,
    )
    print(f"Written: {args.out_path}")
if __name__ == '__main__':
    main()

View File

@@ -1,588 +0,0 @@
import numpy as np
import cv2
from typing import List, Tuple
import argparse
import json
import os
import glob
import csv
def hand_eye_calibration(robot_poses: List[np.ndarray],
                         camera_poses: List[np.ndarray],
                         mode: str = 'eye_in_hand') -> np.ndarray:
    """Solve the AX = XB hand-eye calibration problem.

    Args:
        robot_poses: per-frame end-effector poses in the robot base frame
            (4x4 homogeneous, T_b^g, base->gripper).
        camera_poses: per-frame calibration-target poses in the camera frame
            (4x4 homogeneous, T_c^t, target->camera — consistent with
            OpenCV's R_target2cam / t_target2cam).
        mode: 'eye_in_hand' or 'eye_to_hand'.

    Returns:
        X (4x4 homogeneous matrix):
            eye_in_hand: camera^T_end_effector (camera pose in the end-effector frame).
            eye_to_hand: base^T_camera (camera pose in the robot base frame).

    Raises:
        ValueError: mismatched list lengths, fewer than 3 samples, or non-4x4 input.
        RuntimeError: when every available OpenCV hand-eye method fails.
    """
    # Input validation.
    n = len(robot_poses)
    if n != len(camera_poses):
        raise ValueError("机器人位姿数量与相机位姿数量必须相同。")
    if n < 3:
        raise ValueError("至少需要三组数据(且包含显著旋转)才能进行稳定标定。")
    # OpenCV calibrateHandEye takes absolute per-frame poses:
    # - R_gripper2base, t_gripper2base: T_g^b (gripper -> base)
    # - R_target2cam, t_target2cam: T_t^c (target -> camera)
    # Callers must pass robot_poses as T_b^g (base->gripper) and
    # camera_poses as T_c^t (target->camera).
    R_gripper2base: List[np.ndarray] = []
    t_gripper2base: List[np.ndarray] = []
    R_target2cam: List[np.ndarray] = []
    t_target2cam: List[np.ndarray] = []
    for i in range(n):
        T_b_g = robot_poses[i]
        if T_b_g.shape != (4, 4):
            raise ValueError("robot_poses 中的矩阵必须是 4x4")
        # Invert T_b^g to get the T_g^b rotation/translation fed to OpenCV.
        T_g_b = np.linalg.inv(T_b_g)
        R_gripper2base.append(T_g_b[:3, :3])
        t_gripper2base.append(T_g_b[:3, 3].reshape(3, 1))
        T_c_t = camera_poses[i]
        if T_c_t.shape != (4, 4):
            raise ValueError("camera_poses 中的矩阵必须是 4x4")
        R_target2cam.append(T_c_t[:3, :3])
        t_target2cam.append(T_c_t[:3, 3].reshape(3, 1))
    # Candidate algorithms; on failure fall through to the next one.
    # getattr guards against older OpenCV builds missing some flags.
    methods = [
        getattr(cv2, 'CALIB_HAND_EYE_TSAI', None),
        getattr(cv2, 'CALIB_HAND_EYE_PARK', None),
        getattr(cv2, 'CALIB_HAND_EYE_HORAUD', None),
        getattr(cv2, 'CALIB_HAND_EYE_ANDREFF', None),
        getattr(cv2, 'CALIB_HAND_EYE_DANIILIDIS', None),
    ]
    methods = [m for m in methods if m is not None]
    last_err = None
    for method in methods:
        try:
            R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
                R_gripper2base, t_gripper2base,
                R_target2cam, t_target2cam,
                method=method,
            )
            # Success: assemble the homogeneous result.
            X_cam_gripper = np.eye(4)
            X_cam_gripper[:3, :3] = R_cam2gripper
            X_cam_gripper[:3, 3] = t_cam2gripper.flatten()
            if mode == 'eye_in_hand':
                # Return camera^T_gripper directly.
                return X_cam_gripper
            else:
                # eye_to_hand: we need base^T_camera. For every frame i:
                # T_b^c_i = T_b^g_i * T_g^c, where T_g^c = (T_c^g)^{-1}.
                # Average the per-frame estimates for robustness.
                T_g_c = np.linalg.inv(X_cam_gripper)
                T_b_c_list = []
                for T_b_g in robot_poses:
                    T_b_c_list.append(T_b_g @ T_g_c)
                return average_SE3(T_b_c_list)
        except Exception as e:
            last_err = e
            continue
    raise RuntimeError(f"hand-eye 标定失败:{last_err}")
def create_homogeneous_matrix(rvec, tvec):
    """Build a 4x4 homogeneous transform from a Rodrigues vector and a translation.

    Args:
        rvec: 3x1 rotation vector (axis-angle / Rodrigues form).
        tvec: 3x1 translation vector.

    Returns:
        4x4 homogeneous transformation matrix.
    """
    rotation, _jacobian = cv2.Rodrigues(rvec)
    transform = np.eye(4)
    transform[:3, :3] = rotation
    transform[:3, 3] = tvec.flatten()
    return transform
def rot_to_quat(R: np.ndarray) -> np.ndarray:
    """Convert a rotation matrix to a unit quaternion [x, y, z, w] (right-handed).

    Uses the standard trace-based branch selection: divide by whichever of
    (trace, R00, R11, R22) is largest so the divisor S stays well away from zero.
    """
    trace = R[0, 0] + R[1, 1] + R[2, 2]
    if trace > 0.0:
        s = np.sqrt(trace + 1.0) * 2.0
        w = 0.25 * s
        x = (R[2, 1] - R[1, 2]) / s
        y = (R[0, 2] - R[2, 0]) / s
        z = (R[1, 0] - R[0, 1]) / s
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        s = np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2]) * 2.0
        x = 0.25 * s
        y = (R[0, 1] + R[1, 0]) / s
        z = (R[0, 2] + R[2, 0]) / s
        w = (R[2, 1] - R[1, 2]) / s
    elif R[1, 1] > R[2, 2]:
        s = np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2]) * 2.0
        x = (R[0, 1] + R[1, 0]) / s
        y = 0.25 * s
        z = (R[1, 2] + R[2, 1]) / s
        w = (R[0, 2] - R[2, 0]) / s
    else:
        s = np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1]) * 2.0
        x = (R[0, 2] + R[2, 0]) / s
        y = (R[1, 2] + R[2, 1]) / s
        z = 0.25 * s
        w = (R[1, 0] - R[0, 1]) / s
    quat = np.array([x, y, z, w])
    # Normalize; the epsilon guards against division by zero.
    return quat / (np.linalg.norm(quat) + 1e-12)
def quat_to_rot(q: np.ndarray) -> np.ndarray:
    """Convert a quaternion [x, y, z, w] to a 3x3 rotation matrix."""
    x, y, z, w = q
    return np.array([
        [1.0 - 2.0 * (y * y + z * z), 2.0 * (x * y - w * z), 2.0 * (x * z + w * y)],
        [2.0 * (x * y + w * z), 1.0 - 2.0 * (x * x + z * z), 2.0 * (y * z - w * x)],
        [2.0 * (x * z - w * y), 2.0 * (y * z + w * x), 1.0 - 2.0 * (x * x + y * y)],
    ])
def average_SE3(T_list: List[np.ndarray]) -> np.ndarray:
    """Simple average of a list of SE(3) transforms.

    Translation is the arithmetic mean; rotation is the normalized mean of
    unit quaternions, after sign-aligning each quaternion to the first one.
    """
    if not T_list:
        raise ValueError("T_list 为空")
    # Mean translation.
    mean_translation = np.mean([T[:3, 3] for T in T_list], axis=0)
    # Mean rotation: flip each quaternion onto the hemisphere of the first
    # one so that q and -q (the same rotation) do not cancel each other out.
    quats = [rot_to_quat(T[:3, :3]) for T in T_list]
    reference = quats[0]
    aligned = [q if np.dot(q, reference) >= 0 else -q for q in quats]
    mean_quat = np.mean(np.stack(aligned, axis=0), axis=0)
    mean_quat = mean_quat / (np.linalg.norm(mean_quat) + 1e-12)
    result = np.eye(4)
    result[:3, :3] = quat_to_rot(mean_quat)
    result[:3, 3] = mean_translation
    return result
def hand_eye_residual(robot_poses: List[np.ndarray], camera_poses: List[np.ndarray], X_cam_gripper: np.ndarray) -> Tuple[float, float]:
    """Evaluate an AX ≈ XB residual for a candidate hand-eye solution.

    Builds relative motions between consecutive frames (to cancel any constant
    bias) and measures how far AX and XB disagree.

    Args:
        robot_poses: per-frame T_b^g (base->gripper), 4x4 homogeneous matrices.
        camera_poses: per-frame T_c^t (target->camera), 4x4 homogeneous matrices.
        X_cam_gripper: candidate hand-eye transform (OpenCV's camera->gripper).

    Returns:
        (mean rotation error in degrees, mean translation error in meters).

    Raises:
        ValueError: if fewer than two pose pairs are given (no relative motion,
            previously produced NaN via np.mean of an empty list).
    """
    n = len(robot_poses)
    if n < 2:
        raise ValueError("至少需要两组位姿才能计算相对运动残差")
    # Loop-invariant: inverse of the candidate transform, hoisted out of the loop.
    X_inv = np.linalg.inv(X_cam_gripper)
    rot_errs = []
    trans_errs = []
    for i in range(n - 1):
        # Relative motions between consecutive frames.
        A = np.linalg.inv(robot_poses[i + 1]) @ robot_poses[i]  # T_b^g_{i+1}^{-1} * T_b^g_i
        B = np.linalg.inv(camera_poses[i + 1]) @ camera_poses[i]  # T_c^t_{i+1}^{-1} * T_c^t_i
        # Residual transform between AX and XB; identity means a perfect fit.
        AX = A @ X_inv
        XB = X_inv @ B
        Delta = np.linalg.inv(AX) @ XB
        R = Delta[:3, :3]
        t = Delta[:3, 3]
        # Rotation angle of the residual; clip keeps arccos numerically safe.
        angle = np.rad2deg(np.arccos(np.clip((np.trace(R) - 1) / 2.0, -1.0, 1.0)))
        rot_errs.append(abs(angle))
        trans_errs.append(np.linalg.norm(t))
    return float(np.mean(rot_errs)), float(np.mean(trans_errs))
# ==================== 示例用法 ====================
def generate_synthetic_dataset(num_poses: int = 12, seed: int = 42) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """Generate a solvable synthetic dataset: (list of T_b^g, list of T_c^t)."""
    rng = np.random.default_rng(seed)

    def skew(v):
        # Cross-product (hat) matrix of a 3-vector.
        return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])

    def so3_exp(w):
        # Rodrigues formula: axis-angle vector -> rotation matrix.
        theta = np.linalg.norm(w)
        if theta < 1e-12:
            return np.eye(3)
        K = skew(w / theta)
        return np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * (K @ K)

    def homogeneous(R, t):
        T = np.eye(4)
        T[:3, :3] = R
        T[:3, 3] = t
        return T

    # Ground-truth unknown: camera pose in the gripper frame, T_c^g.
    T_c_g_true = homogeneous(so3_exp(np.deg2rad([10, -5, 15])),
                             np.array([0.03, -0.02, 0.15]))
    # Fixed target (board) pose in the base frame, T_b^t.
    T_b_t = homogeneous(so3_exp(np.deg2rad([0, 0, 0])),
                        np.array([0.4, 0.0, 0.3]))
    T_g_c = np.linalg.inv(T_c_g_true)

    robot_poses: List[np.ndarray] = []   # T_b^g
    camera_poses: List[np.ndarray] = []  # T_c^t
    for _ in range(num_poses):
        # Multi-axis, reasonably large motions to avoid degenerate geometry.
        angles = np.deg2rad(rng.uniform([-30, -30, -30], [30, 30, 30]))
        R_b_g = np.eye(3) @ rot_perturb(angles)
        t_b_g = rng.uniform([-0.2, -0.2, 0.3], [0.2, 0.2, 0.6])
        T_b_g = homogeneous(R_b_g, t_b_g)
        robot_poses.append(T_b_g)
        # Camera pose: T_b^c = T_b^g * T_g^c; target in camera frame:
        # T_c^t = (T_b^c)^{-1} * T_b^t.
        T_c_t = np.linalg.inv(T_b_g @ T_g_c) @ T_b_t
        camera_poses.append(T_c_t)
    return robot_poses, camera_poses
def rot_perturb(ang_vec: np.ndarray) -> np.ndarray:
    """Axis-angle vector (radians) to rotation matrix via the Rodrigues formula."""
    theta = np.linalg.norm(ang_vec)
    if theta < 1e-12:
        # Near-zero angle: rotation is (numerically) the identity.
        return np.eye(3)
    ux, uy, uz = ang_vec / theta
    K = np.array([[0.0, -uz, uy],
                  [uz, 0.0, -ux],
                  [-uy, ux, 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)
def to_json_SE3(T: np.ndarray) -> dict:
    """Serialize a 4x4 SE(3) transform into a JSON-friendly dict."""
    rotation = T[:3, :3]
    translation = T[:3, 3]
    return {
        "matrix": T.tolist(),
        "rotation_matrix": rotation.tolist(),
        "translation": translation.tolist(),
        "quaternion_xyzw": rot_to_quat(rotation).tolist(),
    }
def load_se3_matrix(path: str) -> np.ndarray:
    """Load a 4x4 homogeneous matrix. Supports .npy / .npz / .json files."""
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    ext = os.path.splitext(path)[1].lower()
    if ext == ".npy":
        M = np.load(path)
    elif ext == ".npz":
        archive = np.load(path)
        # Prefer the 'matrix' entry; otherwise fall back to the first array.
        key = 'matrix' if 'matrix' in archive.files else archive.files[0]
        M = archive[key]
    elif ext == ".json":
        with open(path, 'r') as f:
            payload = json.load(f)
        # Either {"matrix": [...]} or the bare matrix itself.
        M = np.array(payload.get('matrix', payload))
    else:
        raise ValueError(f"不支持的文件类型: {ext}")
    M = np.array(M)
    if M.shape != (4, 4):
        raise ValueError(f"矩阵形状应为(4,4),实际为{M.shape}")
    return M
def load_camera_calib(path: str) -> Tuple[np.ndarray, np.ndarray]:
    """Load camera intrinsics from an OpenCV-style YAML/JSON file.

    Accepts key aliases camera_matrix/cameraMatrix/K and
    dist_coeffs/distCoeffs/D; values may be flat lists or OpenCV's
    {'data': [...]} mapping.

    Returns:
        (K, D): 3x3 camera matrix and (N, 1) distortion coefficients.

    Raises:
        ValueError: when the file cannot be parsed or required keys are missing.
    """
    # Fix: PyYAML is optional. Previously a missing PyYAML raised an
    # uncaught ImportError even though a JSON fallback path existed.
    try:
        import yaml
    except ImportError:
        yaml = None
    with open(path, 'r') as f:
        txt = f.read()
    data = None
    if yaml is not None:
        try:
            data = yaml.safe_load(txt)
        except Exception:
            data = None
    if data is None:
        try:
            data = json.loads(txt)
        except Exception:
            raise ValueError("无法解析相机标定文件,支持 YAML/JSON包含 camera_matrix/cameraMatrix/K 和 dist_coeffs/distCoeffs/D 字段")
    # Accept several common key spellings.
    K = data.get('camera_matrix') or data.get('cameraMatrix') or data.get('K')
    if isinstance(K, dict) and 'data' in K:
        K = K['data']
    D = data.get('dist_coeffs') or data.get('distCoeffs') or data.get('D')
    if isinstance(D, dict) and 'data' in D:
        D = D['data']
    if K is None or D is None:
        raise ValueError("标定文件需包含 camera_matrix/cameraMatrix/K 与 dist_coeffs/distCoeffs/D")
    K = np.array(K, dtype=float).reshape(3, 3)
    D = np.array(D, dtype=float).reshape(-1, 1)
    return K, D
def build_chessboard_object_points(rows: int, cols: int, square: float) -> np.ndarray:
    """Generate chessboard 3D corner points (meters) on the Z=0 plane.

    The origin is the first board corner; points are ordered x-fastest,
    matching OpenCV's findChessboardCorners ordering.
    """
    xs, ys = np.meshgrid(np.arange(cols), np.arange(rows))
    objp = np.zeros((rows * cols, 3), np.float32)
    objp[:, 0] = xs.ravel() * square
    objp[:, 1] = ys.ravel() * square
    return objp
def detect_chessboard(gray: np.ndarray, rows: int, cols: int) -> Tuple[bool, np.ndarray]:
    """Find and refine chessboard inner corners.

    Returns (ok, corners) where corners is an (N, 2) array, or (False, None)
    when the board was not found.
    """
    found, corners = cv2.findChessboardCorners(
        gray, (cols, rows),
        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE)
    if not found:
        return False, None
    # Sub-pixel refinement of the detected corner positions.
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    refined = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), term)
    return True, refined.reshape(-1, 2)
def detect_charuco(gray: np.ndarray, rows: int, cols: int, square: float, marker: float, dict_name: str = 'DICT_4X4_50') -> Tuple[bool, np.ndarray, np.ndarray]:
    """Detect Charuco corners; returns (ok, charuco_corners, charuco_ids).

    Requires opencv-contrib-python (the cv2.aruco module).

    Args:
        gray: grayscale input image.
        rows, cols: board size in squares.
        square: square side length (meters).
        marker: ArUco marker side length (meters).
        dict_name: name of a cv2.aruco predefined dictionary.

    Raises:
        RuntimeError: when cv2.aruco is unavailable.
    """
    if not hasattr(cv2, 'aruco'):
        raise RuntimeError("需要 opencv-contrib-python 才能使用 Charuco。请安装 opencv-contrib-python==4.11.0.86")
    aruco = cv2.aruco
    # Fix: removed the unused `dictionary = getattr(aruco, dict_name)` lookup;
    # the dictionary object is resolved exactly once below.
    aruco_dict = aruco.getPredefinedDictionary(getattr(aruco, dict_name))
    board = aruco.CharucoBoard((cols, rows), square, marker, aruco_dict)
    detector = aruco.ArucoDetector(aruco_dict, aruco.DetectorParameters())
    corners, ids, _ = detector.detectMarkers(gray)
    if ids is None or len(ids) == 0:
        return False, None, None
    # Interpolate chessboard-corner positions from the detected markers.
    retval, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(corners, ids, gray, board)
    ok = retval is not None and retval >= 4  # need >= 4 corners for a usable pose
    return ok, (charuco_corners.reshape(-1, 2) if ok else None), (charuco_ids if ok else None)
def solve_pnp_from_image(image_path: str, K: np.ndarray, D: np.ndarray,
                         pattern: str, rows: int, cols: int, square: float,
                         charuco_marker: float = None) -> np.ndarray:
    """Estimate the board pose T_c^t (target->camera) from one image via solvePnP.

    Args:
        image_path: image file path (read as grayscale).
        K: 3x3 camera matrix.
        D: distortion coefficients.
        pattern: 'chessboard' or 'charuco'.
        rows, cols: board inner-corner counts.
        square: square size in meters.
        charuco_marker: Charuco marker size in meters (required for 'charuco').

    Returns:
        4x4 homogeneous matrix T_c^t.

    Raises:
        FileNotFoundError: when the image cannot be read.
        RuntimeError: when board detection or solvePnP fails.
        ValueError: on an unsupported pattern or missing charuco_marker.
    """
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError(f"无法读取图像: {image_path}")
    if pattern == 'chessboard':
        ok, corners = detect_chessboard(img, rows, cols)
        if not ok:
            raise RuntimeError(f"棋盘检测失败: {image_path}")
        objp = build_chessboard_object_points(rows, cols, square)
        # PnP
        retval, rvec, tvec = cv2.solvePnP(objp, corners, K, D, flags=cv2.SOLVEPNP_ITERATIVE)
        if not retval:
            raise RuntimeError(f"solvePnP 失败: {image_path}")
        return create_homogeneous_matrix(rvec, tvec)
    elif pattern == 'charuco':
        if charuco_marker is None:
            raise ValueError("使用 charuco 时必须提供 --charuco-marker")
        ok, charuco_corners, charuco_ids = detect_charuco(img, rows, cols, square, charuco_marker)
        if not ok:
            raise RuntimeError(f"Charuco 检测失败: {image_path}")
        # Build matching 3D points from the board's corner indices.
        # NOTE(review): the dictionary is hard-coded to DICT_4X4_50 here,
        # matching detect_charuco's default dict_name.
        aruco = cv2.aruco
        aruco_dict = aruco.getPredefinedDictionary(getattr(aruco, 'DICT_4X4_50'))
        board = aruco.CharucoBoard((cols, rows), square, charuco_marker, aruco_dict)
        obj_pts = []
        img_pts = []
        for idx, corner in zip(charuco_ids.flatten(), charuco_corners.reshape(-1, 2)):
            obj_pts.append(board.getChessboardCorners()[idx][0])
            img_pts.append(corner)
        obj_pts = np.array(obj_pts, dtype=np.float32)
        img_pts = np.array(img_pts, dtype=np.float32)
        retval, rvec, tvec = cv2.solvePnP(obj_pts, img_pts, K, D, flags=cv2.SOLVEPNP_ITERATIVE)
        if not retval:
            raise RuntimeError(f"solvePnP 失败: {image_path}")
        return create_homogeneous_matrix(rvec, tvec)
    else:
        raise ValueError("pattern 仅支持 chessboard 或 charuco")
def load_robot_poses_file(path: str) -> List[np.ndarray]:
    """Load robot end-effector poses as a list of 4x4 matrices.

    Supported formats:
        .npz  -- array 'robot_poses' of shape (N, 4, 4)
        .json -- list of 4x4 matrices, or a dict with key 'robot_poses'
        .csv  -- columns image, tx, ty, tz, qx, qy, qz, qw (one row per frame)
    """
    ext = os.path.splitext(path)[1].lower()
    if ext == '.npz':
        archive = np.load(path)
        stack = archive['robot_poses']
        assert stack.ndim == 3 and stack.shape[1:] == (4, 4)
        return [stack[i] for i in range(stack.shape[0])]
    if ext == '.json':
        with open(path, 'r') as f:
            payload = json.load(f)
        # Either {"robot_poses": [...]} or a bare list of matrices.
        if isinstance(payload, dict) and 'robot_poses' in payload:
            matrices = payload['robot_poses']
        else:
            matrices = payload
        return [np.array(M).reshape(4, 4) for M in matrices]
    if ext == '.csv':
        poses = []
        with open(path, 'r') as f:
            for row in csv.DictReader(f):
                translation = [float(row['tx']), float(row['ty']), float(row['tz'])]
                quat = np.array([float(row['qx']), float(row['qy']),
                                 float(row['qz']), float(row['qw'])])
                T = np.eye(4)
                T[:3, :3] = quat_to_rot(quat)
                T[:3, 3] = np.array(translation)
                poses.append(T)
        return poses
    raise ValueError(f"不支持的 robot poses 文件类型: {ext}")
# CLI entry point: build or load a (robot_poses, camera_poses) dataset, run
# hand-eye calibration in one or both modes, optionally evaluate residuals
# and write the result to JSON.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Hand-Eye calibration (eye-in-hand / eye-to-hand)")
    parser.add_argument("--data", type=str, default=None,
                        help=".npz dataset with arrays 'robot_poses' and 'camera_poses' of shape (N,4,4)")
    parser.add_argument("--mode", type=str, default=None, choices=["eye_in_hand", "eye_to_hand"],
                        help="Calibration mode. If omitted, both modes are computed")
    parser.add_argument("--out", type=str, default=None, help="Output JSON file for the calibration result")
    parser.add_argument("--export-dataset", type=str, default=None,
                        help="Export the dataset used (robot_poses,camera_poses) to .npz at this path")
    parser.add_argument("--seed", type=int, default=42, help="Synthetic dataset RNG seed when no --data is provided")
    parser.add_argument("--num-poses", type=int, default=12, help="Synthetic dataset pose count when no --data is provided")
    parser.add_argument("--eval", action="store_true", help="Compute simple AX≈XB residual metrics (eye-in-hand)")
    # Image-based camera pose estimation
    parser.add_argument("--images", type=str, default=None,
                        help="Directory or glob for images captured by the depth camera (color/IR). Sorted alphanumerically.")
    parser.add_argument("--camera-calib", type=str, default=None, help="Camera intrinsic file (YAML/JSON)")
    parser.add_argument("--pattern", type=str, default="chessboard", choices=["chessboard", "charuco"], help="Calibration board pattern")
    parser.add_argument("--board-rows", type=int, default=6, help="Inner rows of chessboard/charuco")
    parser.add_argument("--board-cols", type=int, default=9, help="Inner cols of chessboard/charuco")
    parser.add_argument("--square-size", type=float, default=0.024, help="Square size in meters")
    parser.add_argument("--charuco-marker", type=float, default=None, help="Charuco marker size in meters (for charuco only)")
    parser.add_argument("--robot-poses-file", type=str, default=None, help="Path to robot poses file (.npz/.json/.csv)")
    # Frames handling
    parser.add_argument("--robot-poses-frame", type=str, default="robot_base", choices=["robot_base", "arm_base"],
                        help="Frame of robot_poses in the dataset: robot_base (default) or arm_base")
    parser.add_argument("--arm-to-robot-base", type=str, default=None,
                        help="Optional path to 4x4 matrix of T_robot_base^arm_base (compose robot poses to robot base)")
    args = parser.parse_args()
    # Load or generate dataset
    if args.data:
        d = np.load(args.data)
        robot_arr = d["robot_poses"]
        cam_arr = d["camera_poses"]
        assert robot_arr.ndim == 3 and robot_arr.shape[1:] == (4, 4)
        assert cam_arr.ndim == 3 and cam_arr.shape[1:] == (4, 4)
        robot_poses = [robot_arr[i] for i in range(robot_arr.shape[0])]
        camera_poses = [cam_arr[i] for i in range(cam_arr.shape[0])]
        print(f"已从 {args.data} 加载数据集,共 {len(robot_poses)} 组")
    elif args.images and args.robot_poses_file and args.camera_calib:
        # Build dataset from images + robot poses
        # 1) images
        if os.path.isdir(args.images):
            img_paths = sorted(glob.glob(os.path.join(args.images, '*')))
        else:
            img_paths = sorted(glob.glob(args.images))
        if not img_paths:
            raise FileNotFoundError(f"未找到图像: {args.images}")
        # 2) robot poses
        robot_poses = load_robot_poses_file(args.robot_poses_file)
        if len(robot_poses) != len(img_paths):
            raise ValueError(f"robot poses 数量({len(robot_poses)})与图像数量({len(img_paths)})不一致。请确保一一对应且顺序匹配。")
        # 3) camera intrinsics
        K, D = load_camera_calib(args.camera_calib)
        # 4) detect and solvePnP
        camera_poses = []
        for img in img_paths:
            T_c_t = solve_pnp_from_image(img, K, D, args.pattern, args.board_rows, args.board_cols, args.square_size, args.charuco_marker)
            camera_poses.append(T_c_t)
        print(f"已从图像与位姿构建数据集,共 {len(camera_poses)} 组")
    else:
        # Fall back to a synthetic dataset for self-testing.
        robot_poses, camera_poses = generate_synthetic_dataset(num_poses=args.num_poses, seed=args.seed)
        print(f"生成了 {len(robot_poses)} 组模拟位姿数据 (seed={args.seed})")
    # Optional export of dataset for reproducibility
    if args.export_dataset:
        np.savez(args.export_dataset,
                 robot_poses=np.stack(robot_poses, axis=0),
                 camera_poses=np.stack(camera_poses, axis=0))
        print(f"已导出数据集到: {args.export_dataset}")
    # Frame composition: if robot_poses are in arm_base, optionally compose to robot_base
    reference_base = "robot_base"
    if args.robot_poses_frame == "arm_base":
        reference_base = "arm_base"
        if args.arm_to_robot_base:
            T_rb_ab = load_se3_matrix(args.arm_to_robot_base)  # T_robot_base^arm_base
            robot_poses = [T_rb_ab @ T_ab_g for T_ab_g in robot_poses]
            reference_base = "robot_base"
            print("已将 arm_base 框架下的末端位姿转换到 robot_base 框架")

    def run_mode(mode_name: str) -> np.ndarray:
        # Run one calibration mode and print its result.
        print(f"\n=== 标定模式: {mode_name} ===")
        X = hand_eye_calibration(robot_poses, camera_poses, mode=mode_name)
        print("标定结果 X:")
        print(X)
        if mode_name == 'eye_to_hand':
            print(f"参考基座: {reference_base}")
            print(f"平移部分: [{X[0, 3]:.4f}, {X[1, 3]:.4f}, {X[2, 3]:.4f}]")
        return X

    results = {}
    try:
        if args.mode:
            X = run_mode(args.mode)
            results[args.mode] = X
        else:
            X_eih = run_mode('eye_in_hand')
            X_eth = run_mode('eye_to_hand')
            results['eye_in_hand'] = X_eih
            results['eye_to_hand'] = X_eth
        if args.eval:
            # Residuals are defined for eye-in-hand (needs X_cam_gripper);
            # compute it here if that mode was not already run.
            if 'eye_in_hand' in results:
                X_cam_gripper = results['eye_in_hand']
            else:
                X_cam_gripper = hand_eye_calibration(robot_poses, camera_poses, mode='eye_in_hand')
            rot_err_deg, trans_err = hand_eye_residual(robot_poses, camera_poses, X_cam_gripper)
            print(f"残差评估:旋转均值 {rot_err_deg:.3f} 度,平移均值 {trans_err:.4f} m")
        if args.out:
            # Write only the selected mode, or both when no mode was given.
            payload = {"metadata": {"reference_base": reference_base,
                                    "robot_poses_frame": args.robot_poses_frame,
                                    "composed_with_arm_to_robot_base": bool(args.arm_to_robot_base)}}
            if args.mode:
                payload[args.mode] = to_json_SE3(results[args.mode])
            else:
                payload.update({k: to_json_SE3(v) for k, v in results.items()})
            with open(args.out, 'w') as f:
                json.dump(payload, f, indent=2)
            print(f"已保存标定结果到: {args.out}")
    except Exception as e:
        print(f"标定过程中发生错误: {e}")
    # ==================== Practical usage notes ====================
    if not args.data:
        print("\n=== 实际应用说明 ===")
        print("1. 从机器人控制器读取末端位姿 (通常是4x4齐次矩阵或位姿向量注意这是 基座->末端 T_b^g)")
        print("2. 使用 OpenCV solvePnP 从标定板图像解算 T_c^t (标定板到相机):")
        print("   retval, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)")
        print("   camera_pose = create_homogeneous_matrix(rvec, tvec)  # 即 T_c^t")
        print("3. 收集多组对应的 T_b^g 和 T_c^t包含多轴较大旋转>= 8-12 组)")
        print("4. 运行本脚本 --data dataset.npz --mode eye_to_hand --out result.json --eval")
        print("\n若 robot_poses 在 arm_base 框架下:")
        print("- 如无 arm_base->robot_base 变换,脚本将输出 arm_base^T_cameraeye_to_hand")
        print("- 若提供 --arm-to-robot-base T_robot_base_from_arm_base.npy将自动转换并输出 robot_base^T_camera")

View File

@@ -1,224 +0,0 @@
# 手眼标定Eye-in-Hand / Eye-to-Hand操作指南
本指南配合脚本 `src/scripts/hand_eye_cali.py` 使用,完成相机与机器人之间的外参(手眼)标定。脚本基于 OpenCV 的 `calibrateHandEye` 实现,支持“眼在手上 (eye-in-hand)”与“眼在手外 (eye-to-hand)”两种模式。
---
## 一、原理与坐标定义
- 机器人末端在基座坐标系下的位姿:`T_b^g`4x4 齐次矩阵)
- 标定板在相机坐标系下的位姿:`T_c^t`4x4 齐次矩阵),可由 `solvePnP` 得到
- 需求目标:
- 眼在手上 (eye-in-hand):求 `T_c^g`(相机在末端坐标系下的位姿)
- 眼在手外 (eye-to-hand):求 `T_b^c`(相机在基座坐标系下的位姿)
OpenCV `calibrateHandEye` 的输入为每一帧的“绝对位姿”:
- `R_gripper2base, t_gripper2base` 对应 `T_g^b`(由 `T_b^g` 求逆获得)
- `R_target2cam, t_target2cam` 对应 `T_t^c``T_c^t` 的旋转和平移部分)
---
## 二、前置条件
- 已有相机的内参fx, fy, cx, cy, 畸变等),并能在图像中稳定检测到标定板角点或 AprilTag/ArUco 标记
- 能够从机器人系统读取末端在基座下的位姿 `T_b^g`
- 至少采集 812 组数据,包含多轴、较大幅度旋转和平移(信息量不足会导致求解失败或误差大)
---
## 三、数据采集
1. 固定标定板(推荐刚性固定在稳定位置),或在“眼在手上”场景中保持相机跟随末端运动
2. 对每一帧:
- 记录机器人末端位姿 `T_b^g`4x4
- 在对应图像中用 `solvePnP` 算法求解 `T_c^t`4x4
3. 采集方式一:直接保存数据集 `.npz`
- 数组名必须是 `robot_poses``camera_poses`
- 形状为 `(N, 4, 4)`
数据格式示例(Python 生成):
```python
np.savez('dataset.npz',
robot_poses=np.stack(robot_pose_list, axis=0),
camera_poses=np.stack(camera_pose_list, axis=0))
```
4. 采集方式二:仅保存图像与机器人位姿
- 保存 N 张图像(彩色或红外),用于检测棋盘/Charuco 角点
- 同时保存 N 条机器人末端位姿 `T_b^g`(或 `T_ab^g`)到 `.csv/.json/.npz`
- 运行时脚本会:
1) 从相机内参文件YAML/JSON读入 `K,D`
2) 对每张图像用 `solvePnP` 求解 `T_c^t`
3) 组合成数据集并求解手眼外参
> 仅有机械臂基座arm_base的位姿可以吗可以。
> - 如果记录的是 `T_ab^g`arm_base->末端),也能完成手眼标定。
> - `eye_to_hand` 模式下,脚本将输出 `arm_base^T_camera`。若 `T_rb^ab`robot_base->arm_base可传给脚本自动合成 `robot_base^T_camera`。
## 四、脚本使用方法
脚本路径:`src/scripts/hand_eye_cali.py`
- 计算两种模式并导出 JSON
```bash
python src/scripts/hand_eye_cali.py \
--data dataset.npz \
--out handeye_result.json \
--eval
```
- 仅计算眼在手外 (eye-to-hand)
```bash
python src/scripts/hand_eye_cali.py \
--data dataset.npz \
--mode eye_to_hand \
--out handeye_result.json
```
- 没有数据时,可生成模拟数据用于测试:
```bash
python src/scripts/hand_eye_cali.py \
--num-poses 12 --eval \
--out /tmp/handeye_result.json \
--export-dataset /tmp/handeye_dataset.npz
```
### 仅用图像 + 末端位姿进行标定
前提准备好相机标定文件YAML/JSON包含 `cameraMatrix`/`camera_matrix`/`K``distCoeffs`/`dist_coeffs`/`D`)。
- 棋盘格(默认 6x9 内角点,格长 0.024 m
```bash
python src/scripts/hand_eye_cali.py \
--images data/images/*.png \
--camera-calib data/camera.yaml \
--robot-poses-file data/robot_poses.csv \
--pattern chessboard --board-rows 6 --board-cols 9 --square-size 0.024 \
--robot-poses-frame arm_base \
--mode eye_to_hand \
--out handeye_result.json --eval
```
- Charuco需要 opencv-contrib-python
```bash
python src/scripts/hand_eye_cali.py \
--images data/images/*.png \
--camera-calib data/camera.yaml \
--robot-poses-file data/robot_poses.csv \
--pattern charuco --board-rows 6 --board-cols 9 --square-size 0.024 --charuco-marker 0.016 \
--robot-poses-frame robot_base \
--mode eye_to_hand \
--out handeye_result.json --eval
```
robot_poses.csv 示例(头部为列名,按行对齐图像顺序):
```csv
image,tx,ty,tz,qx,qy,qz,qw
0001.png,0.10,0.00,0.45,0.00,0.00,0.00,1.00
0002.png,0.10,0.05,0.45,0.05,0.00,0.00,0.9987
...
```
相机 YAML 示例(可兼容 OpenCV 格式):
```yaml
camera_matrix: {data: [fx, 0, cx, 0, fy, cy, 0, 0, 1]}
dist_coeffs: {data: [k1, k2, p1, p2, k3]}
```
### 参数说明
- `--data`: 读取 `.npz` 数据集(包含 `robot_poses``camera_poses`
- `--mode`: `eye_in_hand``eye_to_hand`(缺省则两者都算)
- `--out`: 输出 JSON 路径(包含 4x4 矩阵、旋转矩阵、平移、四元数)
- `--eval`: 计算 AX≈XB 的简单残差(眼在手上),用于快速自检
- `--export-dataset`: 将当前使用的数据集导出(便于复现)
- `--num-poses`, `--seed`: 生成模拟数据的数量与种子(无 `--data` 时生效)
- `--robot-poses-frame`: `robot_base`(默认)或 `arm_base`,用于指明 `robot_poses` 的基座框架
- `--arm-to-robot-base`: `T_robot_base^arm_base` 的 4x4 矩阵文件路径(.npy/.npz/.json若提供则在内部进行 `T_b^g = T_rb^ab @ T_ab^g` 组合
- `--images`: 图像目录或通配(按文件名排序)
- `--camera-calib`: 相机内参文件(YAML/JSON)
- `--pattern`: `chessboard` 或 `charuco`
- `--board-rows/--board-cols/--square-size`: 标定板参数(单位:米)
- `--charuco-marker`: Charuco 的方块内 marker 尺寸(米)
- `--robot-poses-file`: 末端位姿文件(.csv/.json/.npz),与图像一一对应
---
## 五、结果解释与落地
- 脚本输出 JSON 中字段:
- `matrix`: 4x4 齐次矩阵(行主序)
- `rotation_matrix`: 3x3 旋转矩阵
- `translation`: 3 维平移 `[x, y, z]`(单位:米)
- `quaternion_xyzw`: 四元数 `[x, y, z, w]`
- 眼在手外结果:
-`--robot-poses-frame robot_base` 或提供了 `--arm-to-robot-base`,则输出为 `robot_base^T_camera`
-`--robot-poses-frame arm_base` 且未提供 arm->robot 变换,则输出为 `arm_base^T_camera`
- 发布 TF 示例:
```bash
ros2 run tf2_ros static_transform_publisher \
x y z qx qy qz qw \
base_link camera_link
```
`x y z qx qy qz qw` 替换为 JSON 中的 `translation``quaternion_xyzw`,帧名根据机器人而定(如 `base_link``camera_link`)。
- 眼在手上(`camera^T_end_effector`)则在末端坐标系下描述相机位姿,常用于抓取/视觉伺服求解。
---
## 六、常见问题与排查
- 报错“Not enough informative motions”或残差很大
- 增加数据数量(>=12
- 扩大旋转幅度,覆盖多个轴,避免纯平移或单轴小角度
- 确认 `T_b^g``T_c^t` 的定义方向正确(基座->末端、标定板->相机)
- `solvePnP` 不稳定/跳变:
- 使用更鲁棒的标定板AprilTag/Charuco
- 固定/稳定曝光,提升角点检测质量
- 确认相机内参/畸变准确
- 结果帧名/方向不符合期望:
- 仔细对照:脚本的 `eye_to_hand` 输出的是 `base^T_camera``eye_in_hand` 输出的是 `camera^T_end_effector`
- 若需要 `end_effector^T_camera`,取逆即可
---
## 七、建议采集策略
- 首先让末端在 3 个轴上分别做正/反方向旋转,各配合一定平移
- 保证每一帧采集时 `T_b^g``T_c^t` 时间匹配(尽量同步)
- 目标板尽可能占据成像区域较大比例,避免深远距离下的姿态估计不稳定
---
## 八、附录:数据制作参考
使用 `solvePnP` 生成 `T_c^t`
```python
# objectPoints: (N,3) mm 或 mimagePoints: (N,2) 像素
retval, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)
T_c_t = create_homogeneous_matrix(rvec, tvec)
```
保存数据集:
```python
np.savez('dataset.npz',
robot_poses=np.stack(robot_pose_list, axis=0),
camera_poses=np.stack(camera_pose_list, axis=0))
```
`robot_poses` 是以机械臂基座 `arm_base` 为基准,且已知 `T_robot_base^arm_base`,可以在运行时提供:
```bash
python src/scripts/hand_eye_cali.py \
--data dataset.npz \
--robot-poses-frame arm_base \
--arm-to-robot-base T_robot_base_from_arm_base.npy \
--mode eye_to_hand \
--out handeye_result.json
```
---
## 九、数据处理脚本
提供一个模板脚本,读取机器人驱动与视觉检测的实时话题,自动同步采样并生成 dataset.npz
将手眼标定结果直接写为 ROS2 的 static_transform_publisher 配置或 URDF 片段,便于一键加载

View File

@@ -1,207 +0,0 @@
#!/usr/bin/env python3
"""
Detect calibration board in images, estimate camera pose via solvePnP,
and output quaternions (qx,qy,qz,qw) for up to N images.
Supports:
- chessboard or Charuco detection
- YAML/JSON camera intrinsics (OpenCV-style keys)
- Save to CSV and print to stdout
Example:
python src/scripts/images_to_quaternions.py \
--images data/images/*.png \
--camera-calib data/camera.yaml \
--pattern chessboard --board-rows 6 --board-cols 9 --square-size 0.024 \
--limit 12 --out /tmp/quats.csv
"""
from __future__ import annotations
import argparse
import glob
import json
import os
from typing import Tuple, List
import cv2
import numpy as np
def load_camera_calib(path: str) -> Tuple[np.ndarray, np.ndarray]:
    """Load OpenCV-style camera intrinsics (K, D) from a YAML or JSON file.

    Accepts key aliases camera_matrix/cameraMatrix/K and
    dist_coeffs/distCoeffs/D; values may be flat lists or OpenCV's
    {'data': [...]} mapping.

    Returns:
        (K, D): 3x3 camera matrix and (N, 1) distortion coefficients.

    Raises:
        ValueError: when required keys are missing.
    """
    # Fix: PyYAML is optional. Previously a missing PyYAML raised an
    # uncaught ImportError even though plain JSON parsing would suffice.
    try:
        import yaml
    except ImportError:
        yaml = None
    with open(path, 'r') as f:
        txt = f.read()
    if yaml is not None:
        try:
            data = yaml.safe_load(txt)
        except Exception:
            data = json.loads(txt)
    else:
        data = json.loads(txt)
    # Accept several common key spellings.
    K = data.get('camera_matrix') or data.get('cameraMatrix') or data.get('K')
    if isinstance(K, dict) and 'data' in K:
        K = K['data']
    D = data.get('dist_coeffs') or data.get('distCoeffs') or data.get('D')
    if isinstance(D, dict) and 'data' in D:
        D = D['data']
    if K is None or D is None:
        raise ValueError('Calibration must contain camera_matrix/cameraMatrix/K and dist_coeffs/distCoeffs/D')
    K = np.array(K, dtype=float).reshape(3, 3)
    D = np.array(D, dtype=float).reshape(-1, 1)
    return K, D
def build_chessboard_object_points(rows: int, cols: int, square: float) -> np.ndarray:
    """Chessboard 3D corner coordinates (float32, Z=0 plane, x-fastest order)."""
    pts = np.zeros((rows * cols, 3), np.float32)
    index = 0
    for row in range(rows):
        for col in range(cols):
            pts[index, 0] = col * square
            pts[index, 1] = row * square
            index += 1
    return pts
def detect_chessboard(gray: np.ndarray, rows: int, cols: int) -> Tuple[bool, np.ndarray]:
    """Detect chessboard corners with sub-pixel refinement; (ok, (N,2) corners)."""
    found, raw = cv2.findChessboardCorners(
        gray, (cols, rows),
        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE)
    if not found:
        return False, None
    # Refine corner locations to sub-pixel accuracy.
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    refined = cv2.cornerSubPix(gray, raw, (11, 11), (-1, -1), term)
    return True, refined.reshape(-1, 2)
def detect_charuco(gray: np.ndarray, rows: int, cols: int, square: float, marker: float) -> Tuple[bool, np.ndarray, np.ndarray]:
    """Detect Charuco chessboard corners; returns (ok, corners, ids).

    Requires opencv-contrib-python (the cv2.aruco module).
    """
    if not hasattr(cv2, 'aruco'):
        raise RuntimeError('opencv-contrib-python is required for Charuco detection')
    aruco = cv2.aruco
    dictionary = aruco.getPredefinedDictionary(getattr(aruco, 'DICT_4X4_50'))
    board = aruco.CharucoBoard((cols, rows), square, marker, dictionary)
    detector = aruco.ArucoDetector(dictionary, aruco.DetectorParameters())
    marker_corners, marker_ids, _ = detector.detectMarkers(gray)
    if marker_ids is None or len(marker_ids) == 0:
        return False, None, None
    # Interpolate chessboard corners from the detected ArUco markers.
    count, corners, ids = aruco.interpolateCornersCharuco(marker_corners, marker_ids, gray, board)
    ok = count is not None and count >= 4
    return ok, (corners.reshape(-1, 2) if ok else None), (ids if ok else None)
def rot_to_quat(R: np.ndarray) -> np.ndarray:
    """Rotation matrix -> unit quaternion [x, y, z, w].

    Branches on the largest of (trace, R00, R11, R22) so the divisor never
    becomes small, keeping the conversion numerically stable.
    """
    trace = R[0, 0] + R[1, 1] + R[2, 2]
    if trace > 0.0:
        denom = np.sqrt(trace + 1.0) * 2.0
        components = ((R[2, 1] - R[1, 2]) / denom,
                      (R[0, 2] - R[2, 0]) / denom,
                      (R[1, 0] - R[0, 1]) / denom,
                      0.25 * denom)
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        denom = np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2]) * 2.0
        components = (0.25 * denom,
                      (R[0, 1] + R[1, 0]) / denom,
                      (R[0, 2] + R[2, 0]) / denom,
                      (R[2, 1] - R[1, 2]) / denom)
    elif R[1, 1] > R[2, 2]:
        denom = np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2]) * 2.0
        components = ((R[0, 1] + R[1, 0]) / denom,
                      0.25 * denom,
                      (R[1, 2] + R[2, 1]) / denom,
                      (R[0, 2] - R[2, 0]) / denom)
    else:
        denom = np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1]) * 2.0
        components = ((R[0, 2] + R[2, 0]) / denom,
                      (R[1, 2] + R[2, 1]) / denom,
                      0.25 * denom,
                      (R[1, 0] - R[0, 1]) / denom)
    quat = np.array(components)
    return quat / (np.linalg.norm(quat) + 1e-12)
def pnp_quaternion_for_image(image_path: str, K: np.ndarray, D: np.ndarray,
                             pattern: str, rows: int, cols: int, square: float,
                             charuco_marker: float | None = None) -> Tuple[np.ndarray, np.ndarray]:
    """Detect the board in one image and return its pose as (quaternion, translation).

    Args:
        image_path: image file path (read as grayscale).
        K: 3x3 camera matrix.
        D: distortion coefficients.
        pattern: 'chessboard' or 'charuco'.
        rows, cols: board inner-corner counts.
        square: square size in meters.
        charuco_marker: Charuco marker size in meters (required for 'charuco').

    Returns:
        (q, t): quaternion [qx, qy, qz, qw] and translation (3,) of the
        target->camera pose from solvePnP.

    Raises:
        FileNotFoundError: when the image cannot be read.
        RuntimeError: when detection or solvePnP fails.
        ValueError: on an unknown pattern or missing charuco_marker.
    """
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if gray is None:
        raise FileNotFoundError(f'Cannot read image: {image_path}')
    if pattern == 'chessboard':
        ok, corners = detect_chessboard(gray, rows, cols)
        if not ok:
            raise RuntimeError(f'Chessboard detection failed: {image_path}')
        objp = build_chessboard_object_points(rows, cols, square)
        retval, rvec, tvec = cv2.solvePnP(objp, corners, K, D, flags=cv2.SOLVEPNP_ITERATIVE)
        if not retval:
            raise RuntimeError(f'solvePnP failed: {image_path}')
    elif pattern == 'charuco':
        if charuco_marker is None:
            raise ValueError('charuco requires --charuco-marker')
        ok, charuco_corners, charuco_ids = detect_charuco(gray, rows, cols, square, charuco_marker)
        if not ok:
            raise RuntimeError(f'Charuco detection failed: {image_path}')
        # Build the matching 3D points from the board's corner indices.
        # NOTE(review): dictionary is hard-coded to DICT_4X4_50, matching
        # the dictionary used inside detect_charuco.
        aruco = cv2.aruco
        aruco_dict = aruco.getPredefinedDictionary(getattr(aruco, 'DICT_4X4_50'))
        board = aruco.CharucoBoard((cols, rows), square, charuco_marker, aruco_dict)
        obj_pts = []
        img_pts = []
        for idx, corner in zip(charuco_ids.flatten(), charuco_corners.reshape(-1, 2)):
            obj_pts.append(board.getChessboardCorners()[idx][0])
            img_pts.append(corner)
        obj_pts = np.array(obj_pts, dtype=np.float32)
        img_pts = np.array(img_pts, dtype=np.float32)
        retval, rvec, tvec = cv2.solvePnP(obj_pts, img_pts, K, D, flags=cv2.SOLVEPNP_ITERATIVE)
        if not retval:
            raise RuntimeError(f'solvePnP failed: {image_path}')
    else:
        raise ValueError('pattern must be chessboard or charuco')
    # Convert the Rodrigues rotation vector to a quaternion.
    R, _ = cv2.Rodrigues(rvec)
    q = rot_to_quat(R)
    return q, tvec.flatten()
def main():
    """CLI entry point: detect boards in images, solve PnP, print/export quaternions."""
    parser = argparse.ArgumentParser(description='Estimate quaternions from board detections in images (solvePnP).')
    parser.add_argument('--images', type=str, required=True, help='Directory or glob for images')
    parser.add_argument('--camera-calib', type=str, required=True, help='Camera intrinsics (YAML/JSON)')
    parser.add_argument('--pattern', type=str, default='chessboard', choices=['chessboard', 'charuco'])
    parser.add_argument('--board-rows', type=int, default=6)
    parser.add_argument('--board-cols', type=int, default=9)
    parser.add_argument('--square-size', type=float, default=0.024, help='meters')
    parser.add_argument('--charuco-marker', type=float, default=None, help='meters for Charuco marker size')
    parser.add_argument('--limit', type=int, default=12, help='Max number of images to process')
    parser.add_argument('--out', type=str, default=None, help='Output CSV path')
    args = parser.parse_args()
    # Collect candidate images: a directory expands to all its entries,
    # anything else is treated as a glob pattern.
    if os.path.isdir(args.images):
        candidates = sorted(glob.glob(os.path.join(args.images, '*')))
    else:
        candidates = sorted(glob.glob(args.images))
    if not candidates:
        raise FileNotFoundError(f'No images found: {args.images}')
    candidates = candidates[: args.limit]
    # Camera intrinsics.
    K, D = load_camera_calib(args.camera_calib)
    # Run PnP per image; failures are reported as warnings and skipped.
    results: List[Tuple[str, np.ndarray]] = []
    for path in candidates:
        try:
            q, t = pnp_quaternion_for_image(
                path, K, D,
                args.pattern, args.board_rows, args.board_cols, args.square_size,
                args.charuco_marker,
            )
            results.append((path, q))
            print(f'{os.path.basename(path)}: qx={q[0]:.8f}, qy={q[1]:.8f}, qz={q[2]:.8f}, qw={q[3]:.8f}')
        except Exception as exc:
            print(f'WARN: {path}: {exc}')
    # Optional CSV export of the successful detections.
    if args.out:
        import csv
        with open(args.out, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['image', 'qx', 'qy', 'qz', 'qw'])
            for path, q in results:
                writer.writerow([os.path.basename(path)] + [f'{v:.8f}' for v in q])
        print(f'Written CSV: {args.out}')
# Script entry point.
if __name__ == '__main__':
    main()

View File

@@ -1,115 +0,0 @@
# 姿态到四元数转换说明
本文档说明如何将姿态表示(欧拉角与旋转向量 Rodrigues/Axis-Angle转换为四元数 (qx, qy, qz, qw),包含原理、公式、边界情况与一个完整的计算示例,并给出现有脚本 `src/scripts/euler_to_quaternion.py` 的使用方法。
## 基本约定
- 四元数记为 q = [x, y, z, w],其中 w 为实部,(x, y, z) 为虚部(与 ROS、OpenCV 常见约定一致)。
- 旋转向量 r = (rx, ry, rz) 的方向为旋转轴,模长 |r| 为旋转角(弧度)。
- 欧拉角使用“内在旋转”约定,支持顺序 xyz默认与 zyx。
---
## 1) 旋转向量 (Rodrigues) → 四元数
给定旋转向量 r = (rx, ry, rz)
1. 计算旋转角与单位旋转轴:
- 角度:θ = |r| = sqrt(rx^2 + ry^2 + rz^2)
- 轴:u = r / θ = (ux, uy, uz)
2. 由轴角到四元数:
- s = sin(θ/2), c = cos(θ/2)
- q = (ux·s, uy·s, uz·s, c)
3. 数值稳定性:若 θ 非常小(例如 < 1e-12),可近似认为 q ≈ (0, 0, 0, 1)。
4. 归一化:实际实现中会做一次单位化 q ← q / ||q||,以避免数值误差。
> 注:四元数 q 与 -q 表示同一旋转(双覆盖表示)。
### 示例(完整计算过程)
已知 r = (-1.433, 0.114, -0.430),单位:rad。
- θ = |r| ≈ 1.5004615956
- u = r / θ ≈ (-0.955039439, 0.075976620, -0.286578478)
- s = sin(θ/2) ≈ 0.6818076141
- c = cos(θ/2) ≈ 0.7315315286
- q = (ux·s, uy·s, uz·s, c)
  ≈ (-0.6511531610, 0.0518014378, -0.1953913882, 0.7315315286)
因此,对应四元数为:
- (qx, qy, qz, qw) ≈ (-0.65115316, 0.05180144, -0.19539139, 0.73153153)
---
## 2) 欧拉角 → 四元数
对于欧拉角 (rx, ry, rz),若输入单位为弧度:
- 以 xyz 顺序为例:依次绕自身 x、y、z 轴旋转。
1. 构造三个轴角四元数
- qx = (sin(rx/2), 0, 0, cos(rx/2))
- qy = (0, sin(ry/2), 0, cos(ry/2))
- qz = (0, 0, sin(rz/2), cos(rz/2))
2. 乘法顺序为 q = qx · qy · qz(内在旋转,右乘累积)。
- 对于 zyx 顺序:q = qz · qy · qx。
- 最后同样进行单位化。
> 提示:不同库/软件可能有“外在 vs 内在”、“左乘 vs 右乘”、“轴顺序”等差异,需确保与使用方约定一致。
---
## 3) 边界与常见问题
- 零角或极小角直接返回单位四元数 (0,0,0,1) 或采用泰勒展开近似
- 符号一致性q -q 表示同一旋转批量处理时常将 w 约束为非负以保证连续性可选)。
- 单位所有角度必须使用弧度若源数据是度请先转弧度乘以 π/180)。
- 数值稳定建议在输出前做单位化避免浮点累积误差
---
## 4) 使用脚本批量/单次转换
脚本路径`src/scripts/euler_to_quaternion.py`
- 单次旋转向量 四元数
```bash
python src/scripts/euler_to_quaternion.py --mode rotvec --single RX RY RZ
# 例如:
python src/scripts/euler_to_quaternion.py --mode rotvec --single -1.433 0.114 -0.430
```
- CSV 批量(默认欧拉角,若为旋转向量请加 --mode rotvec
```bash
python src/scripts/euler_to_quaternion.py \
--in input.csv --out output.csv \
--rx rx --ry ry --rz rz \
--mode rotvec
```
- 欧拉角(内在旋转顺序 xyz 或 zyx
```bash
python src/scripts/euler_to_quaternion.py \
--mode euler --order xyz --single RX RY RZ
```
> 如果输入 CSV 同时包含位姿中的位置列x,y,z脚本支持可选的单位转换 `--mm-to-m`(将毫米换算为米,并额外输出 x_m,y_m,z_m 列)。
---
## 5) 本次 12 组旋转向量的结果
以下 12 组 (rx, ry, rz)单位rad对应的四元数 (qx, qy, qz, qw)
1. -0.65115316, 0.05180144, -0.19539139, 0.73153153
2. -0.71991924, -0.00710155, -0.17753865, 0.67092912
3. -0.44521470, -0.25512818, -0.41269388, 0.75258039
4. -0.72304324, 0.09631938, -0.32264833, 0.60318248
5. -0.67311368, 0.06894426, -0.11793097, 0.72681287
6. -0.73524204, -0.19261515, -0.19652833, 0.61943132
7. -0.75500427, -0.08296268, -0.09788718, 0.64304265
8. -0.88627353, -0.08089799, -0.15658968, 0.42831579
9. -0.62408775, -0.13051614, -0.11718879, 0.76141106
10. -0.67818166, -0.10516535, -0.18696062, 0.70289090
11. -0.77275040, -0.19297175, -0.05741665, 0.60193194
12. -0.66493346, 0.09013744, -0.16351565, 0.72318833
---
## 6) 参考实现
脚本 `euler_to_quaternion.py` 中的核心函数:
- `rotvec_to_quaternion(rx, ry, rz)`:实现了第 1 节所述的 Rodrigues → 四元数转换,并在小角度与归一化方面做了稳健处理。
- `euler_to_quaternion(rx, ry, rz, order, degrees)`:实现了第 2 节所述的欧拉角 → 四元数转换,支持 xyz/zyx 两种顺序与度/弧度输入。
如需将结果保存为 CSV 或用于后续手眼标定、TF 发布等,可直接复用该脚本的命令行接口。
---

View File

@@ -1,3 +0,0 @@
# Pinned to avoid NumPy 2.x ABI issues with some wheels
numpy<2
opencv-python==4.11.0.86

View File

@@ -1,119 +0,0 @@
#!/usr/bin/env python3
"""
rotate_pose.py
Small utility to rotate a 7-element pose (x,y,z,qx,qy,qz,qw or qw,qx,qy,qz)
around a given axis by a specified angle (degrees).
Usage examples:
./rotate_pose.py --pose 0.1 0.2 0.3 -0.6 0.1 -0.1 0.7 --axis z --angle 180 --order qxqyqzqw --frame world
python3 rotate_pose.py --pose 0.179387 0.341708 -0.099286 -0.88140507 0.12851699 -0.17498077 0.41951188 --axis z --angle 180 --order qxqyqzqw --frame world
Supported frames:
- world: apply rotation in world frame (q' = q_rot * q)
- body: apply rotation in body/local frame (q' = q * q_rot)
Supported quaternion order strings: "qxqyqzqw" (default) or "wxyz" (qw,qx,qy,qz)
"""
import argparse
import math
from typing import List, Tuple
def normalize_quat(q: Tuple[float, float, float, float]) -> Tuple[float, float, float, float]:
    """Scale quaternion (x, y, z, w) to unit length; zero maps to identity."""
    qx, qy, qz, qw = q
    norm = math.sqrt(qx * qx + qy * qy + qz * qz + qw * qw)
    if norm == 0:
        # Degenerate input: fall back to the identity rotation.
        return (0.0, 0.0, 0.0, 1.0)
    return (qx / norm, qy / norm, qz / norm, qw / norm)
def axis_angle_to_quat(axis: str, angle_deg: float) -> Tuple[float, float, float, float]:
    """Quaternion (x, y, z, w) for a rotation of angle_deg degrees about x, y or z."""
    half = math.radians(angle_deg) / 2.0
    cos_half = math.cos(half)
    sin_half = math.sin(half)
    # Case-insensitive axis lookup; only principal axes are supported.
    lookup = {
        'x': (sin_half, 0.0, 0.0, cos_half),
        'y': (0.0, sin_half, 0.0, cos_half),
        'z': (0.0, 0.0, sin_half, cos_half),
    }
    key = axis.lower()
    if key not in lookup:
        raise ValueError('axis must be x,y or z')
    return lookup[key]
def quat_mul(a: Tuple[float,float,float,float], b: Tuple[float,float,float,float]) -> Tuple[float,float,float,float]:
    """Hamilton product a*b; quaternions given as (x, y, z, w) tuples."""
    x1, y1, z1, w1 = a
    x2, y2, z2, w2 = b
    # Term order kept identical to the standard expansion for exact
    # floating-point reproducibility.
    return (
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2,
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
    )
def rotate_position(pos: Tuple[float,float,float], axis: str, angle_deg: float) -> Tuple[float,float,float]:
    """Rotate a 3-D point about a principal axis via quaternion conjugation p' = q p q*."""
    # Normalized rotation quaternion and its conjugate (inverse for unit quats).
    rot = normalize_quat(axis_angle_to_quat(axis, angle_deg))
    rx, ry, rz, rw = rot
    conj = (-rx, -ry, -rz, rw)
    # Embed the point as a pure quaternion (v, 0) and sandwich it.
    point_q = (pos[0], pos[1], pos[2], 0.0)
    rotated = quat_mul(quat_mul(rot, point_q), conj)
    return (rotated[0], rotated[1], rotated[2])
def parse_pose(vals: List[float], order: str) -> Tuple[Tuple[float,float,float], Tuple[float,float,float,float]]:
    """Split a 7-element pose into (position, quaternion in (x, y, z, w) order).

    order 'qxqyqzqw': vals = [x, y, z, qx, qy, qz, qw]
    order 'wxyz':     vals = [x, y, z, qw, qx, qy, qz]

    Raises:
        ValueError: on an unknown order string or wrong value count.
    """
    # Fix: the original 'wxyz' branch used a confusing dead-code expression
    # ("... if False else ...") to reorder the quaternion; the length check and
    # reordering are now explicit and shared.
    if order not in ('qxqyqzqw', 'wxyz'):
        raise ValueError('unsupported order')
    if len(vals) != 7:
        raise ValueError('need 7 values for pose')
    pos = (vals[0], vals[1], vals[2])
    if order == 'qxqyqzqw':
        return pos, (vals[3], vals[4], vals[5], vals[6])
    # 'wxyz': stored as qw,qx,qy,qz -> reorder to (x, y, z, w).
    return pos, (vals[4], vals[5], vals[6], vals[3])
def format_pose(pos: Tuple[float,float,float], quat: Tuple[float,float,float,float], order: str) -> List[float]:
    """Flatten (position, quaternion) back into a 7-element list in the given order."""
    x, y, z = pos
    qx, qy, qz, qw = quat
    if order == 'qxqyqzqw':
        return [x, y, z, qx, qy, qz, qw]
    if order == 'wxyz':
        # Output as qw,qx,qy,qz.
        return [x, y, z, qw, qx, qy, qz]
    raise ValueError('unsupported order')
def main():
    """Parse CLI args, rotate the given pose, and print the result."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--pose', nargs=7, type=float, required=True, help='pose values')
    cli.add_argument('--axis', choices=['x','y','z'], default='z')
    cli.add_argument('--angle', type=float, default=180.0)
    cli.add_argument('--order', choices=['qxqyqzqw','wxyz'], default='qxqyqzqw')
    cli.add_argument('--frame', choices=['world','body'], default='world')
    args = cli.parse_args()
    pos, quat = parse_pose(list(args.pose), args.order)
    # Unit rotation quaternion for the requested axis/angle.
    q_delta = normalize_quat(axis_angle_to_quat(args.axis, args.angle))
    # Position always rotates in the world frame: pos' = R * pos.
    new_pos = rotate_position(pos, args.axis, args.angle)
    # Orientation: world frame pre-multiplies, body frame post-multiplies.
    if args.frame == 'world':
        combined = quat_mul(q_delta, quat)
    else:
        combined = quat_mul(quat, q_delta)
    combined = normalize_quat(combined)
    out_vals = format_pose(new_pos, combined, args.order)
    # Print as numeric list without quotes.
    print('rotated_pose:', [float('{:.8f}'.format(v)) for v in out_vals])
# Script entry point.
if __name__ == '__main__':
    main()

View File

@@ -1,152 +0,0 @@
#!/usr/bin/env bash
# run.sh — launch the brain and/or arm_control ROS 2 nodes, with optional
# per-node file logging. See usage() below for options.
set -euo pipefail
# Resolve workspace root (script may be invoked from repo root)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WS_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Source ROS 2 workspace (temporarily disable nounset to avoid unbound vars inside setup scripts)
if [[ -f "${WS_ROOT}/install/setup.bash" ]]; then
  # shellcheck source=/dev/null
  set +u
  source "${WS_ROOT}/install/setup.bash"
  set -u
else
  echo "[run.sh] install/setup.bash not found at ${WS_ROOT}/install/setup.bash"
  echo "[run.sh] Please build the workspace first, e.g.,: colcon build && source install/setup.bash"
  exit 1
fi
# Human-friendly logging format with local wall-clock time and colors
export RCUTILS_COLORIZED_OUTPUT=${RCUTILS_COLORIZED_OUTPUT:-1}
export RCUTILS_CONSOLE_OUTPUT_FORMAT='[{time}] [{severity}] [{name}]: {message}'
# Defaults; overridden by the CLI flags parsed below.
LOG_TO_FILES=0
LOG_LEVEL="info" # debug|info|warn|error|fatal
RUN_BRAIN=1
RUN_MOTION=1
# Print CLI usage/help to stdout.
usage() {
cat <<EOF
Usage: ${0##*/} [options]
Options:
-l, --log-to-files Write each node's stdout/stderr to log/run/<timestamp>/*.log
-L, --log-level LEVEL Set ROS 2 log level (default: info). Ex: debug, warn, error
--brain-only Run only brain node
--arm_control-only Run only arm_control node
-h, --help Show this help and exit
Environment:
RCUTILS_COLORIZED_OUTPUT Colorize console logs (default: 1)
RCUTILS_CONSOLE_OUTPUT_FORMAT Log format (already configured here)
EOF
}
# Parse args; unknown options print usage and exit 2.
while [[ $# -gt 0 ]]; do
  case "$1" in
    -l|--log-to-files)
      LOG_TO_FILES=1; shift ;;
    -L|--log-level)
      LOG_LEVEL="${2:-info}"; shift 2 ;;
    --brain-only)
      RUN_BRAIN=1; RUN_MOTION=0; shift ;;
    --arm_control-only)
      RUN_BRAIN=0; RUN_MOTION=1; shift ;;
    -h|--help)
      usage; exit 0 ;;
    *)
      echo "Unknown option: $1" >&2
      usage; exit 2 ;;
  esac
done
# Current date, used for per-day archiving (e.g.: 20260114)
date_day() { date +"%Y%m%d"; }
# Readable timestamp for file names (e.g.: 2026-01-14_15-30-05)
readable_timestamp() { date +"%Y-%m-%d_%H-%M-%S"; }
# 1. Group all logs under a per-day directory
DAILY_LOG_ROOT="${WS_ROOT}/logs/$(date_day)"
# 2. Create a timestamped subdirectory per launch (avoids collisions across runs)
LOG_DIR="${DAILY_LOG_ROOT}/robot_logs_$(readable_timestamp)"
mkdir -p "${LOG_DIR}"
# Force ROS 2's own internal logs into this directory too, so they don't
# scatter under ~/.ros/log
export ROS_LOG_DIR="${LOG_DIR}"
# PIDs and display names of the background node processes we start.
pids=()
names=()
#######################################
# Launch one ROS 2 node in the background and record its PID.
# Globals:   LOG_TO_FILES, LOG_LEVEL, LOG_DIR (read); pids, names (appended)
# Arguments: $1 - display name, $2 - package, $3 - executable
#######################################
start_node() {
  local name="$1"; shift
  local pkg="$1"; shift
  local exe="$1"; shift
  # 3. Log file name format is configurable here.
  # Declare and assign separately so a failing command substitution is not
  # masked by `local`'s own exit status (ShellCheck SC2155).
  local node_ts
  node_ts=$(readable_timestamp)
  local log_file="${LOG_DIR}/${name}_${node_ts}.log"
  if (( LOG_TO_FILES )); then
    echo "[run.sh] Starting ${name} -> ros2 run ${pkg} ${exe} (logs: ${log_file})"
    (
      # Disable ANSI colors so the log files stay clean.
      RCUTILS_COLORIZED_OUTPUT=0 exec ros2 run "${pkg}" "${exe}" --ros-args --log-level "${LOG_LEVEL}"
    ) >>"${log_file}" 2>&1 &
  else
    echo "[run.sh] Starting ${name} -> ros2 run ${pkg} ${exe} --ros-args --log-level ${LOG_LEVEL}"
    ros2 run "${pkg}" "${exe}" --ros-args --log-level "${LOG_LEVEL}" &
  fi
  local pid=$!
  pids+=("${pid}")
  names+=("${name}")
}
cleanup_done=0
# Stop all started nodes: SIGINT first for a graceful ROS shutdown, then
# SIGTERM for stragglers. Idempotent via cleanup_done, since both the trap
# and the normal-exit path call it.
cleanup() {
  (( cleanup_done )) && return 0
  cleanup_done=1
  echo ""
  echo "[run.sh] Shutting down (${#pids[@]} processes)..."
  # Send SIGINT first for graceful shutdown
  for pid in "${pids[@]:-}"; do
    if kill -0 "$pid" 2>/dev/null; then
      kill -INT "$pid" 2>/dev/null || true
    fi
  done
  # Give them a moment to exit gracefully
  sleep 1
  # Force kill any stubborn processes
  for pid in "${pids[@]:-}"; do
    if kill -0 "$pid" 2>/dev/null; then
      kill -TERM "$pid" 2>/dev/null || true
    fi
  done
  wait || true
  echo "[run.sh] All processes stopped."
}
trap cleanup INT TERM
# Start requested nodes
if (( RUN_MOTION )); then
  start_node "arm_control" "arm_control" "motion_node"
fi
if (( RUN_BRAIN )); then
  start_node "brain" "brain" "brain_node"
fi
if (( ${#pids[@]} == 0 )); then
  echo "[run.sh] Nothing to run. Use --brain-only or --arm_control-only to pick a node."
  exit 0
fi
echo "[run.sh] Started: ${names[*]}"
echo "[run.sh] Press Ctrl+C to stop."
# Wait for all background processes
wait
# Ensure cleanup executes on normal exit too
cleanup

View File

@@ -1,94 +0,0 @@
#!/usr/bin/env bash
# Build and send an ArmSpaceControl action goal from x/y/z and a yaw angle.
set -euo pipefail
# Defaults (all overridable via the CLI flags parsed below)
ACTION_NAME="/ArmSpaceControl"
FRAME_ID="base_link"
X=0.5
Y=0.0
Z=0.3
YAW_DEG=0.0
ANGLE_UNIT="deg" # deg|rad
DRY_RUN=0
LOG_LEVEL="info"
# Print CLI usage/help to stdout.
# Fix: the help text claimed the default action was /arm_space_control, but
# ACTION_NAME actually defaults to /ArmSpaceControl above.
usage(){
cat <<EOF
Usage: ${0##*/} [options]
Options:
-a, --action NAME Action name (default: /ArmSpaceControl)
-f, --frame ID Target frame_id (default: base_link)
--x VALUE Position x (m) (default: 0.5)
--y VALUE Position y (m) (default: 0.0)
--z VALUE Position z (m) (default: 0.3)
--yaw VALUE Yaw angle (default: 0.0)
--rad Interpret yaw as radians (default is degrees)
--deg Interpret yaw as degrees (default)
--log-level LEVEL ROS 2 log level (default: info)
--dry-run Print goal YAML and exit
-h, --help Show this help
Examples:
${0##*/} --x 0.5 --y 0.0 --z 0.3 --yaw 90 --deg --frame base_link
${0##*/} --action /ArmSpaceControl --yaw 1.57 --rad
EOF
}
# Parse args; unknown options print usage and exit 2.
while [[ $# -gt 0 ]]; do
  case "$1" in
    -a|--action) ACTION_NAME="$2"; shift 2 ;;
    -f|--frame) FRAME_ID="$2"; shift 2 ;;
    --x) X="$2"; shift 2 ;;
    --y) Y="$2"; shift 2 ;;
    --z) Z="$2"; shift 2 ;;
    --yaw) YAW_DEG="$2"; shift 2 ;;
    --rad) ANGLE_UNIT="rad"; shift ;;
    --deg) ANGLE_UNIT="deg"; shift ;;
    --log-level) LOG_LEVEL="$2"; shift 2 ;;
    --dry-run) DRY_RUN=1; shift ;;
    -h|--help) usage; exit 0 ;;
    *) echo "Unknown option: $1" >&2; usage; exit 2 ;;
  esac
done
# Compute quaternion from yaw only (roll=pitch=0)
# qx=qy=0, qz=sin(yaw/2), qw=cos(yaw/2)
# The sin/cos is delegated to Python for full double precision.
python_quat='import math,sys; yaw=float(sys.argv[1]); print(f"{math.sin(yaw/2.0)} {math.cos(yaw/2.0)}")'
if [[ "$ANGLE_UNIT" == "deg" ]]; then
  # Convert degrees to radians in Python for accuracy
  read -r QZ QW < <(python3 -c 'import math,sys; yaw_deg=float(sys.argv[1]); yaw=math.radians(yaw_deg); print(f"{math.sin(yaw/2.0)} {math.cos(yaw/2.0)}")' "$YAW_DEG")
else
  read -r QZ QW < <(python3 -c "$python_quat" "$YAW_DEG")
fi
# Build YAML (avoid shell-escaping issues; no quotes around frame_id)
GOAL_YAML=$(cat <<YAML
{target: {header: {frame_id: ${FRAME_ID}}, pose: {position: {x: ${X}, y: ${Y}, z: ${Z}}, orientation: {x: 0.0, y: 0.0, z: ${QZ}, w: ${QW}}}}}
YAML
)
# --dry-run: show the goal without touching ROS, then exit successfully.
if (( DRY_RUN )); then
  echo "Will send to action: ${ACTION_NAME}"
  echo "Goal YAML:"
  echo "$GOAL_YAML"
  exit 0
fi
# Source workspace
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WS_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
if [[ -f "${WS_ROOT}/install/setup.bash" ]]; then
  # shellcheck source=/dev/null
  # Fix: temporarily disable nounset while sourcing — ROS setup scripts may
  # reference unset variables, which would abort under `set -u`. This matches
  # the guard already used in run.sh.
  set +u
  source "${WS_ROOT}/install/setup.bash"
  set -u
else
  echo "install/setup.bash not found at ${WS_ROOT}/install/setup.bash" >&2
  exit 1
fi
# Send goal (set -x echoes the exact command for debugging)
set -x
ros2 action send_goal --feedback "${ACTION_NAME}" interfaces/action/ArmSpaceControl "$GOAL_YAML" --ros-args --log-level "${LOG_LEVEL}"
set +x