A complete walkthrough of using an Orbbec stereo depth camera to locate objects and implement robotic grasping with ROS2.
1. System Architecture Overview
```text
[Orbbec camera] → [Point cloud processing] → [Object detection & localization] → [Motion planning] → [Arm control]
```
2. Hardware Requirements
- An Orbbec Astra/Astra Pro or Femto series depth camera
- A ROS2-compatible robot arm (e.g., UR, Franka, xArm)
- A compute platform (e.g., NVIDIA Jetson or a high-performance PC)
3. ROS2 Software Setup
3.1 Install the Orbbec ROS2 Driver
```bash
sudo apt install ros-${ROS_DISTRO}-libuvc-camera
sudo apt install ros-${ROS_DISTRO}-camera-calibration

# Build the Orbbec ROS2 driver from source
mkdir -p ~/orbbec_ws/src
cd ~/orbbec_ws/src
git clone https://github.com/orbbec/ros2_astra_camera.git
cd ..
rosdep install --from-paths src --ignore-src -r -y
colcon build --symlink-install
source install/setup.bash
```
3.2 Start the Camera Node
```bash
ros2 launch astra_camera astra.launch.py
```
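Once the node is running, it's worth verifying that the depth stream is actually being published before moving on (topic names can vary slightly between driver versions):

```bash
# List the topics advertised by the camera driver
ros2 topic list | grep camera

# Confirm the depth point cloud is streaming and check its rate
ros2 topic hz /camera/depth/points
```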
4. Object Detection and Localization
4.1 Create the ROS2 Package
```bash
ros2 pkg create object_grasping --build-type ament_cmake \
  --dependencies rclcpp rclcpp_components cv_bridge image_transport \
  vision_msgs pcl_conversions pcl_ros tf2 tf2_ros tf2_geometry_msgs moveit_core \
  moveit_ros_planning_interface
```

Note that `vision_msgs` is included here because the detection node below publishes `Detection3DArray` messages.
4.2 Object Detection Node (C++)
`src/object_detector.cpp`:
```cpp
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <vision_msgs/msg/detection3_d_array.hpp>
#include <cv_bridge/cv_bridge.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/centroid.h>     // compute3DCentroid
#include <pcl/common/common.h>       // getMinMax3D
#include <pcl/filters/filter.h>      // removeNaNFromPointCloud
#include <pcl/filters/passthrough.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/extract_clusters.h>

class ObjectDetector : public rclcpp::Node
{
public:
  ObjectDetector() : Node("object_detector")
  {
    // Subscribe to the depth point cloud
    pointcloud_sub_ = this->create_subscription<sensor_msgs::msg::PointCloud2>(
      "/camera/depth/points", 10,
      std::bind(&ObjectDetector::pointcloudCallback, this, std::placeholders::_1));

    // Publish detection results
    detection_pub_ = this->create_publisher<vision_msgs::msg::Detection3DArray>(
      "/detected_objects", 10);

    // Declare tunable parameters
    this->declare_parameter("cluster_tolerance", 0.02);
    this->declare_parameter("min_cluster_size", 100);
    this->declare_parameter("max_cluster_size", 25000);
    this->declare_parameter("workspace_height", 0.8);
  }

private:
  void pointcloudCallback(const sensor_msgs::msg::PointCloud2::SharedPtr msg)
  {
    // Convert to a PCL point cloud
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromROSMsg(*msg, *cloud);

    // 1. Preprocessing: remove NaN and invalid points
    pcl::PointCloud<pcl::PointXYZ>::Ptr filtered_cloud(new pcl::PointCloud<pcl::PointXYZ>);
    std::vector<int> indices;
    pcl::removeNaNFromPointCloud(*cloud, *filtered_cloud, indices);

    // 2. Workspace limits (adjust to your setup)
    double workspace_height = this->get_parameter("workspace_height").as_double();
    pcl::PassThrough<pcl::PointXYZ> pass;
    pass.setInputCloud(filtered_cloud);
    pass.setFilterFieldName("z");
    pass.setFilterLimits(0.0, workspace_height);
    pass.filter(*filtered_cloud);

    // 3. Euclidean cluster extraction
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
    tree->setInputCloud(filtered_cloud);

    std::vector<pcl::PointIndices> cluster_indices;
    pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
    ec.setClusterTolerance(this->get_parameter("cluster_tolerance").as_double());
    ec.setMinClusterSize(this->get_parameter("min_cluster_size").as_int());
    ec.setMaxClusterSize(this->get_parameter("max_cluster_size").as_int());
    ec.setSearchMethod(tree);
    ec.setInputCloud(filtered_cloud);
    ec.extract(cluster_indices);

    // 4. Compute the centroid and bounding box of each cluster
    vision_msgs::msg::Detection3DArray detections;
    detections.header = msg->header;

    for (const auto& cluster : cluster_indices) {
      // Centroid
      Eigen::Vector4f centroid;
      pcl::compute3DCentroid(*filtered_cloud, cluster, centroid);

      // Axis-aligned bounding box; the PointIndices overload of
      // getMinMax3D returns Eigen vectors, not PointXYZ
      Eigen::Vector4f min_pt, max_pt;
      pcl::getMinMax3D(*filtered_cloud, cluster, min_pt, max_pt);

      // Build the detection message
      vision_msgs::msg::Detection3D detection;
      detection.header = msg->header;
      detection.bbox.center.position.x = centroid[0];
      detection.bbox.center.position.y = centroid[1];
      detection.bbox.center.position.z = centroid[2];
      detection.bbox.center.orientation.w = 1.0;  // identity orientation
      detection.bbox.size.x = max_pt[0] - min_pt[0];
      detection.bbox.size.y = max_pt[1] - min_pt[1];
      detection.bbox.size.z = max_pt[2] - min_pt[2];
      detections.detections.push_back(detection);
    }

    // Publish the detections
    detection_pub_->publish(detections);
  }

  rclcpp::Subscription<sensor_msgs::msg::PointCloud2>::SharedPtr pointcloud_sub_;
  rclcpp::Publisher<vision_msgs::msg::Detection3DArray>::SharedPtr detection_pub_;
};

int main(int argc, char** argv)
{
  rclcpp::init(argc, argv);
  auto node = std::make_shared<ObjectDetector>();
  rclcpp::spin(node);
  rclcpp::shutdown();
  return 0;
}
```
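To build this node, register it in the package's CMakeLists.txt. A minimal sketch, assuming the package layout used in this guide (the PCL components and install paths may need adjusting to your setup):

```cmake
find_package(PCL REQUIRED COMPONENTS common filters search segmentation)

add_executable(object_detector src/object_detector.cpp)
ament_target_dependencies(object_detector
  rclcpp sensor_msgs vision_msgs cv_bridge pcl_conversions)
target_link_libraries(object_detector ${PCL_LIBRARIES})
target_include_directories(object_detector PRIVATE ${PCL_INCLUDE_DIRS})

install(TARGETS object_detector DESTINATION lib/${PROJECT_NAME})

# Install the Python node and launch files used later in this guide
install(PROGRAMS scripts/motion_planner.py DESTINATION lib/${PROJECT_NAME})
install(DIRECTORY launch DESTINATION share/${PROJECT_NAME})
```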
5. Grasp Control for the Robot Arm
5.1 Motion Planning Node (Python)
`scripts/motion_planner.py`:
```python
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from vision_msgs.msg import Detection3DArray
from geometry_msgs.msg import PoseStamped
from moveit_msgs.srv import GetMotionPlan
from moveit_msgs.msg import MotionPlanRequest, Constraints, JointConstraint
from trajectory_msgs.msg import JointTrajectory

class MotionPlanner(Node):
    def __init__(self):
        super().__init__('motion_planner')
        # Subscribe to detected objects
        self.subscription = self.create_subscription(
            Detection3DArray,
            '/detected_objects',
            self.detection_callback,
            10)
        # MoveIt motion-planning service client
        self.planning_client = self.create_client(
            GetMotionPlan,
            '/plan_kinematic_path')
        # Arm trajectory publisher
        self.trajectory_pub = self.create_publisher(
            JointTrajectory,
            '/arm_controller/joint_trajectory',
            10)
        # Grasp parameters
        self.declare_parameter('pregrasp_offset', 0.15)
        self.declare_parameter('grasp_duration', 2.0)

    def detection_callback(self, msg):
        if not msg.detections:
            return
        # Pick the object closest to the arm base in the XY plane
        target = min(msg.detections, key=lambda d: d.bbox.center.position.x**2 +
                     d.bbox.center.position.y**2)
        # Build a pre-grasp pose above the object
        pregrasp_pose = PoseStamped()
        pregrasp_pose.header = msg.header
        pregrasp_pose.pose = target.bbox.center
        # Offset along Z
        offset = self.get_parameter('pregrasp_offset').value
        pregrasp_pose.pose.position.z += offset
        # Plan the approach motion
        self.plan_and_execute(pregrasp_pose)
        # Execute the grasp (a real system should wait for the motion to finish first)
        self.perform_grasp(target)

    def plan_and_execute(self, target_pose):
        # Build the motion-plan request
        request = MotionPlanRequest()
        request.group_name = 'manipulator'  # set to your MoveIt planning group
        request.workspace_parameters.header = target_pose.header
        request.start_state.is_diff = True
        # Goal constraints: create a fresh JointConstraint per joint
        # (reusing one object would append the same reference repeatedly)
        constraints = Constraints()
        # Example for a 6-DOF arm; adjust joint names and values to your robot
        for i in range(6):
            joint_constraint = JointConstraint()
            joint_constraint.joint_name = f"joint_{i+1}"
            joint_constraint.position = 0.0  # compute from the target pose via IK
            joint_constraint.tolerance_above = 0.1
            joint_constraint.tolerance_below = 0.1
            joint_constraint.weight = 1.0
            constraints.joint_constraints.append(joint_constraint)
        request.goal_constraints.append(constraints)
        # Call the planning service asynchronously; blocking on the future
        # inside a subscription callback would deadlock the executor
        if not self.planning_client.wait_for_service(timeout_sec=1.0):
            self.get_logger().error('Planning service unavailable!')
            return
        srv_request = GetMotionPlan.Request()
        srv_request.motion_plan_request = request
        future = self.planning_client.call_async(srv_request)
        future.add_done_callback(self.plan_done_callback)

    def plan_done_callback(self, future):
        response = future.result().motion_plan_response
        if response.error_code.val == 1:  # SUCCESS
            # Execute the planned trajectory
            self.trajectory_pub.publish(response.trajectory.joint_trajectory)
        else:
            self.get_logger().error('Motion planning failed!')

    def perform_grasp(self, target):
        # Implement the actual grasp sequence here:
        # 1. Move to the precise grasp position
        # 2. Close the gripper
        # 3. Verify the grasp succeeded
        # 4. Move to the place position
        self.get_logger().info(f'Grasping object at ({target.bbox.center.position.x:.2f}, '
                               f'{target.bbox.center.position.y:.2f}, '
                               f'{target.bbox.center.position.z:.2f})')

def main(args=None):
    rclpy.init(args=args)
    planner = MotionPlanner()
    rclpy.spin(planner)
    planner.destroy_node()
    rclpy.shutdown()

if __name__ == '__main__':
    main()
```
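The gripper step in `perform_grasp` can be filled in with a standard `control_msgs` GripperCommand action client. A minimal sketch follows; the action server name `/gripper_controller/gripper_cmd` is an assumption that depends on your gripper driver (check `ros2 action list`):

```python
from rclpy.action import ActionClient
from control_msgs.action import GripperCommand

class GripperClient:
    def __init__(self, node):
        # Action server name is an assumption; verify with `ros2 action list`
        self._client = ActionClient(node, GripperCommand, '/gripper_controller/gripper_cmd')

    def close_gripper(self, position=0.0, max_effort=20.0):
        # position: jaw gap in meters (0.0 = fully closed); max_effort in newtons
        goal = GripperCommand.Goal()
        goal.command.position = position
        goal.command.max_effort = max_effort
        self._client.wait_for_server()
        return self._client.send_goal_async(goal)
```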
6. System Integration and Launch
Create the launch file `launch/grasping.launch.py`:
```python
from launch import LaunchDescription
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory

def generate_launch_description():
    return LaunchDescription([
        # Orbbec camera node
        Node(
            package='astra_camera',
            executable='astra_camera_node',
            name='camera',
            output='screen',
            parameters=[{
                'depth_registration': True,
                'color_depth_synchronization': True,
                'depth_mode': '640x480',
                'color_mode': '640x480',
            }]
        ),
        # Object detection node
        Node(
            package='object_grasping',
            executable='object_detector',
            name='object_detector',
            output='screen'
        ),
        # Motion planning node
        Node(
            package='object_grasping',
            executable='motion_planner.py',
            name='motion_planner',
            output='screen'
        ),
        # RViz visualization (optional)
        Node(
            package='rviz2',
            executable='rviz2',
            name='rviz2',
            arguments=['-d', f'{get_package_share_directory("object_grasping")}/config/grasping.rviz']
        )
    ])
```
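With everything in place, build and launch the full pipeline (assuming the package lives in the same workspace as the driver):

```bash
cd ~/orbbec_ws
colcon build --symlink-install --packages-select object_grasping
source install/setup.bash
ros2 launch object_grasping grasping.launch.py
```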
7. Optimization Suggestions
- Object recognition improvements:
  - Integrate a deep-learning model (YOLO, Mask R-CNN) to improve recognition accuracy; a minimal sketch follows this list
  - Add object classification so that different object types use different grasp strategies
- Grasp planning:
  - Use a Grasp Pose Detection (GPD) algorithm to generate optimal grasp poses
  - Integrate a force-torque sensor for adaptive grasp-force control
- Robustness:
  - Add state monitoring and error-recovery mechanisms
  - Implement visual servoing to improve grasp accuracy
- Performance:
  - Use a GPU to accelerate point cloud processing and deep-learning inference
  - Optimize the motion planner to reduce computation time
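As a starting point for the deep-learning route, the sketch below runs a 2D detector on the color image and back-projects each box center to 3D with the pinhole model: z comes from the registered depth image, then x = (u − cx)·z/fx and y = (v − cy)·z/fy. It assumes the `ultralytics` package is installed (`pip install ultralytics`), that the depth image is registered to the color frame and encoded as 16UC1 in millimeters, and that the topic names match your camera; treat it as a sketch, not a drop-in node.

```python
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge
from ultralytics import YOLO  # assumption: `pip install ultralytics`

class YoloLocator(Node):
    def __init__(self):
        super().__init__('yolo_locator')
        self.bridge = CvBridge()
        self.model = YOLO('yolov8n.pt')
        self.K = None       # camera intrinsics (fx, fy, cx, cy)
        self.depth = None   # latest registered depth image
        self.create_subscription(CameraInfo, '/camera/color/camera_info', self.info_cb, 10)
        self.create_subscription(Image, '/camera/depth/image_raw', self.depth_cb, 10)
        self.create_subscription(Image, '/camera/color/image_raw', self.color_cb, 10)

    def info_cb(self, msg):
        # Intrinsic matrix is row-major 3x3: fx = k[0], fy = k[4], cx = k[2], cy = k[5]
        self.K = (msg.k[0], msg.k[4], msg.k[2], msg.k[5])

    def depth_cb(self, msg):
        # Assumed 16UC1, depth in millimeters, registered to the color frame
        self.depth = self.bridge.imgmsg_to_cv2(msg)

    def color_cb(self, msg):
        if self.K is None or self.depth is None:
            return
        fx, fy, cx, cy = self.K
        image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
        for box in self.model(image, verbose=False)[0].boxes:
            u, v = [int(c) for c in box.xywh[0][:2]]  # box center in pixels
            z = float(self.depth[v, u]) / 1000.0      # mm -> m
            if z <= 0.0:
                continue  # no valid depth at this pixel
            x = (u - cx) * z / fx
            y = (v - cy) * z / fy
            self.get_logger().info(
                f'{self.model.names[int(box.cls)]} at ({x:.2f}, {y:.2f}, {z:.2f}) m')
```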
8. Deployment Notes
- Camera calibration:
  - Complete both intrinsic and extrinsic calibration
  - Re-check the calibration periodically, since mechanical vibration can introduce drift
- Coordinate frame alignment:
  - Set the TF transform from the camera to the robot-arm base correctly; a static-broadcaster sketch follows this list
  - Verify the alignment with an AR marker or a similar method
- Safety:
  - Set sensible workspace limits
  - Add emergency-stop and collision-detection functionality
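If the camera is rigidly mounted, the camera-to-base transform can be published with a static broadcaster. A minimal sketch, where the frame names, translation, and rotation are placeholders you must replace with your hand-eye calibration result:

```python
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import TransformStamped
from tf2_ros import StaticTransformBroadcaster

class CameraTfPublisher(Node):
    def __init__(self):
        super().__init__('camera_tf_publisher')
        self.broadcaster = StaticTransformBroadcaster(self)
        t = TransformStamped()
        t.header.stamp = self.get_clock().now().to_msg()
        t.header.frame_id = 'base_link'    # robot-arm base frame
        t.child_frame_id = 'camera_link'   # camera frame from the driver
        # Placeholder values -- use your calibration result
        t.transform.translation.x = 0.5
        t.transform.translation.y = 0.0
        t.transform.translation.z = 0.6
        t.transform.rotation.w = 1.0       # identity rotation as a placeholder
        self.broadcaster.sendTransform(t)

def main():
    rclpy.init()
    rclpy.spin(CameraTfPublisher())

if __name__ == '__main__':
    main()
```

Equivalently, `ros2 run tf2_ros static_transform_publisher` can publish the same transform from the command line or a launch file.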