// SPDX-License-Identifier: BSD-2-Clause

// NOTE: the header names inside the angle brackets were lost in the original paste;
// the list below is reconstructed from the types actually used in this file.
#include <ctime>
#include <mutex>
#include <atomic>
#include <memory>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <unordered_map>
#include <boost/format.hpp>
#include <boost/thread.hpp>
#include <boost/filesystem.hpp>
#include <boost/algorithm/string.hpp>

#include <ros/ros.h>
#include <geodesy/utm.h>
#include <geodesy/wgs84.h>
#include <pcl_ros/point_cloud.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <tf_conversions/tf_eigen.h>
#include <tf/transform_listener.h>

#include <std_msgs/Time.h>
#include <nav_msgs/Odometry.h>
#include <nmea_msgs/Sentence.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/NavSatFix.h>
#include <sensor_msgs/PointCloud2.h>
#include <geographic_msgs/GeoPointStamped.h>
#include <visualization_msgs/MarkerArray.h>
#include <hdl_graph_slam/FloorCoeffs.h>
#include <hdl_graph_slam/SaveMap.h>
#include <hdl_graph_slam/DumpGraph.h>
#include <hdl_graph_slam/LoadGraph.h>

#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>

#include <hdl_graph_slam/ros_utils.hpp>
#include <hdl_graph_slam/ros_time_hash.hpp>
#include <hdl_graph_slam/graph_slam.hpp>
#include <hdl_graph_slam/keyframe.hpp>
#include <hdl_graph_slam/keyframe_updater.hpp>
#include <hdl_graph_slam/loop_detector.hpp>
#include <hdl_graph_slam/information_matrix_calculator.hpp>
#include <hdl_graph_slam/map_cloud_generator.hpp>
#include <hdl_graph_slam/nmea_sentence_parser.hpp>

#include <g2o/types/slam3d/edge_se3.h>
#include <g2o/types/slam3d/vertex_se3.h>
#include <g2o/edge_se3_plane.hpp>
#include <g2o/edge_se3_priorxy.hpp>
#include <g2o/edge_se3_priorxyz.hpp>

#include <pcl/io/pcd_io.h>
#include <Eigen/Dense>

/*
  Role of the back end (summarized from a GPT explanation):
  1. Build the pose graph: from the keyframe poses and relative transforms produced by the front
     end, build a pose graph in which every keyframe is a node and the relative motion between
     keyframes forms an edge.
  2. Loop closure detection: detect loops (the robot revisiting a previously seen place) to reduce
     accumulated error. A loop is found by comparing the current frame against earlier frames;
     each detected loop adds a loop-closure constraint edge to the graph.
  3. Graph optimization: optimize the whole pose graph with a solver such as g2o or Ceres. The goal
     is to adjust all keyframe poses so that they satisfy all constraints (odometry edges between
     consecutive keyframes plus loop-closure edges), which suppresses sensor noise and drift and
     yields a globally consistent map.
*/
namespace hdl_graph_slam {

class HdlGraphSlamNodelet : public nodelet::Nodelet {
public:
  typedef pcl::PointXYZI PointT;
  typedef message_filters::sync_policies::ApproximateTime<nav_msgs::Odometry, sensor_msgs::PointCloud2> ApproxSyncPolicy;

  HdlGraphSlamNodelet() {}
  virtual ~HdlGraphSlamNodelet() {}

  virtual void onInit() {  // initialization
    // node handles
    nh = getNodeHandle();
    mt_nh = getMTNodeHandle();
    private_nh = getPrivateNodeHandle();

    // init parameters
    published_odom_topic = private_nh.param<std::string>("published_odom_topic", "/odom");
    map_frame_id = private_nh.param<std::string>("map_frame_id", "map");
    odom_frame_id = private_nh.param<std::string>("odom_frame_id", "odom");
    map_cloud_resolution = private_nh.param<double>("map_cloud_resolution", 0.05);
    trans_odom2map.setIdentity();

    max_keyframes_per_update = private_nh.param<int>("max_keyframes_per_update", 10);

    // pointer initialization
    anchor_node = nullptr;  // raw pointers are simply set to nullptr
    anchor_edge = nullptr;
    floor_plane_node = nullptr;
    // smart pointers are re-initialized with reset() so that they own freshly allocated objects
    graph_slam.reset(new GraphSLAM(private_nh.param<std::string>("g2o_solver_type", "lm_var")));
    keyframe_updater.reset(new KeyframeUpdater(private_nh));
    loop_detector.reset(new LoopDetector(private_nh));
    map_cloud_generator.reset(new MapCloudGenerator());
    inf_calclator.reset(new InformationMatrixCalculator(private_nh));
    nmea_parser.reset(new NmeaSentenceParser());

    // read parameters from the parameter server
    // first argument: parameter name; second argument: default used when the parameter is missing
    gps_time_offset = private_nh.param<double>("gps_time_offset", 0.0);                            // offset between GPS timestamps and the SLAM clock, used to synchronize the two
    gps_edge_stddev_xy = private_nh.param<double>("gps_edge_stddev_xy", 10000.0);                  // std-dev of GPS measurements in the XY plane; quantifies GPS uncertainty and hence the weight of GPS edges
    gps_edge_stddev_z = private_nh.param<double>("gps_edge_stddev_z", 10.0);                       // std-dev of GPS measurements along Z
    floor_edge_stddev = private_nh.param<double>("floor_edge_stddev", 10.0);                       // std-dev of the floor-plane constraint
    imu_time_offset = private_nh.param<double>("imu_time_offset", 0.0);                            // offset between IMU timestamps and the SLAM clock
    enable_imu_orientation = private_nh.param<bool>("enable_imu_orientation", false);              // whether IMU orientation measurements are used
    enable_imu_acceleration = private_nh.param<bool>("enable_imu_acceleration", false);            // whether IMU acceleration measurements are used
    imu_orientation_edge_stddev = private_nh.param<double>("imu_orientation_edge_stddev", 0.1);    // std-dev (uncertainty) of IMU orientation measurements
    imu_acceleration_edge_stddev = private_nh.param<double>("imu_acceleration_edge_stddev", 3.0);  // std-dev (uncertainty) of IMU acceleration measurements

    points_topic = private_nh.param<std::string>("points_topic", "/velodyne_points");  // name of the raw point-cloud topic
    /* rostopic info for this topic:
         (slam_env) mj@mj-Lenovo-Legion-R9000P2021H:~$ rostopic info /velodyne_points
         Type: sensor_msgs/PointCloud2
         Publishers:
          * /player (http://mj-Lenovo-Legion-R9000P2021H:35735/)
         Subscribers:
          * /velodyne_nodelet_manager (http://mj-Lenovo-Legion-R9000P2021H:39235/)
          * /rviz_1729770290778318983 (http://mj-Lenovo-Legion-R9000P2021H:34629/)
       The publisher is rosbag, i.e. raw, unprocessed clouds. Why would the back end care about an
       unprocessed cloud topic? It does not actually subscribe to it: the name is only used as the
       frame_id of the "read_until" messages published below, which carry nothing but a timestamp
       and a frame_id. Judging from the read_until_pub notes below, it merely tells upstream nodes
       how long to keep reading, so it can be ignored here. */
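    /* Side note (not in the original code): the *_stddev parameters above are only ever used to
       weight graph edges in the flush_* functions below, following the pattern
       information = Identity / stddev. A minimal sketch of that conversion ("stddev" is a
       placeholder value):

         double stddev = 10.0;                                                // larger stddev -> weaker constraint
         Eigen::Matrix3d information = Eigen::Matrix3d::Identity() / stddev;  // weight used when adding the edge

       Strictly speaking an information matrix is the inverse covariance (1 / stddev^2); this code
       scales by 1 / stddev, so the parameters act as tuning weights rather than physical variances. */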
    // subscribers (topic subscriptions)
    // in the launch file the odometry topic is named "odom"
    odom_sub.reset(new message_filters::Subscriber<nav_msgs::Odometry>(mt_nh, published_odom_topic, 256));      // subscriber for nav_msgs::Odometry messages
    cloud_sub.reset(new message_filters::Subscriber<sensor_msgs::PointCloud2>(mt_nh, "/filtered_points", 32));  // subscriber for sensor_msgs::PointCloud2 messages
    // ApproxSyncPolicy pairs messages whose stamps fall within a small time window; the queue size here is 32
    sync.reset(new message_filters::Synchronizer<ApproxSyncPolicy>(ApproxSyncPolicy(32), *odom_sub, *cloud_sub));  // synchronizer that pairs messages from odom_sub and cloud_sub
    sync->registerCallback(boost::bind(&HdlGraphSlamNodelet::cloud_callback, this, _1, _2));  // cloud_callback is invoked whenever a synchronized pair is available
    // conditions for cloud_callback to fire:
    //   - both odom_sub and cloud_sub have received messages;
    //   - their timestamps are close enough (the ApproximateTime policy decides the tolerance automatically);
    //   - both internal queues are non-empty and contain a matching Odometry / PointCloud2 pair.
    imu_sub = nh.subscribe("/gpsimu_driver/imu_data", 1024, &HdlGraphSlamNodelet::imu_callback, this);                   // IMU data (our robot has no IMU, so this callback can be skipped)
    floor_sub = nh.subscribe("/floor_detection/floor_coeffs", 1024, &HdlGraphSlamNodelet::floor_coeffs_callback, this);  // floor detection (not used on our robot either)

    if(private_nh.param<bool>("enable_gps", true)) {
      // our robot has no GPS, so these callbacks can be skipped as well
      gps_sub = mt_nh.subscribe("/gps/geopoint", 1024, &HdlGraphSlamNodelet::gps_callback, this);
      nmea_sub = mt_nh.subscribe("/gpsimu_driver/nmea_sentence", 1024, &HdlGraphSlamNodelet::nmea_callback, this);
      navsat_sub = mt_nh.subscribe("/gps/navsat", 1024, &HdlGraphSlamNodelet::navsat_callback, this);
    }

    // publishers
    markers_pub = mt_nh.advertise<visualization_msgs::MarkerArray>("/hdl_graph_slam/markers", 16);     // visualization markers, shown e.g. in RViz
    odom2map_pub = mt_nh.advertise<geometry_msgs::TransformStamped>("/hdl_graph_slam/odom2pub", 16);   // transform from the odometry frame to the map frame
    map_points_pub = mt_nh.advertise<sensor_msgs::PointCloud2>("/hdl_graph_slam/map_points", 1, true); // latched: keeps the accumulated map cloud of everything scanned so far
    read_until_pub = mt_nh.advertise<std_msgs::Header>("/hdl_graph_slam/read_until", 32);              // tells upstream readers how far (in time) to keep reading

    // service servers (request/response communication: a client sends a request, the server processes it and returns a response)
    load_service_server = mt_nh.advertiseService("/hdl_graph_slam/load", &HdlGraphSlamNodelet::load_service, this);              // load a previously dumped graph/state
    dump_service_server = mt_nh.advertiseService("/hdl_graph_slam/dump", &HdlGraphSlamNodelet::dump_service, this);              // dump the current graph/state to disk
    save_map_service_server = mt_nh.advertiseService("/hdl_graph_slam/save_map", &HdlGraphSlamNodelet::save_map_service, this);  // save the map as a PCD file

    graph_updated = false;  // tracks whether the graph has been optimized since the last map update
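    /* Example (sketch, not part of this nodelet): calling the "/hdl_graph_slam/save_map" service
       registered above from another node. The request/response field names follow their usage in
       save_map_service() below (req.resolution, req.utm, req.destination, res.success).

         #include <ros/ros.h>
         #include <hdl_graph_slam/SaveMap.h>

         int main(int argc, char** argv) {
           ros::init(argc, argv, "save_map_client");
           ros::NodeHandle nh;
           auto client = nh.serviceClient<hdl_graph_slam::SaveMap>("/hdl_graph_slam/save_map");
           hdl_graph_slam::SaveMap srv;
           srv.request.resolution = 0.05;             // voxel size of the exported cloud
           srv.request.utm = false;                   // keep map-frame coordinates
           srv.request.destination = "/tmp/map.pcd";  // output PCD path
           if(client.call(srv) && srv.response.success) { ROS_INFO("map saved"); }
           return 0;
         }
    */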
    double graph_update_interval = private_nh.param<double>("graph_update_interval", 3.0);           // period of the graph-optimization update, default 3.0 s
    double map_cloud_update_interval = private_nh.param<double>("map_cloud_update_interval", 10.0);  // period of the map-cloud update, default 10.0 s

    // the most important callback is driven by the optimization timer below!
    optimization_timer = mt_nh.createWallTimer(ros::WallDuration(graph_update_interval), &HdlGraphSlamNodelet::optimization_timer_callback, this);           // fires every graph_update_interval seconds and calls optimization_timer_callback
    map_publish_timer = mt_nh.createWallTimer(ros::WallDuration(map_cloud_update_interval), &HdlGraphSlamNodelet::map_points_publish_timer_callback, this);  // fires every map_cloud_update_interval seconds and calls map_points_publish_timer_callback

    // all work is driven by three callbacks: cloud_callback, optimization_timer_callback and map_points_publish_timer_callback
  }

private:
  /**
   * @brief received point clouds are pushed to #keyframe_queue
   * @param odom_msg   laser odometry from the front end
   * @param cloud_msg  filtered point cloud from the front end
   */
  void cloud_callback(const nav_msgs::OdometryConstPtr& odom_msg, const sensor_msgs::PointCloud2::ConstPtr& cloud_msg) {
    // receives the laser odometry and the filtered cloud, decides whether a new keyframe is needed,
    // and if so pushes it onto keyframe_queue; the cloud itself is only converted (ROS -> PCL) and tagged with a frame_id
    const ros::Time& stamp = cloud_msg->header.stamp;  // timestamp of the point cloud
    Eigen::Isometry3d odom = odom2isometry(odom_msg);  // convert the odometry message into an Eigen Isometry3d (a 3D rigid-body transform: rotation + translation)

    pcl::PointCloud<PointT>::Ptr cloud(new pcl::PointCloud<PointT>());
    pcl::fromROSMsg(*cloud_msg, *cloud);  // convert the ROS PointCloud2 message into a PCL cloud
    if(base_frame_id.empty()) {
      base_frame_id = cloud_msg->header.frame_id;  // the frame of the first cloud becomes the reference frame for all keyframe data
    }

    // decide whether this odometry pose is far enough from the last keyframe to warrant a new one
    if(!keyframe_updater->update(odom)) {
      // std::lock_guard locks keyframe_queue_mutex for the rest of this scope and unlocks automatically on scope exit.
      // ROS callbacks may run in different threads, so the lock keeps access to keyframe_queue atomic and
      // prevents data races or inconsistent state while we inspect or modify the queue.
      std::lock_guard<std::mutex> lock(keyframe_queue_mutex);
      if(keyframe_queue.empty()) {
        // no keyframe was generated and none are pending: tell upstream readers to keep feeding
        // point clouds until stamp + 10 s
        std_msgs::Header read_until;
        read_until.stamp = stamp + ros::Duration(10, 0);
        read_until.frame_id = points_topic;
        read_until_pub.publish(read_until);
        read_until.frame_id = "/filtered_points";
        read_until_pub.publish(read_until);
      }

      return;
    }

    double accum_d = keyframe_updater->get_accum_distance();            // accumulated travel distance, used in the keyframe-generation criterion
    KeyFrame::Ptr keyframe(new KeyFrame(stamp, odom, accum_d, cloud));  // new keyframe: timestamp, odometry pose, accumulated distance and the converted cloud

    std::lock_guard<std::mutex> lock(keyframe_queue_mutex);  // lock again so pushing onto keyframe_queue is thread safe
    keyframe_queue.push_back(keyframe);                      // enqueue the keyframe for later processing
  }

  /**
   * @brief this method adds all the keyframes in #keyframe_queue to the pose graph (odometry edges)
   * @return if true, at least one keyframe was added to the pose graph
   *
   * (the author's note asked: what exactly is a pose graph? see the sketch below)
   */
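  /* "What is a pose graph?" — in this code base it is simply the g2o graph owned by graph_slam:
     every keyframe contributes an SE3 vertex and every constraint (odometry, loop closure,
     GPS/IMU/floor prior) contributes an edge; optimization adjusts the vertices to best satisfy
     all edges. A minimal sketch using the same GraphSLAM wrapper API already used in this file
     (poses and the information matrix are made-up values):

       GraphSLAM g("lm_var");
       g2o::VertexSE3* v0 = g.add_se3_node(Eigen::Isometry3d::Identity());
       g2o::VertexSE3* v1 = g.add_se3_node(Eigen::Isometry3d(Eigen::Translation3d(1.0, 0.0, 0.0)));
       Eigen::Isometry3d measured_v0_to_v1(Eigen::Translation3d(1.05, 0.0, 0.0));   // odometry measurement
       g.add_se3_edge(v0, v1, measured_v0_to_v1, Eigen::MatrixXd::Identity(6, 6));  // 6x6 information matrix
       g.optimize(64);                                                              // v0, v1 move to minimize the edge error
  */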
  bool flush_keyframe_queue() {
    std::lock_guard<std::mutex> lock(keyframe_queue_mutex);  // guard the queue against the subscriber thread

    if(keyframe_queue.empty()) {  // nothing to do without pending keyframes
      return false;
    }

    trans_odom2map_mutex.lock();
    Eigen::Isometry3d odom2map(trans_odom2map.cast<double>());
    trans_odom2map_mutex.unlock();

    std::cout << "flush_keyframe_queue - keyframes len:" << keyframes.size() << std::endl;
    int num_processed = 0;
    for(int i = 0; i < std::min<int>(keyframe_queue.size(), max_keyframes_per_update); i++) {
      num_processed = i;

      const auto& keyframe = keyframe_queue[i];
      // new_keyframes will be tested later for loop closure
      new_keyframes.push_back(keyframe);

      // add pose node
      Eigen::Isometry3d odom = odom2map * keyframe->odom;
      keyframe->node = graph_slam->add_se3_node(odom);
      keyframe_hash[keyframe->stamp] = keyframe;

      // fix the first node
      if(keyframes.empty() && new_keyframes.size() == 1) {
        if(private_nh.param<bool>("fix_first_node", false)) {
          Eigen::MatrixXd inf = Eigen::MatrixXd::Identity(6, 6);
          std::stringstream sst(private_nh.param<std::string>("fix_first_node_stddev", "1 1 1 1 1 1"));
          for(int i = 0; i < 6; i++) {
            double stddev = 1.0;
            sst >> stddev;
            inf(i, i) = 1.0 / stddev;
          }

          anchor_node = graph_slam->add_se3_node(Eigen::Isometry3d::Identity());
          anchor_node->setFixed(true);
          anchor_edge = graph_slam->add_se3_edge(anchor_node, keyframe->node, Eigen::Isometry3d::Identity(), inf);
        }
      }

      if(i == 0 && keyframes.empty()) {
        continue;
      }

      // add edge between consecutive keyframes
      const auto& prev_keyframe = i == 0 ? keyframes.back() : keyframe_queue[i - 1];

      Eigen::Isometry3d relative_pose = keyframe->odom.inverse() * prev_keyframe->odom;
      Eigen::MatrixXd information = inf_calclator->calc_information_matrix(keyframe->cloud, prev_keyframe->cloud, relative_pose);
      auto edge = graph_slam->add_se3_edge(keyframe->node, prev_keyframe->node, relative_pose, information);
      graph_slam->add_robust_kernel(edge, private_nh.param<std::string>("odometry_edge_robust_kernel", "NONE"), private_nh.param<double>("odometry_edge_robust_kernel_size", 1.0));
    }

    std_msgs::Header read_until;
    read_until.stamp = keyframe_queue[num_processed]->stamp + ros::Duration(10, 0);
    read_until.frame_id = points_topic;
    read_until_pub.publish(read_until);
    read_until.frame_id = "/filtered_points";
    read_until_pub.publish(read_until);

    keyframe_queue.erase(keyframe_queue.begin(), keyframe_queue.begin() + num_processed + 1);
    return true;
  }

  void nmea_callback(const nmea_msgs::SentenceConstPtr& nmea_msg) {
    GPRMC grmc = nmea_parser->parse(nmea_msg->sentence);

    if(grmc.status != 'A') {
      return;
    }

    geographic_msgs::GeoPointStampedPtr gps_msg(new geographic_msgs::GeoPointStamped());
    gps_msg->header = nmea_msg->header;
    gps_msg->position.latitude = grmc.latitude;
    gps_msg->position.longitude = grmc.longitude;
    gps_msg->position.altitude = NAN;

    gps_callback(gps_msg);
  }

  void navsat_callback(const sensor_msgs::NavSatFixConstPtr& navsat_msg) {
    geographic_msgs::GeoPointStampedPtr gps_msg(new geographic_msgs::GeoPointStamped());
    gps_msg->header = navsat_msg->header;
    gps_msg->position.latitude = navsat_msg->latitude;
    gps_msg->position.longitude = navsat_msg->longitude;
    gps_msg->position.altitude = navsat_msg->altitude;
    gps_callback(gps_msg);
  }

  /**
   * @brief received gps data is added to #gps_queue
   * @param gps_msg
   */
  void gps_callback(const geographic_msgs::GeoPointStampedPtr& gps_msg) {
    std::lock_guard<std::mutex> lock(gps_queue_mutex);
    gps_msg->header.stamp += ros::Duration(gps_time_offset);
    gps_queue.push_back(gps_msg);
  }
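  /* flush_gps_queue() below anchors keyframes with GPS by converting (latitude, longitude, altitude)
     into UTM (easting, northing, altitude) and subtracting the first fix (zero_utm) so that the map
     origin stays near zero. A standalone sketch of that conversion using the same geodesy calls
     (coordinates are made up):

       geographic_msgs::GeoPoint wgs;
       wgs.latitude = 35.0;
       wgs.longitude = 139.0;
       wgs.altitude = 10.0;
       geodesy::UTMPoint utm;
       geodesy::fromMsg(wgs, utm);                                    // WGS84 -> UTM
       Eigen::Vector3d xyz(utm.easting, utm.northing, utm.altitude);  // metres within the UTM zone
       // the first fix becomes the map origin: xyz -= zero_utm;
  */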
  /**
   * @brief associates GPS measurements with keyframes and adds them to the pose graph as position priors
   * @return true if at least one GPS prior edge was added
   */
  bool flush_gps_queue() {
    std::lock_guard<std::mutex> lock(gps_queue_mutex);

    if(keyframes.empty() || gps_queue.empty()) {
      return false;
    }

    bool updated = false;
    auto gps_cursor = gps_queue.begin();

    for(auto& keyframe : keyframes) {
      if(keyframe->stamp > gps_queue.back()->header.stamp) {
        break;
      }

      if(keyframe->stamp < (*gps_cursor)->header.stamp || keyframe->utm_coord) {
        continue;
      }

      // find the gps data which is closest to the keyframe
      auto closest_gps = gps_cursor;
      for(auto gps = gps_cursor; gps != gps_queue.end(); gps++) {
        auto dt = ((*closest_gps)->header.stamp - keyframe->stamp).toSec();
        auto dt2 = ((*gps)->header.stamp - keyframe->stamp).toSec();
        if(std::abs(dt) < std::abs(dt2)) {
          break;
        }

        closest_gps = gps;
      }

      // if the time residual between the gps and keyframe is too large, skip it
      gps_cursor = closest_gps;
      if(0.2 < std::abs(((*closest_gps)->header.stamp - keyframe->stamp).toSec())) {
        continue;
      }

      // convert (latitude, longitude, altitude) -> (easting, northing, altitude) in UTM coordinate
      geodesy::UTMPoint utm;
      geodesy::fromMsg((*closest_gps)->position, utm);
      Eigen::Vector3d xyz(utm.easting, utm.northing, utm.altitude);

      // the first gps data position will be the origin of the map
      if(!zero_utm) {
        zero_utm = xyz;
      }
      xyz -= (*zero_utm);

      keyframe->utm_coord = xyz;

      g2o::OptimizableGraph::Edge* edge;
      if(std::isnan(xyz.z())) {
        Eigen::Matrix2d information_matrix = Eigen::Matrix2d::Identity() / gps_edge_stddev_xy;
        edge = graph_slam->add_se3_prior_xy_edge(keyframe->node, xyz.head<2>(), information_matrix);
      } else {
        Eigen::Matrix3d information_matrix = Eigen::Matrix3d::Identity();
        information_matrix.block<2, 2>(0, 0) /= gps_edge_stddev_xy;
        information_matrix(2, 2) /= gps_edge_stddev_z;
        edge = graph_slam->add_se3_prior_xyz_edge(keyframe->node, xyz, information_matrix);
      }
      graph_slam->add_robust_kernel(edge, private_nh.param<std::string>("gps_edge_robust_kernel", "NONE"), private_nh.param<double>("gps_edge_robust_kernel_size", 1.0));

      updated = true;
    }

    auto remove_loc = std::upper_bound(gps_queue.begin(), gps_queue.end(), keyframes.back()->stamp, [=](const ros::Time& stamp, const geographic_msgs::GeoPointStampedConstPtr& geopoint) { return stamp < geopoint->header.stamp; });
    gps_queue.erase(gps_queue.begin(), remove_loc);
    return updated;
  }

  void imu_callback(const sensor_msgs::ImuPtr& imu_msg) {
    if(!enable_imu_orientation && !enable_imu_acceleration) {
      return;
    }

    std::lock_guard<std::mutex> lock(imu_queue_mutex);
    imu_msg->header.stamp += ros::Duration(imu_time_offset);
    imu_queue.push_back(imu_msg);
  }
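  /* flush_imu_queue() below rotates the raw IMU readings into the robot base frame before using
     them as graph priors. A minimal standalone sketch of that tf lookup ("imu_link" / "base_link"
     are assumed frame names; the real code reuses the member tf_listener and base_frame_id):

       tf::TransformListener listener;
       geometry_msgs::Vector3Stamped acc_imu, acc_base;
       acc_imu.header.frame_id = "imu_link";
       acc_imu.header.stamp = ros::Time(0);      // "latest available" transform
       acc_imu.vector.z = 9.81;                  // gravity measured along the IMU z axis
       listener.waitForTransform("base_link", "imu_link", ros::Time(0), ros::Duration(1.0));
       listener.transformVector("base_link", acc_imu, acc_base);   // acceleration expressed in base_link
  */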
  bool flush_imu_queue() {
    std::lock_guard<std::mutex> lock(imu_queue_mutex);
    if(keyframes.empty() || imu_queue.empty() || base_frame_id.empty()) {
      return false;
    }

    bool updated = false;
    auto imu_cursor = imu_queue.begin();

    for(auto& keyframe : keyframes) {
      if(keyframe->stamp > imu_queue.back()->header.stamp) {
        break;
      }

      if(keyframe->stamp < (*imu_cursor)->header.stamp || keyframe->acceleration) {
        continue;
      }

      // find imu data which is closest to the keyframe
      auto closest_imu = imu_cursor;
      for(auto imu = imu_cursor; imu != imu_queue.end(); imu++) {
        auto dt = ((*closest_imu)->header.stamp - keyframe->stamp).toSec();
        auto dt2 = ((*imu)->header.stamp - keyframe->stamp).toSec();
        if(std::abs(dt) < std::abs(dt2)) {
          break;
        }

        closest_imu = imu;
      }

      imu_cursor = closest_imu;
      if(0.2 < std::abs(((*closest_imu)->header.stamp - keyframe->stamp).toSec())) {
        continue;
      }

      const auto& imu_ori = (*closest_imu)->orientation;
      const auto& imu_acc = (*closest_imu)->linear_acceleration;

      geometry_msgs::Vector3Stamped acc_imu;
      geometry_msgs::Vector3Stamped acc_base;
      geometry_msgs::QuaternionStamped quat_imu;
      geometry_msgs::QuaternionStamped quat_base;

      quat_imu.header.frame_id = acc_imu.header.frame_id = (*closest_imu)->header.frame_id;
      quat_imu.header.stamp = acc_imu.header.stamp = ros::Time(0);
      acc_imu.vector = (*closest_imu)->linear_acceleration;
      quat_imu.quaternion = (*closest_imu)->orientation;

      try {
        tf_listener.transformVector(base_frame_id, acc_imu, acc_base);
        tf_listener.transformQuaternion(base_frame_id, quat_imu, quat_base);
      } catch(std::exception& e) {
        std::cerr << "failed to find transform!!" << std::endl;
        return false;
      }

      keyframe->acceleration = Eigen::Vector3d(acc_base.vector.x, acc_base.vector.y, acc_base.vector.z);
      keyframe->orientation = Eigen::Quaterniond(quat_base.quaternion.w, quat_base.quaternion.x, quat_base.quaternion.y, quat_base.quaternion.z);
      if(keyframe->orientation->w() < 0.0) {
        keyframe->orientation->coeffs() = -keyframe->orientation->coeffs();  // keep the quaternion in the w >= 0 hemisphere
      }

      if(enable_imu_orientation) {
        Eigen::MatrixXd info = Eigen::MatrixXd::Identity(3, 3) / imu_orientation_edge_stddev;
        auto edge = graph_slam->add_se3_prior_quat_edge(keyframe->node, *keyframe->orientation, info);
        graph_slam->add_robust_kernel(edge, private_nh.param<std::string>("imu_orientation_edge_robust_kernel", "NONE"), private_nh.param<double>("imu_orientation_edge_robust_kernel_size", 1.0));
      }

      if(enable_imu_acceleration) {
        Eigen::MatrixXd info = Eigen::MatrixXd::Identity(3, 3) / imu_acceleration_edge_stddev;
        g2o::OptimizableGraph::Edge* edge = graph_slam->add_se3_prior_vec_edge(keyframe->node, -Eigen::Vector3d::UnitZ(), *keyframe->acceleration, info);
        graph_slam->add_robust_kernel(edge, private_nh.param<std::string>("imu_acceleration_edge_robust_kernel", "NONE"), private_nh.param<double>("imu_acceleration_edge_robust_kernel_size", 1.0));
      }
      updated = true;
    }

    auto remove_loc = std::upper_bound(imu_queue.begin(), imu_queue.end(), keyframes.back()->stamp, [=](const ros::Time& stamp, const sensor_msgs::ImuConstPtr& imu) { return stamp < imu->header.stamp; });
    imu_queue.erase(imu_queue.begin(), remove_loc);

    return updated;
  }

  /** (floor detection is not used on our robot, so this callback can be skipped)
   * @brief received floor coefficients are added to #floor_coeffs_queue
   * @param floor_coeffs_msg
   */
  void floor_coeffs_callback(const hdl_graph_slam::FloorCoeffsConstPtr& floor_coeffs_msg) {
    if(floor_coeffs_msg->coeffs.empty()) {
      return;
    }

    std::lock_guard<std::mutex> lock(floor_coeffs_queue_mutex);
    floor_coeffs_queue.push_back(floor_coeffs_msg);
  }
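  /* flush_floor_queue() below ties keyframes to a single global floor plane. The plane is stored as
     the four coefficients (a, b, c, d) of a*x + b*y + c*z + d = 0, and each detected floor becomes
     an SE3 -> plane edge. Minimal sketch with this file's GraphSLAM API ("keyframe_node" is a
     placeholder for an existing g2o::VertexSE3*; the measured coefficients are made up):

       g2o::VertexPlane* floor = graph_slam->add_plane_node(Eigen::Vector4d(0.0, 0.0, 1.0, 0.0));  // the z = 0 plane
       Eigen::Vector4d measured(0.01, 0.0, 0.999, -0.02);                     // floor as seen from one keyframe
       Eigen::Matrix3d info = Eigen::Matrix3d::Identity() * (1.0 / floor_edge_stddev);
       graph_slam->add_se3_plane_edge(keyframe_node, floor, measured, info);  // constrains the keyframe's roll/pitch/z
  */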
  /** (floor detection is not used on our robot, so this method can be skipped)
   * @brief this method associates floor coefficients messages with registered keyframes, and then adds the associated coeffs to the pose graph
   * @return if true, at least one floor plane edge is added to the pose graph
   */
  bool flush_floor_queue() {
    std::lock_guard<std::mutex> lock(floor_coeffs_queue_mutex);

    if(keyframes.empty()) {
      return false;
    }

    const auto& latest_keyframe_stamp = keyframes.back()->stamp;

    bool updated = false;
    for(const auto& floor_coeffs : floor_coeffs_queue) {
      if(floor_coeffs->header.stamp > latest_keyframe_stamp) {
        break;
      }

      auto found = keyframe_hash.find(floor_coeffs->header.stamp);
      if(found == keyframe_hash.end()) {
        continue;
      }

      if(!floor_plane_node) {
        floor_plane_node = graph_slam->add_plane_node(Eigen::Vector4d(0.0, 0.0, 1.0, 0.0));
        floor_plane_node->setFixed(true);
      }

      const auto& keyframe = found->second;

      Eigen::Vector4d coeffs(floor_coeffs->coeffs[0], floor_coeffs->coeffs[1], floor_coeffs->coeffs[2], floor_coeffs->coeffs[3]);
      Eigen::Matrix3d information = Eigen::Matrix3d::Identity() * (1.0 / floor_edge_stddev);
      auto edge = graph_slam->add_se3_plane_edge(keyframe->node, floor_plane_node, coeffs, information);
      graph_slam->add_robust_kernel(edge, private_nh.param<std::string>("floor_edge_robust_kernel", "NONE"), private_nh.param<double>("floor_edge_robust_kernel_size", 1.0));

      keyframe->floor_coeffs = coeffs;

      updated = true;
    }

    auto remove_loc = std::upper_bound(floor_coeffs_queue.begin(), floor_coeffs_queue.end(), latest_keyframe_stamp, [=](const ros::Time& stamp, const hdl_graph_slam::FloorCoeffsConstPtr& coeffs) { return stamp < coeffs->header.stamp; });
    floor_coeffs_queue.erase(floor_coeffs_queue.begin(), remove_loc);

    return updated;
  }

  /**
   * @brief generate map point cloud and publish it
   * @param event
   */
  void map_points_publish_timer_callback(const ros::WallTimerEvent& event) {
    if(!map_points_pub.getNumSubscribers() || !graph_updated) {
      return;
    }

    std::vector<KeyFrameSnapshot::Ptr> snapshot;

    keyframes_snapshot_mutex.lock();
    snapshot = keyframes_snapshot;
    keyframes_snapshot_mutex.unlock();

    auto cloud = map_cloud_generator->generate(snapshot, map_cloud_resolution);
    if(!cloud) {
      return;
    }

    cloud->header.frame_id = map_frame_id;
    cloud->header.stamp = snapshot.back()->cloud->header.stamp;

    sensor_msgs::PointCloud2Ptr cloud_msg(new sensor_msgs::PointCloud2());
    pcl::toROSMsg(*cloud, *cloud_msg);

    map_points_pub.publish(cloud_msg);
  }

  /**
   * @brief this method adds all the data in the queues to the pose graph, and then optimizes the pose graph
   * @param event  ROS timer event: trigger time, time of the previous trigger, timer period, etc.
   */
  void optimization_timer_callback(const ros::WallTimerEvent& event) {
    std::lock_guard<std::mutex> lock(main_thread_mutex);  // protects all graph-related ("main thread") members

    // add keyframes and floor coeffs in the queues to the pose graph
    bool keyframe_updated = flush_keyframe_queue();  // move pending keyframes from keyframe_queue into the pose graph; true if anything was added

    if(!keyframe_updated) {
      // no new keyframes: ask upstream readers to keep feeding data for another 30 s,
      // published once for the raw points topic and once for /filtered_points
      std_msgs::Header read_until;
      read_until.stamp = ros::Time::now() + ros::Duration(30, 0);
      read_until.frame_id = points_topic;
      read_until_pub.publish(read_until);
      read_until.frame_id = "/filtered_points";
      read_until_pub.publish(read_until);
    }

    // note the bitwise & (not &&): every flush_* function is called even if an earlier one already
    // reported an update; if none of the queues produced anything new, there is nothing to optimize
    if(!keyframe_updated & !flush_floor_queue() & !flush_gps_queue() & !flush_imu_queue()) {
      return;
    }

    // loop detection
    // detect loop closures between the historical keyframes and the newly added ones; a loop closure
    // (the robot recognizing a previously visited place) becomes an extra constraint edge in the graph
    //   keyframes:     all historical keyframes
    //   new_keyframes: keyframes added in this update
    //   *graph_slam:   the graph instance the loop constraints will be added to
    // (author's notes, via GPT) the pose graph consists of:
    //   nodes: each node represents a robot pose (or an environment feature such as a landmark);
    //   edges: constraints between nodes, coming from sensor measurements (e.g. laser odometry) or
    //          from loop closures, weighted by their confidence/uncertainty;
    //   data association: matching features and keyframes determines which nodes get connected;
    //   optimization: minimizing the total constraint error (least squares, gradient descent, etc.)
    //          makes the map and the trajectory as consistent as possible, improving accuracy.
    // loop detection may fail when there are too few keyframes, the environment changed significantly,
    // feature matching fails, localization is too inaccurate, parameters are badly tuned, the robot
    // moves too fast, or memory/computation is insufficient.
    std::vector<Loop::Ptr> loops = loop_detector->detect(keyframes, new_keyframes, *graph_slam);
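    /* Side note on the information matrices used for odometry and loop-closure edges: they are 6x6
       weight matrices over g2o's EdgeSE3 error (translation block first, rotation block second),
       conceptually the inverse of the measurement covariance, so confident measurements pull harder
       during optimization. The real matrix comes from inf_calclator->calc_information_matrix(),
       which scores how well the two clouds overlap; a hand-built equivalent with made-up numbers:

         Eigen::MatrixXd inf = Eigen::MatrixXd::Identity(6, 6);
         inf.topLeftCorner(3, 3) *= 1.0 / (0.05 * 0.05);      // ~5 cm translational uncertainty
         inf.bottomRightCorner(3, 3) *= 1.0 / (0.02 * 0.02);  // ~0.02 rad rotational uncertainty
         // graph_slam->add_se3_edge(node_a, node_b, relative_pose, inf);
    */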
    // for every detected loop, add a loop-closure edge: compute the relative pose between the two
    // keyframes and an information matrix so that the edge is properly weighted in the optimization
    for(const auto& loop : loops) {
      Eigen::Isometry3d relpose(loop->relative_pose.cast<double>());
      Eigen::MatrixXd information_matrix = inf_calclator->calc_information_matrix(loop->key1->cloud, loop->key2->cloud, relpose);
      auto edge = graph_slam->add_se3_edge(loop->key1->node, loop->key2->node, relpose, information_matrix);
      graph_slam->add_robust_kernel(edge, private_nh.param<std::string>("loop_closure_edge_robust_kernel", "NONE"), private_nh.param<double>("loop_closure_edge_robust_kernel_size", 1.0));
    }

    std::copy(new_keyframes.begin(), new_keyframes.end(), std::back_inserter(keyframes));  // merge the new keyframes into the keyframe list
    new_keyframes.clear();

    // move the first node anchor position to the current estimate of the first node pose
    // so the first node moves freely while trying to stay around the origin
    if(anchor_node && private_nh.param<bool>("fix_first_node_adaptive", true)) {  // fix_first_node_adaptive is set to true in the launch files
      Eigen::Isometry3d anchor_target = static_cast<g2o::VertexSE3*>(anchor_edge->vertices()[1])->estimate();
      anchor_node->setEstimate(anchor_target);
    }

    // optimize the pose graph
    int num_iterations = private_nh.param<int>("g2o_solver_num_iterations", 1024);  // the launch files use 512
    graph_slam->optimize(num_iterations);

    // publish tf: correction between the odometry estimate of the latest keyframe and its optimized pose
    const auto& keyframe = keyframes.back();
    Eigen::Isometry3d trans = keyframe->node->estimate() * keyframe->odom.inverse();
    trans_odom2map_mutex.lock();
    trans_odom2map = trans.matrix().cast<float>();
    trans_odom2map_mutex.unlock();

    std::vector<KeyFrameSnapshot::Ptr> snapshot(keyframes.size());
    std::transform(keyframes.begin(), keyframes.end(), snapshot.begin(), [=](const KeyFrame::Ptr& k) { return std::make_shared<KeyFrameSnapshot>(k); });

    keyframes_snapshot_mutex.lock();
    keyframes_snapshot.swap(snapshot);
    keyframes_snapshot_mutex.unlock();
    graph_updated = true;

    if(odom2map_pub.getNumSubscribers()) {
      geometry_msgs::TransformStamped ts = matrix2transform(keyframe->stamp, trans.matrix().cast<float>(), map_frame_id, odom_frame_id);
      odom2map_pub.publish(ts);  // publish the odom-to-map correction
    }

    if(markers_pub.getNumSubscribers()) {
      auto markers = create_marker_array(ros::Time::now());
      markers_pub.publish(markers);
    }
  }
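  /* The transform published above is the odom-to-map correction: trans = (optimized pose in map) *
     (odometry pose)^-1, so map_T_base = trans * odom_T_base for any later odometry estimate. A
     consumer sketch (assumes tf2_eigen is available; the topic name and message type match
     odom2map_pub above):

       #include <tf2_eigen/tf2_eigen.h>

       void odom2map_cb(const geometry_msgs::TransformStamped& ts) {
         Eigen::Isometry3d odom2map = tf2::transformToEigen(ts);
         // Eigen::Isometry3d base_in_map = odom2map * base_in_odom;  // correct the current odometry pose
       }
       // ros::Subscriber sub = nh.subscribe("/hdl_graph_slam/odom2pub", 1, odom2map_cb);
  */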
  /**
   * @brief create visualization marker
   * @param stamp
   * @return
   */
  visualization_msgs::MarkerArray create_marker_array(const ros::Time& stamp) const {
    visualization_msgs::MarkerArray markers;
    markers.markers.resize(4);

    // node markers
    visualization_msgs::Marker& traj_marker = markers.markers[0];
    traj_marker.header.frame_id = "map";
    traj_marker.header.stamp = stamp;
    traj_marker.ns = "nodes";
    traj_marker.id = 0;
    traj_marker.type = visualization_msgs::Marker::SPHERE_LIST;

    traj_marker.pose.orientation.w = 1.0;
    traj_marker.scale.x = traj_marker.scale.y = traj_marker.scale.z = 0.5;

    visualization_msgs::Marker& imu_marker = markers.markers[1];
    imu_marker.header = traj_marker.header;
    imu_marker.ns = "imu";
    imu_marker.id = 1;
    imu_marker.type = visualization_msgs::Marker::SPHERE_LIST;

    imu_marker.pose.orientation.w = 1.0;
    imu_marker.scale.x = imu_marker.scale.y = imu_marker.scale.z = 0.75;

    traj_marker.points.resize(keyframes.size());
    traj_marker.colors.resize(keyframes.size());
    for(int i = 0; i < keyframes.size(); i++) {
      Eigen::Vector3d pos = keyframes[i]->node->estimate().translation();
      traj_marker.points[i].x = pos.x();
      traj_marker.points[i].y = pos.y();
      traj_marker.points[i].z = pos.z();

      double p = static_cast<double>(i) / keyframes.size();
      traj_marker.colors[i].r = 1.0 - p;
      traj_marker.colors[i].g = p;
      traj_marker.colors[i].b = 0.0;
      traj_marker.colors[i].a = 1.0;

      if(keyframes[i]->acceleration) {
        Eigen::Vector3d pos = keyframes[i]->node->estimate().translation();
        geometry_msgs::Point point;
        point.x = pos.x();
        point.y = pos.y();
        point.z = pos.z();

        std_msgs::ColorRGBA color;
        color.r = 0.0;
        color.g = 0.0;
        color.b = 1.0;
        color.a = 0.1;

        imu_marker.points.push_back(point);
        imu_marker.colors.push_back(color);
      }
    }

    // edge markers
    visualization_msgs::Marker& edge_marker = markers.markers[2];
    edge_marker.header.frame_id = "map";
    edge_marker.header.stamp = stamp;
    edge_marker.ns = "edges";
    edge_marker.id = 2;
    edge_marker.type = visualization_msgs::Marker::LINE_LIST;

    edge_marker.pose.orientation.w = 1.0;
    edge_marker.scale.x = 0.05;

    edge_marker.points.resize(graph_slam->graph->edges().size() * 2);
    edge_marker.colors.resize(graph_slam->graph->edges().size() * 2);

    auto edge_itr = graph_slam->graph->edges().begin();
    for(int i = 0; edge_itr != graph_slam->graph->edges().end(); edge_itr++, i++) {
      g2o::HyperGraph::Edge* edge = *edge_itr;
      g2o::EdgeSE3* edge_se3 = dynamic_cast<g2o::EdgeSE3*>(edge);
      if(edge_se3) {
        g2o::VertexSE3* v1 = dynamic_cast<g2o::VertexSE3*>(edge_se3->vertices()[0]);
        g2o::VertexSE3* v2 = dynamic_cast<g2o::VertexSE3*>(edge_se3->vertices()[1]);
        Eigen::Vector3d pt1 = v1->estimate().translation();
        Eigen::Vector3d pt2 = v2->estimate().translation();

        edge_marker.points[i * 2].x = pt1.x();
        edge_marker.points[i * 2].y = pt1.y();
        edge_marker.points[i * 2].z = pt1.z();
        edge_marker.points[i * 2 + 1].x = pt2.x();
        edge_marker.points[i * 2 + 1].y = pt2.y();
        edge_marker.points[i * 2 + 1].z = pt2.z();

        double p1 = static_cast<double>(v1->id()) / graph_slam->graph->vertices().size();
        double p2 = static_cast<double>(v2->id()) / graph_slam->graph->vertices().size();
        edge_marker.colors[i * 2].r = 1.0 - p1;
        edge_marker.colors[i * 2].g = p1;
        edge_marker.colors[i * 2].a = 1.0;
        edge_marker.colors[i * 2 + 1].r = 1.0 - p2;
        edge_marker.colors[i * 2 + 1].g = p2;
        edge_marker.colors[i * 2 + 1].a = 1.0;

        if(std::abs(v1->id() - v2->id()) > 2) {
          edge_marker.points[i * 2].z += 0.5;
          edge_marker.points[i * 2 + 1].z += 0.5;
        }

        continue;
      }

      g2o::EdgeSE3Plane* edge_plane = dynamic_cast<g2o::EdgeSE3Plane*>(edge);
      if(edge_plane) {
        g2o::VertexSE3* v1 = dynamic_cast<g2o::VertexSE3*>(edge_plane->vertices()[0]);
        Eigen::Vector3d pt1 = v1->estimate().translation();
        Eigen::Vector3d pt2(pt1.x(), pt1.y(), 0.0);

        edge_marker.points[i * 2].x = pt1.x();
        edge_marker.points[i * 2].y = pt1.y();
        edge_marker.points[i * 2].z = pt1.z();
        edge_marker.points[i * 2 + 1].x = pt2.x();
        edge_marker.points[i * 2 + 1].y = pt2.y();
        edge_marker.points[i * 2 + 1].z = pt2.z();

        edge_marker.colors[i * 2].b = 1.0;
        edge_marker.colors[i * 2].a = 1.0;
        edge_marker.colors[i * 2 + 1].b = 1.0;
        edge_marker.colors[i * 2 + 1].a = 1.0;

        continue;
      }
      g2o::EdgeSE3PriorXY* edge_priori_xy = dynamic_cast<g2o::EdgeSE3PriorXY*>(edge);
      if(edge_priori_xy) {
        g2o::VertexSE3* v1 = dynamic_cast<g2o::VertexSE3*>(edge_priori_xy->vertices()[0]);
        Eigen::Vector3d pt1 = v1->estimate().translation();
        Eigen::Vector3d pt2 = Eigen::Vector3d::Zero();
        pt2.head<2>() = edge_priori_xy->measurement();

        edge_marker.points[i * 2].x = pt1.x();
        edge_marker.points[i * 2].y = pt1.y();
        edge_marker.points[i * 2].z = pt1.z() + 0.5;
        edge_marker.points[i * 2 + 1].x = pt2.x();
        edge_marker.points[i * 2 + 1].y = pt2.y();
        edge_marker.points[i * 2 + 1].z = pt2.z() + 0.5;

        edge_marker.colors[i * 2].r = 1.0;
        edge_marker.colors[i * 2].a = 1.0;
        edge_marker.colors[i * 2 + 1].r = 1.0;
        edge_marker.colors[i * 2 + 1].a = 1.0;

        continue;
      }

      g2o::EdgeSE3PriorXYZ* edge_priori_xyz = dynamic_cast<g2o::EdgeSE3PriorXYZ*>(edge);
      if(edge_priori_xyz) {
        g2o::VertexSE3* v1 = dynamic_cast<g2o::VertexSE3*>(edge_priori_xyz->vertices()[0]);
        Eigen::Vector3d pt1 = v1->estimate().translation();
        Eigen::Vector3d pt2 = edge_priori_xyz->measurement();

        edge_marker.points[i * 2].x = pt1.x();
        edge_marker.points[i * 2].y = pt1.y();
        edge_marker.points[i * 2].z = pt1.z() + 0.5;
        edge_marker.points[i * 2 + 1].x = pt2.x();
        edge_marker.points[i * 2 + 1].y = pt2.y();
        edge_marker.points[i * 2 + 1].z = pt2.z();

        edge_marker.colors[i * 2].r = 1.0;
        edge_marker.colors[i * 2].a = 1.0;
        edge_marker.colors[i * 2 + 1].r = 1.0;
        edge_marker.colors[i * 2 + 1].a = 1.0;

        continue;
      }
    }

    // sphere: loop-closure search radius around the latest keyframe
    visualization_msgs::Marker& sphere_marker = markers.markers[3];
    sphere_marker.header.frame_id = "map";
    sphere_marker.header.stamp = stamp;
    sphere_marker.ns = "loop_close_radius";
    sphere_marker.id = 3;
    sphere_marker.type = visualization_msgs::Marker::SPHERE;

    if(!keyframes.empty()) {
      Eigen::Vector3d pos = keyframes.back()->node->estimate().translation();
      sphere_marker.pose.position.x = pos.x();
      sphere_marker.pose.position.y = pos.y();
      sphere_marker.pose.position.z = pos.z();
    }
    sphere_marker.pose.orientation.w = 1.0;
    sphere_marker.scale.x = sphere_marker.scale.y = sphere_marker.scale.z = loop_detector->get_distance_thresh() * 2.0;

    sphere_marker.color.r = 1.0;
    sphere_marker.color.a = 0.3;

    return markers;
  }
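  /* load_service() below expects the directory layout written by dump_service() further down
     (inferred from the save/load code in this file; per-keyframe contents are handled by
     KeyFrame::save and the KeyFrame directory constructor):
       <dir>/graph.g2o          the serialized g2o pose graph
       <dir>/000000 ... 00000N  one sub-directory per keyframe
       <dir>/special_nodes.csv  ids of anchor_node, anchor_edge and floor_node (-1 if unused)
       <dir>/zero_utm           first GPS fix, only written when GPS was used
  */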
  /**
   * @brief load all data from a directory
   * @param req
   * @param res
   * @return
   */
  bool load_service(hdl_graph_slam::LoadGraphRequest& req, hdl_graph_slam::LoadGraphResponse& res) {
    std::lock_guard<std::mutex> lock(main_thread_mutex);

    std::string directory = req.path;

    std::cout << "loading data from:" << directory << std::endl;

    // Load graph.
    graph_slam->load(directory + "/graph.g2o");

    // Iterate over the items in this directory and count how many sub directories there are.
    // This will give an upper limit on how many keyframe indexes we can expect to find.
    boost::filesystem::directory_iterator begin(directory), end;
    int max_directory_count = std::count_if(begin, end, [](const boost::filesystem::directory_entry& d) {
      return boost::filesystem::is_directory(d.path());  // only count directories
    });

    // Load keyframes by looping through the keyframe indexes that we expect to see.
    for(int i = 0; i < max_directory_count; i++) {
      std::stringstream sst;
      sst << boost::format("%s/%06d") % directory % i;
      std::string key_frame_directory = sst.str();

      // If key_frame_directory doesn't exist, we have run out, so stop looking.
      if(!boost::filesystem::is_directory(key_frame_directory)) {
        break;
      }

      KeyFrame::Ptr keyframe(new KeyFrame(key_frame_directory, graph_slam->graph.get()));
      keyframes.push_back(keyframe);
    }
    std::cout << "loaded " << keyframes.size() << " keyframes" << std::endl;

    // Load special nodes.
    std::ifstream ifs(directory + "/special_nodes.csv");
    if(!ifs) {
      return false;
    }
    while(!ifs.eof()) {
      std::string token;
      ifs >> token;

      if(token == "anchor_node") {
        int id = 0;
        ifs >> id;
        anchor_node = static_cast<g2o::VertexSE3*>(graph_slam->graph->vertex(id));
      } else if(token == "anchor_edge") {
        int id = 0;
        ifs >> id;
        // We have no way of directly pulling the edge based on the edge ID that we have just read in.
        // Fortunately anchor edges are always attached to the anchor node, so we can iterate over
        // the edges listed against that node until we find the one that matches our ID.
        if(anchor_node) {
          auto edges = anchor_node->edges();

          for(auto e : edges) {
            int edgeID = e->id();
            if(edgeID == id) {
              anchor_edge = static_cast<g2o::EdgeSE3*>(e);

              break;
            }
          }
        }
      } else if(token == "floor_node") {
        int id = 0;
        ifs >> id;
        floor_plane_node = static_cast<g2o::VertexPlane*>(graph_slam->graph->vertex(id));
      }
    }

    // check if we have any non null special nodes, if all are null then lets not bother.
    if(anchor_node->id() || anchor_edge->id() || floor_plane_node->id()) {
      std::cout << "loaded special nodes - ";

      // check exists before printing information about each special node
      if(anchor_node->id()) {
        std::cout << " anchor_node: " << anchor_node->id();
      }
      if(anchor_edge->id()) {
        std::cout << " anchor_edge: " << anchor_edge->id();
      }
      if(floor_plane_node->id()) {
        std::cout << " floor_node: " << floor_plane_node->id();
      }

      // finish with a new line
      std::cout << std::endl;
    }

    // Update our keyframe snapshot so we can publish a map update, trigger update with graph_updated = true.
    std::vector<KeyFrameSnapshot::Ptr> snapshot(keyframes.size());

    std::transform(keyframes.begin(), keyframes.end(), snapshot.begin(), [=](const KeyFrame::Ptr& k) { return std::make_shared<KeyFrameSnapshot>(k); });

    keyframes_snapshot_mutex.lock();
    keyframes_snapshot.swap(snapshot);
    keyframes_snapshot_mutex.unlock();
    graph_updated = true;

    res.success = true;

    std::cout << "snapshot updated" << std::endl << "loading successful" << std::endl;

    return true;
  }

  /**
   * @brief dump all data to the current directory
   * @param req
   * @param res
   * @return
   */
  bool dump_service(hdl_graph_slam::DumpGraphRequest& req, hdl_graph_slam::DumpGraphResponse& res) {
    std::lock_guard<std::mutex> lock(main_thread_mutex);

    std::string directory = req.destination;

    if(directory.empty()) {
      std::array<char, 64> buffer;
      buffer.fill(0);
      time_t rawtime;
      time(&rawtime);
      const auto timeinfo = localtime(&rawtime);
      strftime(buffer.data(), sizeof(buffer), "%d-%m-%Y %H:%M:%S", timeinfo);
    }

    if(!boost::filesystem::is_directory(directory)) {
      boost::filesystem::create_directory(directory);
    }

    std::cout << "dumping data to:" << directory << std::endl;
    // save graph
    graph_slam->save(directory + "/graph.g2o");

    // save keyframes
    for(int i = 0; i < keyframes.size(); i++) {
      std::stringstream sst;
      sst << boost::format("%s/%06d") % directory % i;

      keyframes[i]->save(sst.str());
    }

    if(zero_utm) {
      std::ofstream zero_utm_ofs(directory + "/zero_utm");
      zero_utm_ofs << boost::format("%.6f %.6f %.6f") % zero_utm->x() % zero_utm->y() % zero_utm->z() << std::endl;
    }

    std::ofstream ofs(directory + "/special_nodes.csv");
    ofs << "anchor_node " << (anchor_node == nullptr ? -1 : anchor_node->id()) << std::endl;
    ofs << "anchor_edge " << (anchor_edge == nullptr ? -1 : anchor_edge->id()) << std::endl;
    ofs << "floor_node " << (floor_plane_node == nullptr ? -1 : floor_plane_node->id()) << std::endl;

    res.success = true;
    return true;
  }
  /**
   * @brief save map data as pcd
   * @param req
   * @param res
   * @return
   */
  bool save_map_service(hdl_graph_slam::SaveMapRequest& req, hdl_graph_slam::SaveMapResponse& res) {
    std::vector<KeyFrameSnapshot::Ptr> snapshot;

    keyframes_snapshot_mutex.lock();
    snapshot = keyframes_snapshot;
    keyframes_snapshot_mutex.unlock();

    auto cloud = map_cloud_generator->generate(snapshot, req.resolution);
    if(!cloud) {
      res.success = false;
      return true;
    }

    if(zero_utm && req.utm) {
      for(auto& pt : cloud->points) {
        pt.getVector3fMap() += (*zero_utm).cast<float>();
      }
    }

    cloud->header.frame_id = map_frame_id;
    cloud->header.stamp = snapshot.back()->cloud->header.stamp;

    if(zero_utm) {
      std::ofstream ofs(req.destination + ".utm");
      ofs << boost::format("%.6f %.6f %.6f") % zero_utm->x() % zero_utm->y() % zero_utm->z() << std::endl;
    }

    int ret = pcl::io::savePCDFileBinary(req.destination, *cloud);
    res.success = ret == 0;

    return true;
  }

private:
  // member variables
  // ROS
  ros::NodeHandle nh;          // node handle
  ros::NodeHandle mt_nh;       // multi-threaded node handle
  ros::NodeHandle private_nh;  // private node handle; using several handles gives namespace/parameter isolation and keeps responsibilities separate
  ros::WallTimer optimization_timer;
  ros::WallTimer map_publish_timer;

  std::unique_ptr<message_filters::Subscriber<nav_msgs::Odometry>> odom_sub;
  std::unique_ptr<message_filters::Subscriber<sensor_msgs::PointCloud2>> cloud_sub;
  std::unique_ptr<message_filters::Synchronizer<ApproxSyncPolicy>> sync;

  ros::Subscriber gps_sub;
  ros::Subscriber nmea_sub;
  ros::Subscriber navsat_sub;

  ros::Subscriber imu_sub;
  ros::Subscriber floor_sub;

  ros::Publisher markers_pub;

  std::string published_odom_topic;
  std::string map_frame_id;
  std::string odom_frame_id;

  std::mutex trans_odom2map_mutex;  // keeps access to trans_odom2map thread safe
  Eigen::Matrix4f trans_odom2map;   // homogeneous transform from the odometry frame to the map frame (same kind of matrix as in manipulator kinematics)
  ros::Publisher odom2map_pub;      // publishes the odom-to-map correction

  std::string points_topic;  // name of the raw point-cloud topic (used as the frame_id of read_until messages)
  ros::Publisher read_until_pub;
  ros::Publisher map_points_pub;

  tf::TransformListener tf_listener;  // listens to and queries transforms between coordinate frames

  ros::ServiceServer load_service_server;      // service server for loading a dumped graph/state
  ros::ServiceServer dump_service_server;      // service server for dumping the current graph/state
  ros::ServiceServer save_map_service_server;  // service server for saving the map

  // keyframe queue
  std::string base_frame_id;
  std::mutex keyframe_queue_mutex;           // std::mutex is an exclusive lock
  std::deque<KeyFrame::Ptr> keyframe_queue;  // queue of keyframes waiting to be added to the graph

  // gps queue
  double gps_time_offset;
  double gps_edge_stddev_xy;
  double gps_edge_stddev_z;
  boost::optional<Eigen::Vector3d> zero_utm;
  std::mutex gps_queue_mutex;
  std::deque<geographic_msgs::GeoPointStampedConstPtr> gps_queue;

  // imu queue
  double imu_time_offset;               // correction applied to IMU timestamps
  bool enable_imu_orientation;          // whether IMU orientation is used
  double imu_orientation_edge_stddev;   // uncertainty (std-dev) of the IMU orientation
  bool enable_imu_acceleration;         // whether IMU acceleration is used
  double imu_acceleration_edge_stddev;  // uncertainty (std-dev) of the IMU acceleration
  std::mutex imu_queue_mutex;
  std::deque<sensor_msgs::ImuConstPtr> imu_queue;

  // floor_coeffs queue
  double floor_edge_stddev;
  std::mutex floor_coeffs_queue_mutex;
  std::deque<hdl_graph_slam::FloorCoeffsConstPtr> floor_coeffs_queue;

  // for map cloud generation
  std::atomic_bool graph_updated;       // whether the pose graph has been updated (thread-safe flag)
  double map_cloud_resolution;          // resolution of the generated map cloud
  std::mutex keyframes_snapshot_mutex;
  std::vector<KeyFrameSnapshot::Ptr> keyframes_snapshot;  // snapshot of the keyframes; both map_points publishing and PCD saving read this!
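  /* keyframes_snapshot is effectively a double buffer: at the end of every optimization pass a fresh
     vector of KeyFrameSnapshot objects is built and swap()ed in under keyframes_snapshot_mutex.
     map_points_publish_timer_callback() and save_map_service() only read this snapshot, so map
     generation neither blocks nor is blocked by graph optimization (which holds main_thread_mutex). */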
  std::unique_ptr<MapCloudGenerator> map_cloud_generator;  // generates the map point cloud

  // graph slam
  // all the below members must be accessed after locking main_thread_mutex
  std::mutex main_thread_mutex;
  int max_keyframes_per_update;             // maximum number of keyframes processed per optimization update
  std::deque<KeyFrame::Ptr> new_keyframes;  // keyframes added since the last loop-closure search

  g2o::VertexSE3* anchor_node;  // anchor node of the pose graph; anchoring constrains part of the graph so the optimization
                                // stays well conditioned and converges near a known location (e.g. the start pose)
  g2o::EdgeSE3* anchor_edge;    // edge connecting the anchor node to the first keyframe; keeps the graph stable during optimization
  g2o::VertexPlane* floor_plane_node;  // node representing the global floor plane

  std::vector<KeyFrame::Ptr> keyframes;  // all keyframes currently in the system
  std::unordered_map<ros::Time, KeyFrame::Ptr, RosTimeHash> keyframe_hash;  // hash map from timestamp to keyframe for fast lookup,
                                                                            // e.g. when associating floor coefficients with keyframes

  // std::unique_ptr owns its object exclusively and releases it automatically, preventing memory leaks
  std::unique_ptr<GraphSLAM> graph_slam;  // the g2o-based pose-graph wrapper
  std::unique_ptr<LoopDetector> loop_detector;
  std::unique_ptr<KeyframeUpdater> keyframe_updater;
  std::unique_ptr<NmeaSentenceParser> nmea_parser;

  std::unique_ptr<InformationMatrixCalculator> inf_calclator;
};

}  // namespace hdl_graph_slam

PLUGINLIB_EXPORT_CLASS(hdl_graph_slam::HdlGraphSlamNodelet, nodelet::Nodelet)