LiDAR and RGB Camera Extrinsic Calibration

1 Introduction

I have recently been working on extrinsic calibration between a Livox LiDAR and an RGB camera. Most of the open-source methods I found online are built on ROS, and since I am not very familiar with ROS, I first wrote an initial version in Python; the full code is below, and I will tidy it up when I have time. The camera intrinsics were calibrated with the MATLAB toolbox. The rough idea: take the calibration board's point cloud, swap its left-right and up-down coordinates into image axes, convert the depth values to gray values to obtain a grayscale image, apply traditional image processing to that image to find the circle centres, and then transform those centres back into coordinates in the LiDAR frame.
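
Before the full script, here is a minimal, self-contained sketch of the forward and inverse mapping just described: a board point is projected onto image axes with its depth encoded as a gray value, and a pixel plus its gray value is mapped back to LiDAR-frame coordinates. The helper names, the axis convention, and the scale factors (1 pixel per millimetre, depth normalised to 0-255) are illustrative assumptions chosen to mirror the script below, not a separately verified implementation.

python
import numpy as np

# Assumed convention (mirrors projecto2D / cal_rgbd below): points are in metres,
# X = depth (forward), Y = lateral, Z = height; the image grid works in millimetres.
def depth_point_to_pixel(pt_m, x_min, x_range, y_max, z_max):
    """Map one LiDAR point to (row, col, gray) in the projected depth image."""
    x, y, z = np.asarray(pt_m) * 1000.0   # metres -> millimetres
    col = y_max - y                       # left-right swap
    row = z_max - z                       # up-down swap
    gray = 255.0 * (x - x_min) / x_range  # depth -> gray value
    return row, col, gray

def pixel_to_depth_point(row, col, gray, x_min, x_range, y_max, z_max):
    """Invert the mapping: (row, col, gray) back to a LiDAR-frame point in metres."""
    x = gray * x_range / 255.0 + x_min
    y = y_max - col
    z = z_max - row
    return np.array([x, y, z]) / 1000.0   # millimetres -> metres

# Round-trip check with a made-up point (all values purely illustrative)
p = [1.2, -0.1, 0.8]  # metres
params = dict(x_min=1100.0, x_range=300.0, y_max=500.0, z_max=1200.0)
r, c, g = depth_point_to_pixel(p, **params)
print(pixel_to_depth_point(r, c, g, **params))  # ~[1.2, -0.1, 0.8]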

2 Code

python
import pandas as pd
import pcl
import open3d as o3d
import numpy as np
import cv2 as cv

import warnings

warnings.filterwarnings("ignore")

rgb_mtx = np.array([[164.9671, 0., 334.1256],
                    [0., 167.1601, 219.2284],
                    [0., 0., 1.]])

rgb_dist = np.array([-0.0844, 0.0065, -2.2149e-04, -1.1157e-04, 2.8005e-04]).reshape((1, 5))

# rgb_mtx = np.array([[120, 0., 640,
#                      0., 120, 360,
#                      0., 0., 1.]]).reshape((3, 3))

# rgb_dist = np.zeros((5, 1), dtype=np.float64) 

def find_lidar_blobs(input_img, show=False):
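    # Detect the circular holes in the projected depth image: invert the image and
    # run SimpleBlobDetector with area/circularity/convexity/inertia filters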
    input_img = 255 - input_img
    params = cv.SimpleBlobDetector_Params()
    params.minThreshold = 5
    # params.maxThreshold = 5
    params.blobColor = 0
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 400
    params.maxArea = 21000
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.1
    detector = cv.SimpleBlobDetector_create(params)
    # keypoints is a list of cv2.KeyPoint objects; each KeyPoint carries
    # the blob diameter (size) and the centre position (pt)
    keypoints = detector.detect(input_img)
    # keypoints = [kp for kp in keypoints if 72 <= kp.size <= 88]
    img_with_keypoints = cv.drawKeypoints(input_img, keypoints, np.array([]), (0,255,0), cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # size_list = [kp.size for kp in keypoints]
    if show:
        cv.namedWindow("Keypoints", 0)
        cv.imshow('Keypoints', img_with_keypoints)
        cv.waitKey(0)
        cv.destroyAllWindows()


    return keypoints

def filter_raw_pcl(data_path, color_change):
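    # Load the raw Livox CSV export and crop it to the calibration-board region
    # using X/Y/Z range limits and a low-reflectivity window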
    ori_pcl_data = pd.read_csv(data_path, low_memory=False)
    x_condition = (ori_pcl_data['X'] > 0.6) & (ori_pcl_data['X'] < 2)    # longitudinal (depth)
    y_condition = (ori_pcl_data['Y'] > -0.6) & (ori_pcl_data['Y'] < 0.5) # lateral
    z_condition = (ori_pcl_data['Z'] > 0.3) & (ori_pcl_data['Z'] < 3)    # height
    ref_condition =  (ori_pcl_data['Reflectivity'] > 0) & (ori_pcl_data['Reflectivity'] < 15)
    # ref_condition =  (ori_pcl_data['Reflectivity'] > 10)
    filtered_data = ori_pcl_data[x_condition & y_condition & z_condition & ref_condition].copy()
    # filtered_data = ori_pcl_data[x_condition & y_condition & z_condition]
    if color_change:
        ref_min = filtered_data['Reflectivity'].min()
        ref_max = filtered_data['Reflectivity'].max()
        filtered_data['reflectance_normalized'] = (filtered_data['Reflectivity'] - ref_min) / (ref_max - ref_min)
    return filtered_data

def show_pcl(data):
    point_cloud = o3d.geometry.PointCloud()
    # Display either a DataFrame (with X/Y/Z columns) or a plain Nx3 array
    if isinstance(data, pd.DataFrame):
        point_cloud.points = o3d.utility.Vector3dVector(data[['X', 'Y', 'Z']].values)
    else:
        point_cloud.points = o3d.utility.Vector3dVector(data)
    o3d.visualization.draw_geometries([point_cloud])

def sac_plane(valid_data_df):
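    # Fit the calibration-board plane with RANSAC (python-pcl) and return the
    # inlier points together with the plane coefficients [a, b, c, d]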
    valid_data = valid_data_df[['X', 'Y', 'Z']].values.astype('float32')
    cloud = pcl.PointCloud(valid_data)

    seg = cloud.make_segmenter()
    seg.set_optimize_coefficients(True)
    seg.set_model_type(pcl.SACMODEL_PLANE)
    seg.set_method_type(pcl.SAC_RANSAC)
    seg.set_distance_threshold(0.01)

    inliers, coefficients = seg.segment()

    guess = np.expand_dims(np.array(coefficients), axis=1)
    res = np.dot(valid_data, guess[:3, :]) + guess[3, 0]

    plane_cloud = cloud.extract(inliers, negative=False)
    plane_cloud_arr = plane_cloud.to_array()


    return plane_cloud_arr, coefficients

def projecto2D(filter_data):
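    # Project the plane points onto the Y-Z plane at 1 pixel per millimetre,
    # encoding depth (X) as an 8-bit gray value; also return the offsets
    # needed to invert the mapping later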
    projected_points = filter_data*1000
    x_min, x_max = projected_points[:, 0].min(), projected_points[:, 0].max()  
    z_min, z_max = projected_points[:, 2].min(), projected_points[:, 2].max()  
    y_min, y_max = projected_points[:, 1].min(), projected_points[:, 1].max()
    x_range = x_max - x_min

    # Create the single-channel projection image (1 pixel per millimetre)
    image_width = int(y_max - y_min)
    image_height = int(z_max - z_min)
    # TODO: depth is currently stored as uint8; switch to float16 later
    rgb_image = np.zeros((image_height, image_width, 1), dtype=np.uint8)
    # Write each point's depth (X) as a gray value at its (Y, Z) pixel
    for point in projected_points:  
        y, z = int(y_max -point[1]), int(z_max - point[2])
        if 0 <= y < image_width and 0 <= z < image_height:  
            rgb_image[z, y] = int(255*(point[0]- x_min)/x_range)
    cv.imwrite("demo.jpg", rgb_image)
    return[x_min, x_range, y_max, z_max], rgb_image
    

def cal_rgbd(img_path, xyz, show=False):
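    # Locate the circle centres in the projected depth image and map them back
    # to LiDAR-frame coordinates (in millimetres; divided by 1000 in main())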
    if isinstance(img_path, str):
        ori_img = cv.imread(img_path, cv.IMREAD_UNCHANGED)
    else:
        ori_img = img_path
    img_show = cv.cvtColor(ori_img, cv.COLOR_GRAY2BGR)
    # Binarise the image
    ret, smoothed_img = cv.threshold(ori_img, 15, 255, cv.THRESH_BINARY)
    kernel = np.ones((5,5),np.uint8)
    smoothed_img = cv.dilate(smoothed_img,kernel,iterations = 1)
    if show:
        cv.namedWindow('threshold', 0)
        cv.imshow("threshold", smoothed_img)
        key = 0
        while True:
            key = cv.waitKey()
            if key == 27:
                break
    # Gaussian blur
    smoothed_img = cv.GaussianBlur(smoothed_img, (5, 5), 0)
    keypoints = find_lidar_blobs(smoothed_img, show= show)
    gridcell_list  = []
    for keypoint in keypoints:
        x, z = int(keypoint.pt[0]), int(keypoint.pt[1])
        valid_point_num = 0
        value_sum  = 0
        for i in range(10):
            for j in range(10):
                # Add distance-based filtering
                # TODO: the depth values are currently problematic; depth decreases from left to right
                if z + j >= ori_img.shape[0] or x + i >= ori_img.shape[1]:
                    continue  # stay inside the image when the window crosses the border
                if ori_img[z + j, x + i] > 30:
                    valid_point_num += 1
                    value_sum += ori_img[z + j, x + i]
        if valid_point_num == 0:
            continue
        mean_value = value_sum / valid_point_num
        # Convert the mean gray value back to the original depth (millimetres)
        mean_value_trans = mean_value*xyz[1]/255 + xyz[0]
        # print("mean_value_trans", mean_value_trans)
        if isinstance(mean_value_trans, np.ndarray):
            mean_value_trans = mean_value_trans.item()
        # xyz = [x_min, x_range, y_max, z_max], as returned by projecto2D
        x_trans = xyz[2] - x
        z_trans = xyz[3] - z
        if z_trans > 400:  # height filter in the vertical direction
            gridcell_list.append([mean_value_trans, x_trans, z_trans])
            cv.circle(img_show,(x, z), 1, (0, 0, 255))
    if show:
        cv.namedWindow("img_show", 0)
        cv.imshow('img_show', img_show)
        key = 0
        while True:
            key = cv.waitKey()
            if key == 27:
                break
    return np.array(gridcell_list, dtype=np.float64)

def add_world_point(circle_dist=8,boardh=12, boardw=4):
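    # Build the board-frame coordinates of an asymmetric circle grid
    # (only referenced from commented-out code; the LiDAR centres serve as 3D points)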
    world_points = []
    for i in range(boardh):
        for j in range(boardw):        
            if i%2 == 0:
                x = circle_dist * (2 * j)
            else:
                x = circle_dist * (2 * j + 1)
            y = circle_dist * i
            world_points.append([x, y, 0])
    return np.array(world_points, dtype=np.float32)

def find_rgb_blobs(input_img, show=False):
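    # Same blob detection as find_lidar_blobs, but tuned for the RGB image
    # (smaller area limits, no image inversion)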
    params = cv.SimpleBlobDetector_Params()
    params.minThreshold = 5
    # params.maxThreshold = 5
    # params.blobColor = 0
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 20
    params.maxArea = 500
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.1
    detector = cv.SimpleBlobDetector_create(params)
    keypoints = detector.detect(input_img)
    img_with_keypoints = cv.drawKeypoints(input_img, keypoints, np.array([]), (0,255,0), cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # size_list = [kp.size for kp in keypoints]
    if show:
        cv.namedWindow("Keypoints", 0)
        cv.imshow('Keypoints', img_with_keypoints)
        cv.waitKey(0)
        cv.destroyAllWindows()
    return keypoints

def cal_rgb(rgb_img_path, show=False):
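    # Detect the circle centres in the (already undistorted) RGB image inside a
    # hard-coded ROI, sort them top-to-bottom, left-to-right, and keep the first 12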
    rgb_cell_list = []
    rgb_img = cv.imread(rgb_img_path)
    
    gray = cv.cvtColor(rgb_img,cv.COLOR_BGR2GRAY)
    
    rgb_img[0:100, :] = 0
    rgb_img[:, 0:250] = 0
    rgb_img[250:480, :] = 0
    rgb_img[:, 400:] = 0
    keypoints = find_rgb_blobs(rgb_img, show=show)
    for keypoint in keypoints:
        x, y = int(keypoint.pt[0]), int(keypoint.pt[1])
        rgb_cell_list.append([x, y])
    rgb_cell_list  = np.array(rgb_cell_list)
    ori = np.copy(rgb_cell_list)
    # Sort the RGB centres strictly top-to-bottom, left-to-right (y quantised to 8 px for stable row ordering)
    ori[:, 1] = np.round(ori[:, 1] / 8) * 8
    indices = np.lexsort((ori[:, 0], ori[:, 1]))
    rgb_cell_list = rgb_cell_list[indices]
    # word_points = add_world_point()
    # if show:
    #     i = 0
    #     for point in rgb_cell_list:
    #         cv.putText(rgb_img, "{}".format(i), (point[0], point[1]), cv.FONT_HERSHEY_TRIPLEX,thickness = 1,fontScale= 0.5,color=(0,255,0))
    #         i = i + 1
    #     cv.namedWindow("img_show_whole", 0)
    #     cv.imshow('img_show_whole', rgb_img)
    #     key = 0
    #     while True:
    #         key = cv.waitKey()
    #         if key == 27:
    #             break
    # rgb_cell_list = np.array(rgb_cell_list, dtype=np.float32).reshape(1, -1, 2)
    # word_points = np.array(word_points).reshape(1, -1, 3)
    # ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(word_points, rgb_cell_list, gray.shape[::-1], None, None)
    # if ret:
    #     print(mtx, dist)
    rgb_cell_list = rgb_cell_list.tolist()[:12]
    return np.array(rgb_cell_list, dtype=np.float64)

def getRT(world_points,gridcell_list, mtx, dist):
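    # Solve the extrinsics with solvePnP: the LiDAR circle centres act as the 3D
    # "world" points and the RGB circle centres as the 2D image points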
    _, rvec, tvec = cv.solvePnP(world_points, 
                            gridcell_list, mtx, dist,cv.SOLVEPNP_ITERATIVE)
    rotation_m, _ = cv.Rodrigues(rvec)  # Rodrigues: rotation vector -> rotation matrix
    RT = np.transpose(rotation_m)
    shouldBeIdentity = np.dot(RT, rotation_m)
    I = np.identity(3, dtype=rotation_m.dtype)
    n = np.linalg.norm(I - shouldBeIdentity)
    assert (n < 1e-6)
    return rotation_m, tvec


def main():
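    # Pipeline: crop the raw cloud -> fit the board plane -> project it to a depth
    # image -> find circle centres in the depth image and in the RGB image ->
    # solvePnP for R, T -> reproject the board cloud onto the RGB image as a check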
    show = True
    data_path = '~/cxx_project/lidar_rgb_calib/data/0221/2024-02-21_18-06-03.Csv'
    filter_data = filter_raw_pcl(data_path, False)
    # show_pcl(filter_data)
    calib_board, conf = sac_plane(filter_data)
    xyz, depth_img = projecto2D(calib_board)
    depth_cell_list = cal_rgbd(depth_img, xyz, show=show)
    depth_ori = np.copy(depth_cell_list)
    # Sort the LiDAR circle centres top-to-bottom, left-to-right (z quantised to 60 mm) to match the RGB ordering
    depth_ori[:, 2] = np.round(depth_ori[:, 2] / 60) * 60
    indices = np.lexsort((-depth_ori[:, 1], -depth_ori[:, 2]))
    depth_cell_list = depth_cell_list[indices]/1000
    # depth_cell_list = depth_cell_list[:, [0, 2, 1]]
    print("depth_cell_list:", depth_cell_list)
    rgb_img_path = '~/cxx_project/lidar_rgb_calib/data/0221/BIAODINGBAN/undistort_img/52.jpg'
    img = cv.imread(rgb_img_path)
    rgb_cell_list = cal_rgb(rgb_img_path, show=show)
    print("rgb_cell_list:", rgb_cell_list)
    R, T= getRT(depth_cell_list, rgb_cell_list, rgb_mtx, rgb_dist)
    print("R:{}, T{}".format(R, T))
    image_points, _ = cv.projectPoints(calib_board, R, T, rgb_mtx, rgb_dist)
    for point in image_points:
        x, y = point[0]
        cv.circle(img, (int(x), int(y)), radius=1, color=(0, 255, 0), thickness=-1)
    # if show:
    cv.namedWindow("res", 0)
    cv.imshow('res', img)
    cv.waitKey(0)
    # cv.destroyAllWindows()


if __name__ == "__main__":
    main()