Video Stitching with Camera Intrinsics Estimated from Image Size

Basic Approach: Code Segment

First, here is image-stitching code found online; it only works for stitching two images.

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

// Rough camera intrinsics estimated from image size:
// fx, fy ≈ image_width * factor
// cx ≈ image_width / 2
// cy ≈ image_height / 2
// e.g. for width 4032: 4032 / 2 = 2016, and 2016 * 1.2 ≈ 2419
cv::Mat K;

// Defined further down in this article
cv::Mat estimateIntrinsics(int image_width, int image_height, double factor);

// Camera intrinsics (assumed known here; in practice they require calibration)
//const Mat K = (Mat_<double>(3, 3) << 1000, 0, 960, 0, 1000, 540, 0, 0, 1);
// Image blending (simple linear blend)
void blendImages(const Mat& img1, const Mat& img2, const Mat& H, Mat& panorama) {
    Mat warp_img2;
    warpPerspective(img2, warp_img2, H, Size(img1.cols * 2, img1.rows));

    panorama = Mat::zeros(img1.rows, img1.cols * 2, CV_8UC3);
    img1.copyTo(panorama(Rect(0, 0, img1.cols, img1.rows)));

    // Blend the overlap region
    for (int y = 0; y < panorama.rows; ++y) {
        for (int x = 0; x < panorama.cols; ++x) {
            if (warp_img2.at<Vec3b>(y, x) != Vec3b(0, 0, 0)) {
                if (panorama.at<Vec3b>(y, x) == Vec3b(0, 0, 0)) {
                    panorama.at<Vec3b>(y, x) = warp_img2.at<Vec3b>(y, x);
                }
                else {
                    // Linear ramp weight
                    float alpha = (float)x / panorama.cols;
                    panorama.at<Vec3b>(y, x) =
                        alpha * warp_img2.at<Vec3b>(y, x) +
                        (1 - alpha) * panorama.at<Vec3b>(y, x);
                }
            }
        }
    }
}

// Visualize feature matches
void visualizeMatches(const Mat& img1, const Mat& img2,
    const vector<KeyPoint>& keypoints1, const vector<KeyPoint>& keypoints2,
    const vector<DMatch>& matches, const string& outputFile) {
    Mat matchImg;
    drawMatches(img1, keypoints1, img2, keypoints2, matches, matchImg,
        Scalar::all(-1), Scalar::all(-1), vector<char>(),
        DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    //imwrite(outputFile, matchImg);
    //std::cout << "Feature-match visualization saved to " << outputFile << endl;
}

int main() {
    // 1. Load images
    //vector<Mat> images = { imread("E:/code/stitchtool_mfc/jpg/left1.jpg"), imread("E:/code/stitchtool_mfc/jpg/right1.jpg") };

    Mat img1 = imread("c:/left1.jpg");
    Mat img2 = imread("c:/right1.jpg");
    if (img1.empty() || img2.empty()) {
        cerr << "Error: Could not load images!" << endl;
        return -1;
    }
    K = estimateIntrinsics(img1.cols, img1.rows, 1.2); // rough intrinsics (not used by the homography path below)
    // 2. Feature extraction (SIFT)
    Ptr<SIFT> sift = SIFT::create();
    vector<KeyPoint> kpts1, kpts2;
    Mat desc1, desc2;
    sift->detectAndCompute(img1, noArray(), kpts1, desc1);
    sift->detectAndCompute(img2, noArray(), kpts2, desc2);

    // 3. Feature matching (FLANN + ratio test)
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    vector<vector<DMatch>> knn_matches;
    matcher->knnMatch(desc1, desc2, knn_matches, 2);

    vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); ++i) {
        if (knn_matches[i][0].distance < 0.7 * knn_matches[i][1].distance) {
            good_matches.push_back(knn_matches[i][0]);
        }
    }

    // 4. Estimate the homography (RANSAC)
    vector<Point2f> pts1, pts2;
    for (const auto& m : good_matches) {
        pts1.push_back(kpts1[m.queryIdx].pt);
        pts2.push_back(kpts2[m.trainIdx].pt);
    }

    Mat H = findHomography(pts2, pts1, RANSAC, 3);
    if (H.empty()) {
        cerr << "Error: Homography estimation failed!" << endl;
        return -1;
    }


    Mat panorama;
    blendImages(img1, img2, H, panorama);
    imshow("p", panorama);

}

Problems:

This approach can stitch at most two images, the second image ends up heavily stretched, and nothing is balanced between the two. OpenCV has a more advanced stitching pipeline.
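For reference, that advanced pipeline is also packaged behind OpenCV's high-level cv::Stitcher wrapper. A minimal sketch (error handling reduced to a status check):

#include <opencv2/stitching.hpp>
#include <vector>

// Minimal sketch: the one-call wrapper around the pipeline unpacked below.
cv::Mat stitchWithStitcher(const std::vector<cv::Mat>& imgs) {
    cv::Mat pano;
    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    if (stitcher->stitch(imgs, pano) != cv::Stitcher::OK)
        pano.release();  // stitching failed (too few matches, degenerate motion, ...)
    return pano;
}

The sections below unpack what this wrapper does internally with the detail:: classes.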

Advanced Stitching Pipeline: Code Segment 1

  1. Image loading
  2. Feature-extractor selection
  3. Feature extraction for each image
  4. Feature matching
  5. Camera parameter estimation
  6. Bundle adjustment

Assuming the scene is a pure rotation (no translation), the homographies are used to estimate each image's camera intrinsics (mainly the focal length) and extrinsics (the rotation R).
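Concretely, under pure rotation the homography between two views factors as H = K2 · R · K1⁻¹, which is what allows a focal length to be read off from H. A hedged sketch using cv::detail::focalsFromHomography, the OpenCV helper that HomographyBasedEstimator builds on (H is assumed to be the CV_64F 3x3 matrix returned by findHomography):

#include <opencv2/stitching/detail/autocalib.hpp>
#include <cmath>

// Sketch: recover a focal-length estimate from a pure-rotation homography H.
double focalFromH(const cv::Mat& H) {
    double f0 = 0, f1 = 0;
    bool f0_ok = false, f1_ok = false;
    cv::detail::focalsFromHomography(H, f0, f1, f0_ok, f1_ok);
    if (f0_ok && f1_ok)
        return std::sqrt(f0 * f1);  // geometric mean of the two estimates
    return -1.0;  // H is not consistent with a pure rotation
}

The pipeline below runs this kind of estimation for every matched image pair, then refines all parameters jointly with bundle adjustment.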

#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
#include <iostream>

using namespace cv;
using namespace cv::detail;
using namespace std;

int main() {
    string features_type = "sift";
    // 1. Load images and extract features
    vector<Mat> images = { imread("E:/code/stitchtool_mfc/jpg/left1.jpg"), imread("E:/code/stitchtool_mfc/jpg/right1.jpg") };

    vector<ImageFeatures> features(images.size());

    Ptr<Feature2D> finder;
    if (features_type == "orb")
        finder = ORB::create();
    else if (features_type == "akaze")
        finder = AKAZE::create();
#ifdef HAVE_OPENCV_XFEATURES2D
    else if (features_type == "surf")
        finder = xfeatures2d::SURF::create();
#endif
    else if (features_type == "sift")
        finder = SIFT::create();
    else {
        cerr << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }

    // Extract features and set image indices
    for (size_t i = 0; i < images.size(); ++i) {
        computeImageFeatures(finder, images[i], features[i]);
        features[i].img_idx = (int)i; // make sure img_idx is set
        std::cout << "Image " << i + 1 << " keypoints: " << features[i].keypoints.size() << endl;
    }
    float match_conf = 0.3f;
    // Feature matching
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(false, match_conf);
    matcher(features, pairwise_matches);

    // Check that matching produced results
    if (pairwise_matches.empty()) {
        cerr << "Error: no pairwise matches" << endl;
        return -1;
    }

    // Visualize the matches
    //visualizeMatches(images[0], images[1], features[0].keypoints, features[1].keypoints,
    //        pairwise_matches[0].matches, "matches.jpg");

    // Print match info
    std::cout << "Match result: " << pairwise_matches[0].matches.size() << " matches" << std::endl;
    std::cout << "Inliers: " << pairwise_matches[0].num_inliers << std::endl;
    if (!pairwise_matches[0].H.empty()) {
        std::cout << "Homography:\n" << pairwise_matches[0].H << "\n\n";
    }

    // Camera parameter estimation
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);

    // Adjust camera parameters
    for (size_t i = 0; i < cameras.size(); ++i) {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;

        std::cout << "Camera " << i + 1 << " focal: " << cameras[i].focal << endl;

        // Clamp implausible focal lengths
        if (cameras[i].focal > 3000 || cameras[i].focal < 10) {
            cerr << "Resetting camera " << i + 1 << " focal to 1000" << endl;
            cameras[i].focal = 1000;
        }
        }
    }

    Ptr<BundleAdjusterBase> adjuster = makePtr<BundleAdjusterReproj>();
    adjuster->setConfThresh(1.0);
    adjuster->setRefinementMask(Mat::ones(3, 3, CV_8U));  // refine all parameters
    (*adjuster)(features, pairwise_matches, cameras);
   

    cv::waitKey(0);
    return 0;
}


cv::Mat estimateIntrinsics(int image_width, int image_height, double factor = 1.2) {
    cv::Mat K = cv::Mat::eye(3, 3, CV_64F);
    K.at<double>(0, 0) = image_width * factor;   // fx
    K.at<double>(1, 1) = image_height * factor;  // fy
    K.at<double>(0, 2) = image_width / 2.0;      // cx
    K.at<double>(1, 2) = image_height / 2.0;     // cy
    return K;
}

void cylindricalProjection(const Mat& src, Mat& dst, const Mat& K) {
    CV_Assert(src.type() == CV_8UC3);  // color images only
    double fx = K.at<double>(0, 0);
    double fy = K.at<double>(1, 1);
    double cx = K.at<double>(0, 2);
    double cy = K.at<double>(1, 2);

    // Precompute the remap tables
    Mat mapx(src.size(), CV_32FC1);
    Mat mapy(src.size(), CV_32FC1);

    for (int y = 0; y < src.rows; ++y) {
        for (int x = 0; x < src.cols; ++x) {
            // Cylindrical projection: angle and height
            double theta = (x - cx) / fx;
            double h = (y - cy) / fy;

            // Source coordinates for the inverse mapping
            mapx.at<float>(y, x) = static_cast<float>(fx * tan(theta) + cx);
            mapy.at<float>(y, x) = static_cast<float>(fy * h / cos(theta) + cy);
        }
    }

    // Fast remapping with remap
    remap(src, dst, mapx, mapy, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));
}
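A short usage sketch tying these two helpers together (the input path reuses the example path from above; the output file name is illustrative):

// Sketch: cylindrically project one image using the size-based intrinsics.
cv::Mat src = cv::imread("c:/left1.jpg");
cv::Mat K = estimateIntrinsics(src.cols, src.rows, 1.2);
cv::Mat cyl;
cylindricalProjection(src, cyl, K);
cv::imwrite("left1_cyl.jpg", cyl);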

Revised Version

#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
#include <opencv2/stitching/detail/matchers.hpp>
#include <opencv2/stitching/detail/motion_estimators.hpp>
#include <opencv2/stitching/detail/exposure_compensate.hpp>
#include <opencv2/stitching/detail/seam_finders.hpp>
#include <opencv2/stitching/detail/warpers.hpp>
#include <opencv2/stitching/detail/blenders.hpp>
#include <algorithm>
#include <iostream>
#include <limits>

using namespace cv;
using namespace cv::detail;

int main() {
    // ============ 1. Load images ============
    std::vector<Mat> images;
    images.push_back(imread("E:/code/stitchtool_mfc/jpg/left1.jpg"));
    images.push_back(imread("E:/code/stitchtool_mfc/jpg/right1.jpg"));

    for (size_t i = 0; i < images.size(); ++i) {
        if (images[i].empty()) {
            std::cerr << "Error: Could not load image " << i << std::endl;
            return -1;
        }
    }

    // ============ 2. Feature extraction ============
    std::string features_type = "sift";
    std::vector<ImageFeatures> features(images.size());

    Ptr<Feature2D> finder;
    if (features_type == "orb")
        finder = ORB::create();
    else if (features_type == "akaze")
        finder = AKAZE::create();
#ifdef HAVE_OPENCV_XFEATURES2D
    else if (features_type == "surf")
        finder = xfeatures2d::SURF::create();
#endif
    else if (features_type == "sift")
        finder = SIFT::create();
    else {
        std::cerr << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }

    for (size_t i = 0; i < images.size(); ++i) {
        computeImageFeatures(finder, images[i], features[i]);
        features[i].img_idx = static_cast<int>(i);
        std::cout << "Image " << i << ": " << features[i].keypoints.size() << " keypoints" << std::endl;
    }

    // ============ 3. Feature matching ============
    float match_conf = 0.3f;
    BestOf2NearestMatcher matcher(false, match_conf);
    std::vector<MatchesInfo> pairwise_matches;
    matcher(features, pairwise_matches);
    matcher.collectGarbage();

    // Check that there are enough matches
    bool has_matches = false;
    for (const auto& m : pairwise_matches) {
        if (m.confidence > 0) {
            has_matches = true;
            break;
        }
    }
    if (!has_matches) {
        std::cerr << "Not enough matches!" << std::endl;
        return -1;
    }

    // ============ 4. Camera pose estimation ============
    HomographyBasedEstimator estimator;
    std::vector<CameraParams> cameras;
    if (!estimator(features, pairwise_matches, cameras)) {
        std::cerr << "Homography estimation failed!" << std::endl;
        return -1;
    }

    for (auto& cam : cameras) {
        cam.R.convertTo(cam.R, CV_32F);
    }

    // ============ 5. Bundle adjustment ============
    Ptr<BundleAdjusterBase> adjuster = makePtr<BundleAdjusterRay>();
    adjuster->setConfThresh(1.0);
    Mat_<uchar> refine_mask = Mat::ones(3, 3, CV_8U);
    refine_mask(0, 2) = 0;  // do not refine the principal point (cx, cy)
    refine_mask(1, 2) = 0;
    adjuster->setRefinementMask(refine_mask);

    if (!(*adjuster)(features, pairwise_matches, cameras)) {
        std::cerr << "Bundle adjustment failed!" << std::endl;
        return -1;
    }

    // Normalize focal lengths (median becomes 1)
    std::vector<double> focals;
    for (const auto& cam : cameras)
        focals.push_back(cam.focal);
    std::sort(focals.begin(), focals.end());
    double median_focal = focals[focals.size() / 2];
    for (auto& cam : cameras)
        cam.focal /= median_focal;

    // ============ 6. Compute warped corners and sizes ============
    std::vector<Point> corners(images.size());
    std::vector<UMat> masks_warped(images.size());
    std::vector<UMat> images_warped(images.size());
    std::vector<Size> sizes(images.size());

    // Cylindrical warp (spherical or planar warpers would also work);
    // detail::CylindricalWarper takes its scale in the constructor
    Ptr<RotationWarper> warper = makePtr<detail::CylindricalWarper>(static_cast<float>(median_focal));

    for (size_t i = 0; i < images.size(); ++i) {
        Mat K;
        cameras[i].K().convertTo(K, CV_32F);  // the warpers expect a CV_32F intrinsic matrix
        // warp() returns the top-left corner of the warped image on the panorama plane
        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();
        Mat mask(images[i].size(), CV_8U, Scalar::all(255));  // full-white single-channel source mask
        warper->warp(mask, K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    // Compute the final canvas size
    Point dst_tl, dst_br;
    dst_tl.x = dst_tl.y = std::numeric_limits<int>::max();
    dst_br.x = dst_br.y = std::numeric_limits<int>::min();
    for (size_t i = 0; i < corners.size(); ++i) {
        dst_tl.x = std::min(dst_tl.x, corners[i].x);
        dst_tl.y = std::min(dst_tl.y, corners[i].y);
        dst_br.x = std::max(dst_br.x, corners[i].x + sizes[i].width);
        dst_br.y = std::max(dst_br.y, corners[i].y + sizes[i].height);
    }
    Size dst_size(dst_br.x - dst_tl.x, dst_br.y - dst_tl.y);

    // Shift all corners into positive coordinates
    for (auto& corner : corners) {
        corner.x -= dst_tl.x;
        corner.y -= dst_tl.y;
    }

    // ============ 7. Exposure compensation ============
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(ExposureCompensator::GAIN_BLOCKS);
    compensator->feed(corners, images_warped, masks_warped);

    // ============ 8. Seam finding ============
    // GraphCutSeamFinder expects CV_32F images
    std::vector<UMat> images_warped_f(images.size());
    for (size_t i = 0; i < images.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);
    Ptr<SeamFinder> seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    seam_finder->find(images_warped_f, corners, masks_warped);

    // ============ 9. Blending ============
    Mat result, result_mask;
    Ptr<Blender> blender = Blender::createDefault(Blender::MULTI_BAND, true);
    blender->prepare(corners, sizes);

    for (size_t i = 0; i < images.size(); ++i) {
        compensator->apply(static_cast<int>(i), corners[i], images_warped[i], masks_warped[i]);
        UMat img_s;
        images_warped[i].convertTo(img_s, CV_16S);  // MultiBandBlender expects CV_16SC3
        blender->feed(img_s, masks_warped[i], corners[i]);
    }

    blender->blend(result, result_mask);

    // ============ 10. Save and display the result ============
    result.convertTo(result, CV_8U);  // blend() outputs CV_16SC3
    imwrite("panorama_result.jpg", result);
    imshow("Panorama", result);
    waitKey(0);

    return 0;
}

Problems

This kind of stitching cannot serve as the basis for video stitching. OpenCV's stitching pipeline essentially solves the core problems of single-frame panorama composition:

  • Feature matching and alignment
  • Camera pose estimation (focal length, rotation)
  • Geometric warping (cylindrical/spherical)
  • Exposure compensation
  • Seam blending

But it is fragile in many ways; the slightest problem in the input images and the whole pipeline collapses:

  1. Performance is too low
    SIFT + FLANN + BA + multi-band blending can take 200~1000 ms per frame, far below real-time requirements (>30 fps); a quick way to measure this is sketched after this list.
  2. Inter-frame jitter
    Camera parameters are computed independently for every frame → small fluctuations in focal length/rotation → the panorama visibly "shakes".
  3. Unstable features
    In dynamic scenes the feature points change a lot, so matching fails frequently.
  4. Redundant computation
    The camera intrinsics (focal length) and relative poses are usually fixed throughout a video (assuming the cameras do not move), so there is no need to recompute them every frame.
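To verify the timing claim on your own hardware, a minimal sketch with cv::TickMeter; the variable names reuse those from the basic code segment above, and each pipeline stage (matching, BA, warping, blending) can be wrapped the same way:

// Sketch: time one pipeline stage with cv::TickMeter.
cv::TickMeter tm;
tm.start();
sift->detectAndCompute(img1, cv::noArray(), kpts1, desc1);  // stage under test
tm.stop();
std::cout << "SIFT: " << tm.getTimeMilli() << " ms" << std::endl;
tm.reset();  // reuse the meter for the next stage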

Core idea: pre-calibrate + real-time warp + lightweight blending

Run the complete pipeline (the revised code above) on the first frame, or on a few static frames.

Save the results:

1 Each camera's CameraParams (mainly R and focal)

2 The final warp sizes, corners, blender configuration, and so on

3 All subsequent video frames reuse these parameters, skipping feature extraction, matching, and BA
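A hedged sketch of persisting that calibration with cv::FileStorage, so later runs can load it instead of re-running the pipeline (the file format and node names are illustrative):

#include <opencv2/core.hpp>
#include <opencv2/stitching/detail/camera.hpp>
#include <string>
#include <vector>

// Sketch: save each camera's rotation matrix and focal length to a YAML file.
void saveCalibration(const std::vector<cv::detail::CameraParams>& cameras,
                     const std::string& path) {
    cv::FileStorage fs(path, cv::FileStorage::WRITE);
    fs << "num_cameras" << static_cast<int>(cameras.size());
    for (size_t i = 0; i < cameras.size(); ++i) {
        fs << "R_" + std::to_string(i) << cameras[i].R;
        fs << "focal_" + std::to_string(i) << cameras[i].focal;
    }
}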

// Pseudocode for the per-frame loop
while (video_running) {
    // 1. Read one frame from each video stream (img1, img2, ...)
    // 2. Warp each frame using the pre-stored R, K, and warper
    UMat warped1 = warper->warp(img1, pre_R1, pre_K1, ...);
    UMat warped2 = warper->warp(img2, pre_R2, pre_K2, ...);
    // 3. Apply the precomputed exposure gains (optional)
    applyGain(warped1, gain1);
    // 4. Fast blending (skip multi-band)
    Mat panorama = simpleBlend(warped1, warped2, pre_seam_mask);
    // 5. Display / encode the output
}

Other Notes

I often receive requirements demanding extremely low latency, on the order of tens of milliseconds, with no thought given to what happens under network jitter, as if lower latency were automatically better. I don't reject the idea, but in practice it is often unnecessary and gains little. The real problems come from:

1 Ignoring network jitter

2 Ignoring the clock throttling caused by GPU heating over long runs

3 GPU lifespan

The first thing to solve is actually power consumption:

1 Keep everything in GPU memory: decode ---> stitch ---> AI algorithms ---> render (see the sketch after this list)

2 Go distributed; add another GPU when necessary

3 Strip the code down to the bare essentials
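For point 1, a hedged sketch of what "everything stays in GPU memory" can look like with OpenCV's CUDA modules. It assumes a build with cudacodec and cudawarping; runInference and renderFrame are hypothetical stage placeholders, and the cylindrical remap tables are assumed to be precomputed and uploaded once:

#include <opencv2/cudacodec.hpp>
#include <opencv2/cudawarping.hpp>
#include <string>

// Sketch: decode -> warp -> AI -> render, all in device memory (no cv::Mat downloads).
void gpuOnlyLoop(const std::string& url,
                 const cv::cuda::GpuMat& mapx, const cv::cuda::GpuMat& mapy) {
    cv::Ptr<cv::cudacodec::VideoReader> reader = cv::cudacodec::createVideoReader(url);  // NVDEC decodes straight into a GpuMat
    cv::cuda::GpuMat frame, warped;
    while (reader->nextFrame(frame)) {
        cv::cuda::remap(frame, warped, mapx, mapy, cv::INTER_LINEAR);  // cylindrical warp on the GPU
        // runInference(warped);  // hypothetical: the AI stage consumes the GpuMat directly
        // renderFrame(warped);   // hypothetical: render via CUDA-OpenGL interop, still zero-copy
    }
}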

Improved Code (Pseudo)

Note: the code below is not polished, ready-to-use code; it needs real acceleration work, and changes are required before it will compile.

#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
#include <opencv2/stitching/detail/warpers.hpp>
#include <opencv2/stitching/detail/exposure_compensate.hpp>
#include <iostream>
#include <vector>
#include <thread>
#include <mutex>
#include <queue>
#include <condition_variable>
#include <atomic>

using namespace cv;
using namespace cv::detail;

// ================== Configuration ==================
const bool USE_CALIBRATION = true; // calibrate on the first frame, reuse afterwards
const int MAX_QUEUE_SIZE = 2;
const double CYLINDER_SCALE_FACTOR = 1.2;

// ================== Global shared state ==================
struct StitchConfig {
    std::vector<Mat> R_mats;          // per-camera rotation matrices (3x3)
    std::vector<Mat> K_mats;          // intrinsic matrices (3x3)
    std::vector<Point> corners;       // top-left corners after warping
    Size result_size;                 // output panorama size
    std::vector<double> gains;        // exposure gains (simple version)
    double warp_scale = 1.0;          // warper scale found at calibration time
    bool calibrated = false;
    std::mutex mtx;
};

StitchConfig g_config;

// ================== Utility functions ==================

Mat estimateIntrinsics(int w, int h, double factor = 1.2) {
    Mat K = Mat::eye(3, 3, CV_64F);
    K.at<double>(0, 0) = w * factor;
    K.at<double>(1, 1) = h * factor;
    K.at<double>(0, 2) = w / 2.0;
    K.at<double>(1, 2) = h / 2.0;
    return K;
}

// Fast linear blend with a fixed overlap region.
// Assumes the first warped frame sits at the canvas origin and that both
// warped frames share the same height (reasonable for this two-camera rig).
void simpleBlend(const UMat& img1, const UMat& img2, const Point& corner2, Mat& result) {
    result.create(g_config.result_size, CV_8UC3);
    result.setTo(0);

    // Place the first image at (0,0)
    img1.copyTo(result(Rect(0, 0, img1.cols, img1.rows)));

    // Simple alpha blend: linear ramp across the overlap region
    if (corner2.x < img1.cols) {
        int overlap = img1.cols - corner2.x;
        if (overlap > 0 && overlap < img2.cols) {
            // Split the second image into overlapping and non-overlapping parts
            UMat right_overlap(img2, Rect(0, 0, overlap, img2.rows));
            UMat right_non_overlap(img2, Rect(overlap, 0, img2.cols - overlap, img2.rows));

            // Copy the non-overlapping part directly
            right_non_overlap.copyTo(result(Rect(corner2.x + overlap, corner2.y, img2.cols - overlap, img2.rows)));

            // Blend the overlap column by column with a linear ramp
            // (slow but simple; a precomputed weight map would be faster)
            UMat left_overlap(img1, Rect(corner2.x, 0, overlap, img1.rows));
            for (int x = 0; x < overlap; ++x) {
                double alpha = static_cast<double>(x) / overlap;
                Mat col_result = result(Rect(corner2.x + x, 0, 1, img1.rows));
                addWeighted(left_overlap.col(x), 1.0 - alpha, right_overlap.col(x), alpha, 0.0, col_result);
            }
        } else {
            img2.copyTo(result(Rect(corner2.x, corner2.y, img2.cols, img2.rows)));
        }
    } else {
        img2.copyTo(result(Rect(corner2.x, corner2.y, img2.cols, img2.rows)));
    }
}

// ================== Calibration (runs only once) ==================
bool calibrateFromFrames(const std::vector<Mat>& imgs, StitchConfig& config) {
    std::cout << "Running calibration on first frame...\n";

    // 1. Feature extraction
    Ptr<SIFT> finder = SIFT::create();
    std::vector<ImageFeatures> features(imgs.size());
    for (size_t i = 0; i < imgs.size(); ++i) {
        computeImageFeatures(finder, imgs[i], features[i]);
        features[i].img_idx = static_cast<int>(i);
    }

    // 2. Matching
    BestOf2NearestMatcher matcher(false, 0.3f);
    std::vector<MatchesInfo> pairwise_matches;
    matcher(features, pairwise_matches);
    matcher.collectGarbage();

    // 3. Camera estimation
    HomographyBasedEstimator estimator;
    std::vector<CameraParams> cameras;
    if (!estimator(features, pairwise_matches, cameras)) {
        std::cerr << "Calibration failed!\n";
        return false;
    }
    for (auto& cam : cameras) cam.R.convertTo(cam.R, CV_32F);  // BA expects CV_32F rotations

    // 4. Bundle Adjustment
    Ptr<BundleAdjusterBase> adjuster = makePtr<BundleAdjusterRay>();
    adjuster->setConfThresh(1.0);
    Mat_<uchar> mask = Mat::ones(3, 3, CV_8U);
    mask(0,2) = mask(1,2) = 0;
    adjuster->setRefinementMask(mask);
    if (!(*adjuster)(features, pairwise_matches, cameras)) {
        std::cerr << "Bundle adjustment failed!\n";
        return false;
    }

    // 5. Normalize focal lengths
    std::vector<double> focals;
    for (auto& cam : cameras) focals.push_back(cam.focal);
    std::sort(focals.begin(), focals.end());
    double median_focal = focals[focals.size()/2];
    for (auto& cam : cameras) cam.focal /= median_focal;

    // 6. Compute the warping parameters
    Ptr<detail::CylindricalWarper> warper = makePtr<detail::CylindricalWarper>(static_cast<float>(median_focal));
    config.warp_scale = median_focal;

    config.R_mats.resize(imgs.size());
    config.K_mats.resize(imgs.size());
    config.corners.resize(imgs.size());
    std::vector<Size> sizes(imgs.size());

    for (size_t i = 0; i < imgs.size(); ++i) {
        cameras[i].R.convertTo(config.R_mats[i], CV_32F);
        cameras[i].K().convertTo(config.K_mats[i], CV_32F);  // the warpers expect a CV_32F K
        // warpRoi(src_size, K, R) gives the destination ROI on the panorama plane
        Rect roi = warper->warpRoi(imgs[i].size(), config.K_mats[i], config.R_mats[i]);
        config.corners[i] = roi.tl();
        sizes[i] = roi.size();
    }

    // 7. Compute the output size
    Point tl(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
    Point br(std::numeric_limits<int>::min(), std::numeric_limits<int>::min());
    for (size_t i = 0; i < imgs.size(); ++i) {
        tl.x = std::min(tl.x, config.corners[i].x);
        tl.y = std::min(tl.y, config.corners[i].y);
        br.x = std::max(br.x, config.corners[i].x + sizes[i].width);
        br.y = std::max(br.y, config.corners[i].y + sizes[i].height);
    }
    config.result_size = Size(br.x - tl.x, br.y - tl.y);

    // Shift corners into positive coordinates
    for (auto& pt : config.corners) {
        pt.x -= tl.x;
        pt.y -= tl.y;
    }

    // 8. Simple exposure compensation (mean-brightness normalization)
    config.gains.resize(imgs.size(), 1.0);
    for (size_t i = 0; i < imgs.size(); ++i) {
        std::vector<Mat> ch;
        split(imgs[i], ch);
        double mean = (cv::mean(ch[0])[0] + cv::mean(ch[1])[0] + cv::mean(ch[2])[0]) / 3.0;
        config.gains[i] = 128.0 / (mean + 1e-5); // target brightness 128
    }

    std::cout << "Calibration done. Output size: " << config.result_size << "\n";
    return true;
}

// ================== Video worker thread ==================
void stitchingWorker(
    const std::vector<std::string>& video_paths,
    std::atomic<bool>& stop_flag
) {
    // Open the video sources
    std::vector<VideoCapture> caps(video_paths.size());
    for (size_t i = 0; i < video_paths.size(); ++i) {
        caps[i].open(video_paths[i]);
        if (!caps[i].isOpened()) {
            std::cerr << "Cannot open video: " << video_paths[i] << "\n";
            return;
        }
    }

    Mat frame0, frame1;
    bool first_frame = true;
    Ptr<detail::CylindricalWarper> warper;

    while (!stop_flag) {
        // Read one frame from each stream
        if (!caps[0].read(frame0) || !caps[1].read(frame1)) break;

        if (first_frame) {
            // Calibrate once on the first frame pair
            std::vector<Mat> first_frames = {frame0, frame1};
            if (!calibrateFromFrames(first_frames, g_config)) {
                std::cerr << "Failed to calibrate!\n";
                break;
            }
            // Reuse the calibration-time scale so the stored corners stay valid
            warper = makePtr<detail::CylindricalWarper>(static_cast<float>(g_config.warp_scale));
            first_frame = false;
        }

        if (!g_config.calibrated) continue;

        // Warp each frame with the precomputed R and K
        UMat warped0, warped1;
        warper->warp(frame0, g_config.K_mats[0], g_config.R_mats[0], INTER_LINEAR, BORDER_CONSTANT, warped0);
        warper->warp(frame1, g_config.K_mats[1], g_config.R_mats[1], INTER_LINEAR, BORDER_CONSTANT, warped1);

        // Apply the exposure gains (a simple multiplication)
        warped0.convertTo(warped0, -1, g_config.gains[0]);
        warped1.convertTo(warped1, -1, g_config.gains[1]);

        // Blend
        Mat panorama;
        simpleBlend(warped0, warped1, g_config.corners[1], panorama);

        // Display (replace with a PBO upload or FFmpeg encoding in a real project)
        imshow("Live Panorama", panorama);
        if (waitKey(1) == 27) break; // exit on ESC
    }

    stop_flag = true;
    for (auto& cap : caps) cap.release();
}

// ================== Main ==================
int main() {
    // Input video paths (camera indices 0 and 1 also work)
    std::vector<std::string> video_sources = {
        "c:/left.mp4",
        "c:/right.mp4"
    };

    std::atomic<bool> stop_flag{false};
    std::thread worker(stitchingWorker, video_sources, std::ref(stop_flag));

    // The main thread just waits
    while (!stop_flag) {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }

    if (worker.joinable()) worker.join();
    destroyAllWindows();
    return 0;
}