1. Stitching Two Images
import cv2
import numpy as np


def stitch_images(img1, img2):
    """
    Stitch two images together.
    :param img1: first image
    :param img2: second image
    :return: stitched image
    """
    # Convert to grayscale
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Use the SIFT feature detector
    sift = cv2.SIFT_create()
    # Detect keypoints and compute descriptors
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)
    # Match features with the FLANN matcher
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Keep only good matches (Lowe's ratio test)
    good_matches = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good_matches.append(m)
    # Extract the coordinates of the matched points
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    # Estimate the homography (img1 -> img2) with RANSAC
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    # Image sizes
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Corners of both images, and the corners of img1 after warping
    corners1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
    corners2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
    transformed_corners = cv2.perspectiveTransform(corners1, H)
    # Size of the stitched canvas
    all_corners = np.concatenate((corners2, transformed_corners), axis=0)
    [x_min, y_min] = np.int32(all_corners.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(all_corners.max(axis=0).ravel() + 0.5)
    # Translation that shifts everything into positive coordinates
    translation_dist = [-x_min, -y_min]
    H_translation = np.array([[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])
    # Apply the perspective transform plus translation to img1
    result = cv2.warpPerspective(img1, H_translation.dot(H), (x_max - x_min, y_max - y_min))
    # Paste the second image onto the warped canvas
    result[translation_dist[1]:translation_dist[1] + h2,
           translation_dist[0]:translation_dist[0] + w2] = img2
    return result
def blend_images(result, img2, translation_dist):
    """
    Blend the stitched image so the transition looks more natural.
    (Optional step; the main program below does not call it.)
    :param result: stitched image
    :param img2: second original image
    :param translation_dist: translation offset
    :return: blended image
    """
    h2, w2 = img2.shape[:2]
    # Mask for seamlessClone: it must have the same size as the source image (img2)
    mask = np.full((h2, w2), 255, dtype=np.uint8)
    # Poisson blending for a more natural transition
    center = (translation_dist[0] + w2 // 2, translation_dist[1] + h2 // 2)
    blended_result = cv2.seamlessClone(img2, result, mask, center, cv2.NORMAL_CLONE)
    return blended_result
# Main program
if __name__ == "__main__":
    # Read the two images
    img1 = cv2.imread('01.png')  # replace with the path to your first image
    img2 = cv2.imread('02.png')  # replace with the path to your second image
    if img1 is None or img2 is None:
        print("Error: could not read the image files, please check the paths")
        exit()
    # Optionally downscale the images if they are too large
    img1 = cv2.resize(img1, (0, 0), fx=0.5, fy=0.5)
    img2 = cv2.resize(img2, (0, 0), fx=0.5, fy=0.5)
    # Stitch the images
    result = stitch_images(img1, img2)
    # Show the result
    cv2.imshow('fusion Image', result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Save the result
    cv2.imwrite('fusion.jpg', result)
    print("Stitching finished, result saved as fusion.jpg")
2. Stitching Multiple Images
import cv2
import numpy as np


def correct_curvature(img, curvature_strength=0.3):
    """
    Correct the curvature of an image so it looks closer to a flat (planar) image.
    :param img: input image
    :param curvature_strength: strength of the curvature correction (0-1)
    :return: corrected image
    """
    h, w = img.shape[:2]
    # Remap matrices
    map_x = np.zeros((h, w), dtype=np.float32)
    map_y = np.zeros((h, w), dtype=np.float32)
    # Image center
    center_x, center_y = w // 2, h // 2
    # Maximum distance from the center, used to normalize the correction
    max_dist = np.sqrt(center_x**2 + center_y**2)
    for y in range(h):
        for x in range(w):
            # Distance of the current pixel from the center
            dx = x - center_x
            dy = y - center_y
            dist = np.sqrt(dx**2 + dy**2)
            # Curvature correction: map curved points back onto a plane
            if dist > 0:
                # Correction factor grows with the (normalized) distance from the center
                correction = 1 + curvature_strength * (dist / max_dist)**2
                # New coordinates
                new_x = center_x + dx / correction
                new_y = center_y + dy / correction
                # Keep the coordinates inside the image
                if 0 <= new_x < w and 0 <= new_y < h:
                    map_x[y, x] = new_x
                    map_y[y, x] = new_y
                else:
                    map_x[y, x] = x
                    map_y[y, x] = y
            else:
                map_x[y, x] = x
                map_y[y, x] = y
    # Apply the remapping to perform the curvature correction
    corrected_img = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    return corrected_img
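
# The per-pixel loop above is slow on large images. Below is a vectorized sketch of
# the same mapping built with np.meshgrid; it is added here for illustration (not part
# of the original pipeline) and relies on cv2.remap's borderMode instead of the
# per-pixel bounds check.
def correct_curvature_vectorized(img, curvature_strength=0.3):
    h, w = img.shape[:2]
    center_x, center_y = w // 2, h // 2
    max_dist = np.sqrt(center_x**2 + center_y**2)
    # Coordinate grids for every pixel at once
    xs, ys = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32))
    dx, dy = xs - center_x, ys - center_y
    dist = np.sqrt(dx**2 + dy**2)
    correction = 1 + curvature_strength * (dist / max_dist)**2
    map_x = (center_x + dx / correction).astype(np.float32)
    map_y = (center_y + dy / correction).astype(np.float32)
    return cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)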
def preprocess_images(images, target_width=1536, target_height=2048):
    """
    Preprocess the images: curvature correction and resizing.
    :param images: list of input images
    :param target_width: target width
    :param target_height: target height
    :return: list of preprocessed images
    """
    processed_images = []
    for i, img in enumerate(images):
        if img is None:
            continue
        print(f"Preprocessing image {i+1}...")
        # 1. Curvature correction
        corrected_img = correct_curvature(img, curvature_strength=0.4)
        # 2. Resize while keeping the aspect ratio (the width follows from the scale)
        h, w = corrected_img.shape[:2]
        # Scale so that the image height matches the target height
        scale_factor = target_height / h
        new_width = int(w * scale_factor)
        new_height = target_height
        # Resize
        resized_img = cv2.resize(corrected_img, (new_width, new_height))
        processed_images.append(resized_img)
        print(f"Image {i+1} preprocessed: {w}x{h} -> {new_width}x{new_height}")
    return processed_images
def find_homography_robust(img1, img2):
    """
    Robust homography estimation.
    :param img1: first image
    :param img2: second image
    :return: homography matrix, number of good matches
    """
    # Convert to grayscale
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Use the SIFT feature detector
    sift = cv2.SIFT_create(nfeatures=5000)
    # Detect keypoints and compute descriptors
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)
    if des1 is None or des2 is None or len(des1) < 10 or len(des2) < 10:
        print("Not enough SIFT features, falling back to ORB")
        orb = cv2.ORB_create(3000)
        kp1, des1 = orb.detectAndCompute(gray1, None)
        kp2, des2 = orb.detectAndCompute(gray2, None)
        if des1 is None or des2 is None or len(des1) < 10 or len(des2) < 10:
            return None, 0
    # Make sure the descriptors are float32 (the KD-tree FLANN index needs float data;
    # casting ORB's binary descriptors is a workaround, see the note below)
    if des1.dtype != np.float32:
        des1 = des1.astype(np.float32)
    if des2.dtype != np.float32:
        des2 = des2.astype(np.float32)
    # FLANN matcher
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=100)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Keep only good matches (Lowe's ratio test)
    good_matches = []
    for match_pair in matches:
        if len(match_pair) == 2:
            m, n = match_pair
            if m.distance < 0.6 * n.distance:
                good_matches.append(m)
    if len(good_matches) < 10:
        return None, len(good_matches)
    # Extract the coordinates of the matched points
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    # Estimate the homography with RANSAC
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    return H, len(good_matches)
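
# Note: casting ORB's binary descriptors to float32 lets the KD-tree index run, but
# Hamming distance is their natural metric. A minimal alternative sketch (added for
# illustration, not part of the original pipeline) using the brute-force matcher with
# NORM_HAMMING and the same ratio test:
def match_orb_descriptors(des1, des2, ratio=0.75):
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return good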
def stitch_two_images_left_right(img1, img2, direction):
    """
    Stitch two images (fixed version).
    :param img1: new image
    :param img2: base image
    :param direction: stitching direction ('left' or 'right')
    :return: stitched result
    """
    H, num_matches = find_homography_robust(img1, img2)
    if H is None:
        print(f"Stitching failed, number of good matches: {num_matches}")
        return img2
    # Image sizes
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    try:
        if direction == 'left':
            # Attach img1 to the left of img2, warping it with the inverse homography
            H_inv = np.linalg.inv(H)
            # Size of the warped canvas
            corners1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
            transformed_corners = cv2.perspectiveTransform(corners1, H_inv)
            all_corners = np.concatenate((np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2),
                                          transformed_corners), axis=0)
            [x_min, y_min] = np.int32(all_corners.min(axis=0).ravel() - 0.5)
            [x_max, y_max] = np.int32(all_corners.max(axis=0).ravel() + 0.5)
            # Translation that shifts everything into positive coordinates
            translation_dist = [-x_min, -y_min]
            H_translation = np.array([[1, 0, translation_dist[0]],
                                      [0, 1, translation_dist[1]],
                                      [0, 0, 1]])
            # Apply the perspective transform plus translation
            warped_img1 = cv2.warpPerspective(img1, H_translation.dot(H_inv),
                                              (x_max - x_min, y_max - y_min))
            # Result canvas
            result = np.zeros_like(warped_img1)
            result[:] = warped_img1[:]
            # Paste the base image on the right
            y_start = translation_dist[1]
            y_end = translation_dist[1] + h2
            x_start = translation_dist[0]
            x_end = translation_dist[0] + w2
            # Clamp to the canvas bounds
            y_start = max(0, y_start)
            x_start = max(0, x_start)
            y_end = min(result.shape[0], y_end)
            x_end = min(result.shape[1], x_end)
            if y_end > y_start and x_end > x_start:
                result[y_start:y_end, x_start:x_end] = img2[0:y_end-y_start, 0:x_end-x_start]
        else:  # direction == 'right'
            # Attach img1 to the right of img2
            # Size of the warped canvas
            corners1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
            transformed_corners = cv2.perspectiveTransform(corners1, H)
            all_corners = np.concatenate((np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2),
                                          transformed_corners), axis=0)
            [x_min, y_min] = np.int32(all_corners.min(axis=0).ravel() - 0.5)
            [x_max, y_max] = np.int32(all_corners.max(axis=0).ravel() + 0.5)
            # Translation that shifts everything into positive coordinates
            translation_dist = [-x_min, -y_min]
            H_translation = np.array([[1, 0, translation_dist[0]],
                                      [0, 1, translation_dist[1]],
                                      [0, 0, 1]])
            # Apply the perspective transform plus translation
            warped_img1 = cv2.warpPerspective(img1, H_translation.dot(H),
                                              (x_max - x_min, y_max - y_min))
            # Result canvas
            result = np.zeros_like(warped_img1)
            # First place the base image (img2)
            y_start = translation_dist[1]
            y_end = translation_dist[1] + h2
            x_start = translation_dist[0]
            x_end = translation_dist[0] + w2
            # Clamp to the canvas bounds
            y_start = max(0, y_start)
            x_start = max(0, x_start)
            y_end = min(result.shape[0], y_end)
            x_end = min(result.shape[1], x_end)
            if y_end > y_start and x_end > x_start:
                result[y_start:y_end, x_start:x_end] = img2[0:y_end-y_start, 0:x_end-x_start]
            # Then overlay the warped first image
            result = blend_images_simple(result, warped_img1)
        return result
    except Exception as e:
        print(f"Error while stitching: {str(e)}")
        return img2
def blend_images_simple(base_img, overlay_img):
    """
    Simple image blending.
    :param base_img: base image
    :param overlay_img: overlay image
    :return: blended image
    """
    # Make sure both images have the same size
    if base_img.shape != overlay_img.shape:
        h, w = base_img.shape[:2]
        overlay_img = cv2.resize(overlay_img, (w, h))
    # Mask of the non-black regions of the overlay image
    gray_overlay = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    mask = gray_overlay > 10
    # Result image
    result = base_img.copy()
    # Copy the overlay only where it actually has content
    for c in range(3):  # for each color channel
        result_channel = result[:, :, c]
        overlay_channel = overlay_img[:, :, c]
        result_channel[mask] = overlay_channel[mask]
    return result
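
# The hard mask above simply overwrites pixels, which can leave a visible seam along
# the overlap boundary. Below is a feathering sketch (an assumption added for
# illustration, not the original method): the two images are mixed with weights that
# decay smoothly toward each image's border. It assumes both inputs already have the
# same size, as enforced in blend_images_simple.
def blend_images_feather(base_img, overlay_img):
    # Binary content masks (non-black pixels)
    mask_base = (cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY) > 10).astype(np.uint8) * 255
    mask_over = (cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY) > 10).astype(np.uint8) * 255
    # Distance transforms give weights that grow toward the interior of each image
    w_base = cv2.distanceTransform(mask_base, cv2.DIST_L2, 5)
    w_over = cv2.distanceTransform(mask_over, cv2.DIST_L2, 5)
    total = w_base + w_over
    total[total == 0] = 1.0  # avoid division by zero where neither image has content
    alpha = (w_over / total)[..., None]
    blended = base_img.astype(np.float32) * (1 - alpha) + overlay_img.astype(np.float32) * alpha
    return np.clip(blended, 0, 255).astype(np.uint8)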
def stitch_images_optimized(images, target_width=1536, target_height=2048):
    """
    Optimized multi-image stitching, designed for curvature-corrected images.
    :param images: list of preprocessed images
    :param target_width: target width
    :param target_height: target height
    :return: stitched image
    """
    if len(images) < 2:
        raise ValueError("At least two images are required for stitching")
    print("Starting image stitching...")
    # With exactly two images, stitch them directly
    if len(images) == 2:
        print("Stitching two images directly...")
        result = stitch_two_images_left_right(images[0], images[1], 'right')
        return resize_to_target(result, target_width, target_height)
    # Start from the middle image
    mid_index = len(images) // 2
    base_img = images[mid_index]
    print(f"Using image {mid_index+1} as the reference")
    # Stitch to the left
    left_result = base_img
    for i in range(mid_index-1, -1, -1):
        print(f"Stitching image {i+1} to the left...")
        left_result = stitch_two_images_left_right(images[i], left_result, 'left')
        if left_result is None:
            print("Stitching to the left failed, using the reference image")
            left_result = base_img
    # Stitch to the right
    right_result = base_img
    for i in range(mid_index+1, len(images)):
        print(f"Stitching image {i+1} to the right...")
        right_result = stitch_two_images_left_right(images[i], right_result, 'right')
        if right_result is None:
            print("Stitching to the right failed, using the reference image")
            right_result = base_img
    # Merge the left and right halves
    print("Merging the left and right halves...")
    try:
        # Simple left/right merge: append the right half to the right of the left half
        h_left, w_left = left_result.shape[:2]
        h_right, w_right = right_result.shape[:2]
        # Total width (minus a rough estimate of the overlap)
        overlap_estimate = w_left // 3  # assume the overlap is about 1/3 of the left width
        total_width = w_left + w_right - overlap_estimate
        total_height = max(h_left, h_right)
        # Result canvas
        result = np.zeros((total_height, total_width, 3), dtype=np.uint8)
        # Place the left half
        result[0:h_left, 0:w_left] = left_result
        # Place the right half, shifted to account for the estimated overlap
        right_start_x = w_left - overlap_estimate
        result[0:h_right, right_start_x:right_start_x + w_right] = right_result
        final_result = result
    except Exception as e:
        print(f"Merging the left and right halves failed: {str(e)}")
        # If the merge fails, keep the wider of the two halves
        if left_result.shape[1] > right_result.shape[1]:
            final_result = left_result
        else:
            final_result = right_result
    # Final resizing
    final_result = resize_to_target(final_result, target_width, target_height)
    return final_result
def resize_to_target(img, target_width=1536, target_height=2048):
    """
    Resize an image to the target size (letterboxed on a black canvas).
    :param img: input image
    :param target_width: target width
    :param target_height: target height
    :return: resized image
    """
    if img is None:
        return None
    h, w = img.shape[:2]
    # If the image is already close to the target size, return it as is
    if abs(w - target_width) < 100 and abs(h - target_height) < 100:
        return img
    # Scale factor that fits the image inside the target size
    scale_x = target_width / w
    scale_y = target_height / h
    scale = min(scale_x, scale_y)
    # New size
    new_width = int(w * scale)
    new_height = int(h * scale)
    # Resize
    resized = cv2.resize(img, (new_width, new_height))
    # Canvas of the target size
    result = np.zeros((target_height, target_width, 3), dtype=np.uint8)
    # Centered position
    y_start = (target_height - new_height) // 2
    x_start = (target_width - new_width) // 2
    # Place the image in the center of the canvas
    result[y_start:y_start+new_height, x_start:x_start+new_width] = resized
    return result
def post_process(result):
    """
    Post-processing: enhance the image quality.
    :param result: stitched result
    :return: processed image
    """
    if result is None:
        return None
    # 1. Histogram equalization (each channel separately)
    b, g, r = cv2.split(result)
    b_eq = cv2.equalizeHist(b)
    g_eq = cv2.equalizeHist(g)
    r_eq = cv2.equalizeHist(r)
    result_eq = cv2.merge([b_eq, g_eq, r_eq])
    # 2. Sharpening
    kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    sharpened = cv2.filter2D(result_eq, -1, kernel)
    # 3. Crop away the black borders
    gray = cv2.cvtColor(sharpened, cv2.COLOR_BGR2GRAY)
    coords = np.column_stack(np.where(gray > 10))
    if len(coords) > 0:
        y_min, x_min = coords.min(axis=0)
        y_max, x_max = coords.max(axis=0)
        cropped = sharpened[y_min:y_max+1, x_min:x_max+1]
        return cropped
    return sharpened
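
# Equalizing the B/G/R channels independently can shift the colors, because each
# histogram is stretched on its own. A gentler sketch (an assumption added for
# illustration, not part of the original pipeline) applies CLAHE to the lightness
# channel only, in LAB space:
def enhance_contrast_clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    l_eq = clahe.apply(l)
    return cv2.cvtColor(cv2.merge([l_eq, a, b]), cv2.COLOR_LAB2BGR)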
def main():
    """
    Main program.
    """
    # List of image files (change these to your own paths)
    image_paths = ['06.jpeg', '07.jpeg', '08.jpeg']  # change to your image paths
    # Read the images
    images = []
    for i, path in enumerate(image_paths):
        img = cv2.imread(path)
        if img is None:
            print(f"Error: could not read image file {path}")
            continue
        images.append(img)
        print(f"Loaded image {i+1}: {path} - size: {img.shape[1]}x{img.shape[0]}")
    if len(images) < 2:
        print("Error: at least two valid images are required")
        return
    # Target size
    TARGET_WIDTH = 1536
    TARGET_HEIGHT = 2048
    print(f"Target size: {TARGET_WIDTH}x{TARGET_HEIGHT}")
    print(f"Processing {len(images)} images...")
    try:
        # 1. Preprocessing: curvature correction and resizing
        processed_images = preprocess_images(images, TARGET_WIDTH, TARGET_HEIGHT)
        if len(processed_images) < 2:
            print("Not enough valid images after preprocessing")
            return
        # 2. Stitch the images
        result = stitch_images_optimized(processed_images, TARGET_WIDTH, TARGET_HEIGHT)
        if result is None:
            print("Stitching failed")
            return
        # 3. Post-processing
        result = post_process(result)
        # 4. Final resizing
        result = resize_to_target(result, TARGET_WIDTH, TARGET_HEIGHT)
        # Show the result
        cv2.imshow('Curvature Corrected Panorama', result)
        print("Press any key to close the window...")
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # Save the result
        output_path = 'curvature_panorama.jpg'
        cv2.imwrite(output_path, result)
        print(f"Stitching finished! Final size: {result.shape[1]}x{result.shape[0]}")
        print(f"Result saved as {output_path}")
    except Exception as e:
        print(f"An error occurred during processing: {str(e)}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()