- 代码来源
https://blog.51cto.com/u_16099242/10447591
- 改进
增加了视频合并(原始画面与稳定画面并排输出);标明视频中心,便于直观对比稳定前后的具体差异。
-
具体代码
# Import required libraries
import numpy as np
import cv2

# Whether to write the original and stabilized videos side by side
merge_video = True
# When merging, concatenate horizontally (True) or vertically (False)
merge_horizontal = True
# Grid / circle spacing in pixels for the visual overlays
grid_size = 100
# Radius (in frames) of the moving-average window used to smooth the trajectory
SMOOTHING_RADIUS = 50
def video_grid(frame, width, height, direction, color):
    """Draw evenly spaced grid lines on *frame* (in place).

    Args:
        frame: BGR image to draw on (modified in place).
        width, height: frame dimensions in pixels.
        direction: truthy -> vertical lines every ``grid_size`` px,
            falsy -> horizontal lines.
        color: BGR line color.

    Does nothing when video merging is disabled.
    """
    if not merge_video:
        return
    if direction:
        for i in range(1, int(width / grid_size) + 1):
            cv2.line(frame, (int(grid_size * i), 0), (int(grid_size * i), int(height)), color, 2)
    else:
        for i in range(1, int(height / grid_size) + 1):
            cv2.line(frame, (0, int(grid_size * i)), (int(width), int(grid_size * i)), color, 2)


def video_circle(frame, width, height, color):
    """Mark the frame center with a dot and concentric reference circles.

    BUGFIX: the original loop passed the constant ``grid_size`` as the radius,
    drawing the same circle repeatedly instead of concentric rings; the radius
    now grows as ``grid_size * i``.
    """
    center = (int(width / 2), int(height / 2))
    # Small filled-looking dot at the exact center.
    cv2.circle(frame, center, 5, color, 3)
    # Concentric rings up to the shorter frame dimension.
    size = min(width, height)
    for i in range(1, int(size / grid_size) + 1):
        cv2.circle(frame, center, grid_size * i, color, 2)


# Mark the matched keypoints on the frame
def video_keypoints(frame, matches, keypoints, check_x, check_y):
    """Draw a small green circle at every matched keypoint.

    Args:
        frame: BGR image to draw on (modified in place).
        matches: iterable of cv2.DMatch objects; ``trainIdx`` indexes *keypoints*.
        keypoints: sequence of cv2.KeyPoint whose ``.pt`` gives (x, y).
        check_x, check_y: pixel offsets added to each keypoint position.
    """
    for m in matches:
        px, py = keypoints[m.trainIdx].pt
        cv2.circle(frame, (int(px + check_x), int(py + check_y)), 3, (0, 255, 0), 2)


# Build the output filename
def get_filename():
    """Return the output video filename.

    ``stab.mp4`` when merging is off; ``stab_h.mp4`` / ``stab_v.mp4`` for
    horizontal / vertical side-by-side output.
    """
    suffix = ''
    if merge_video:
        suffix = '_h' if merge_horizontal else '_v'
    return 'stab' + suffix + '.mp4'


# Moving-average filter used to smooth a 1-D motion curve
def moving_average(curve, radius):
    """Smooth a 1-D curve with a centered moving average.

    Args:
        curve: 1-D array-like of values.
        radius: half-width of the window; the full window is ``2*radius + 1``.

    Returns:
        np.ndarray of the same length as *curve*. Edge values are replicated
        before filtering so the output has no shrinkage at the boundaries.
    """
    window = 2 * radius + 1
    kernel = np.ones(window) / window          # uniform averaging kernel
    padded = np.pad(curve, (radius, radius), 'edge')  # replicate edges
    smoothed = np.convolve(padded, kernel, mode='same')
    return smoothed[radius:-radius]            # drop the padded region


# Smooth every component of the whole trajectory
def smooth_trajectory(trajectory):
    """Smooth each of the three trajectory components (dx, dy, da).

    Args:
        trajectory: (N, 3) array of cumulative motion parameters.

    Returns:
        (N, 3) array where every column was filtered with
        ``moving_average`` at radius ``SMOOTHING_RADIUS``.
    """
    result = np.copy(trajectory)
    for axis in range(3):
        result[:, axis] = moving_average(trajectory[:, axis], radius=SMOOTHING_RADIUS)
    return result


# Hide the black borders introduced by warping
def fix_border(frame):
    """Zoom the frame 4% about its center to hide the black edges left by warping.

    Returns the zoomed frame; the input is not modified.
    """
    s = frame.shape
    # Rotation angle 0, scale 1.04 -> pure center zoom.
    T = cv2.getRotationMatrix2D((s[1] / 2, s[0] / 2), 0, 1.04)
    return cv2.warpAffine(frame, T, (s[1], s[0]))


def video_stabilization(video_path):
    """Stabilize *video_path* and write original + stabilized frames side by side.

    Two passes: (1) estimate per-frame affine motion with optical flow,
    (2) re-read the video and warp each frame by the smoothed motion.
    The result is written to ``get_filename()``.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("Error opening video file")
        exit()
    # Video geometry and timing.
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # BUGFIX: the writer size must match the concat direction; the original
    # always used (2*w, h), which breaks the file for vertical merging.
    out_size = (2 * w, h) if merge_horizontal else (w, 2 * h)
    out = cv2.VideoWriter(get_filename(), fourcc, fps, out_size)
    # First frame seeds the optical-flow tracking.
    _, prev = cap.read()
    if prev is None:
        print("Error reading video file")
        cap.release()
        exit()
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    # One (dx, dy, da) row per frame transition.
    transforms = np.zeros((n_frames - 1, 3), np.float32)
    # Pass 1: estimate inter-frame motion.
    # BUGFIX: iterate n_frames - 1 transitions; the original used
    # n_frames - 2 and silently dropped the last one.
    for i in range(n_frames - 1):
        prev_pts = cv2.goodFeaturesToTrack(prev_gray, maxCorners=200,
                                           qualityLevel=0.01, minDistance=30,
                                           blockSize=3)
        success, curr = cap.read()
        if not success:
            break
        curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
        if prev_pts is None:
            # No trackable features in this frame: assume no motion
            # (goodFeaturesToTrack returns None, which would crash the LK call).
            prev_gray = curr_gray
            continue
        curr_pts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray,
                                                         prev_pts, None)
        # Keep only successfully tracked points.
        idx = np.where(status == 1)[0]
        prev_pts = prev_pts[idx]
        curr_pts = curr_pts[idx]
        # Too few correspondences -> fall back to the identity transform.
        if prev_pts.shape[0] < 4:
            m = np.eye(2, 3, dtype=np.float32)
        else:
            m, _ = cv2.estimateAffinePartial2D(prev_pts, curr_pts)
            if m is None:
                m = np.eye(2, 3, dtype=np.float32)
        dx = m[0, 2]
        dy = m[1, 2]
        da = np.arctan2(m[1, 0], m[0, 0])  # rotation angle from the 2x2 part
        transforms[i] = [dx, dy, da]
        prev_gray = curr_gray
    # Smooth the cumulative trajectory and derive the corrected transforms.
    trajectory = np.cumsum(transforms, axis=0)
    smoothed_trajectory = smooth_trajectory(trajectory)
    difference = smoothed_trajectory - trajectory
    transforms_smooth = transforms + difference
    # Pass 2: re-read from the start and apply the smoothed transforms.
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    for i in range(n_frames - 1):
        success, frame = cap.read()
        if not success:
            break
        video_circle(frame, w, h, (0, 0, 255))  # red center marks: original
        dx = transforms_smooth[i, 0]
        dy = transforms_smooth[i, 1]
        da = transforms_smooth[i, 2]
        # Rebuild the 2x3 rigid transform from (dx, dy, da).
        m = np.zeros((2, 3), np.float32)
        m[0, 0] = np.cos(da)
        m[0, 1] = -np.sin(da)
        m[1, 0] = np.sin(da)
        m[1, 1] = np.cos(da)
        m[0, 2] = dx
        m[1, 2] = dy
        frame_stabilized = cv2.warpAffine(frame, m, (w, h))
        frame_stabilized = fix_border(frame_stabilized)
        video_circle(frame_stabilized, w, h, (0, 255, 255))  # yellow: stabilized
        # Place original and stabilized frames side by side.
        if merge_horizontal:
            frame_out = cv2.hconcat([frame, frame_stabilized])
        else:
            frame_out = cv2.vconcat([frame, frame_stabilized])
        out.write(frame_out)
    cap.release()
    out.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    video_stabilization('../test-1920X1080.mp4')