The `video` module is the core of OpenCV's video-analysis functionality. Its main building blocks (background subtraction, optical flow, and object tracking) are covered below, starting with the background-subtraction algorithms:
| Algorithm | Characteristics | Typical Use Case | Key Parameters | Example |
|---|---|---|---|---|
| MOG2 | Gaussian mixture model with adaptive learning rate | Indoor and outdoor scenes (e.g. surveillance video) | `history=200`, `detectShadows=False` | `back_sub = cv2.createBackgroundSubtractorMOG2()` |
| KNN | K-nearest-neighbour model, memory-efficient | Low-memory devices (e.g. embedded systems) | `history=100`, `dist2Threshold=400.0` | `back_sub = cv2.createBackgroundSubtractorKNN()` |
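The KNN subtractor from the table drops into the same `apply()` workflow as MOG2; a minimal sketch using the parameters listed above (the video path is a placeholder):

```python
import cv2

# KNN background subtractor with the parameters from the table above
back_sub = cv2.createBackgroundSubtractorKNN(history=100, dist2Threshold=400.0,
                                             detectShadows=False)

cap = cv2.VideoCapture("traffic.mp4")  # placeholder path
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    fg_mask = back_sub.apply(frame)  # same interface as MOG2
    cv2.imshow("KNN Foreground", fg_mask)
    if cv2.waitKey(30) & 0xFF == 27:
        break
cap.release()
cv2.destroyAllWindows()
```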
A complete motion-detection example with MOG2:

```python
import cv2
import numpy as np

# Initialize the background subtractor
back_sub = cv2.createBackgroundSubtractorMOG2(
    history=200,          # number of history frames
    detectShadows=False   # disable shadow detection (shadows would be flagged as motion)
)

# Open the video
cap = cv2.VideoCapture("traffic.mp4")

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Background subtraction
    fg_mask = back_sub.apply(frame)

    # Post-processing (morphological noise removal)
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))

    # Contour detection
    contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Draw moving regions
    for cnt in contours:
        if cv2.contourArea(cnt) > 1000:  # filter out small regions
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Show the result
    cv2.imshow("Motion Detection", frame)
    if cv2.waitKey(30) & 0xFF == 27:  # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()
```
- `history`: number of history frames (100-500 is a good range; increase it for outdoor scenes).
- `detectShadows`: turn it off to reduce false detections (shadows are usually gray and can also be filtered out by value, as sketched below).
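If shadow detection is left on, MOG2 labels shadow pixels with the value 127 (the default) in the returned mask, so they can be removed with a simple threshold. A small sketch:

```python
import cv2

back_sub = cv2.createBackgroundSubtractorMOG2(history=200, detectShadows=True)

def foreground_without_shadows(frame):
    """Apply MOG2 and drop shadow pixels (labelled 127 by default)."""
    fg_mask = back_sub.apply(frame)
    # Keep only definite foreground (255); shadow pixels fall below the threshold
    _, fg_mask = cv2.threshold(fg_mask, 200, 255, cv2.THRESH_BINARY)
    return fg_mask
```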
The following example tracks feature points across frames with Lucas-Kanade (LK) sparse optical flow:

```python
import cv2
import numpy as np

# Open the video
cap = cv2.VideoCapture("walking.mp4")

# Read the first frame and detect feature points
ret, prev_frame = cap.read()
prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
prev_pts = cv2.goodFeaturesToTrack(prev_gray, maxCorners=100, qualityLevel=0.3, minDistance=7)

# LK optical-flow parameters
lk_params = dict(
    winSize=(15, 15),
    maxLevel=2,
    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Compute optical flow
    curr_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    curr_pts, status, err = cv2.calcOpticalFlowPyrLK(
        prev_gray, curr_gray, prev_pts, None, **lk_params
    )

    # Keep only successfully tracked points
    good_prev = prev_pts[status == 1]
    good_curr = curr_pts[status == 1]

    # Draw the motion vectors
    for (x1, y1), (x2, y2) in zip(good_prev, good_curr):
        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
        cv2.circle(frame, (int(x2), int(y2)), 5, (0, 255, 0), -1)

    # Update the previous frame and points
    prev_gray = curr_gray.copy()
    prev_pts = good_curr.reshape(-1, 1, 2)

    # Show the result
    cv2.imshow("Optical Flow", frame)
    if cv2.waitKey(30) & 0xFF == 27:  # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()
```
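Tracked points are gradually lost as they leave the frame or fail the status check, so a common pattern is to re-detect features once too few remain. A sketch (the threshold and helper name are choices made here for illustration):

```python
import cv2

MIN_POINTS = 10  # arbitrary threshold for this sketch

def refresh_points(good_curr, curr_gray):
    """Return the point set to track into the next frame,
    re-detecting features when too few survived the status check."""
    if good_curr is None or len(good_curr) < MIN_POINTS:
        return cv2.goodFeaturesToTrack(curr_gray, maxCorners=100,
                                       qualityLevel=0.3, minDistance=7)
    return good_curr.reshape(-1, 1, 2)

# In the loop above, replace `prev_pts = good_curr.reshape(-1, 1, 2)` with:
# prev_pts = refresh_points(good_curr, curr_gray)
```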
(3D geometry functions such as camera calibration belong to the `calib3d` module.) The available object trackers are compared below:

| Tracker | Characteristics | Typical Use Case | Initialization |
|---|---|---|---|
| KCF (Kernelized Correlation Filters) | High accuracy, fast (30-60 FPS) | Robust to occlusion (e.g. pedestrian tracking) | `tracker = cv2.TrackerKCF_create()` |
| MOSSE (Minimum Output Sum of Squared Error) | Extremely fast (100+ FPS), sensitive to lighting | Real-time monitoring (e.g. traffic surveillance) | `tracker = cv2.TrackerMOSSE_create()` |
| CSRT (Discriminative Correlation Filter with Channel and Spatial Reliability) | Highest accuracy, moderate speed (15-30 FPS) | Complex scenes (e.g. multi-object tracking) | `tracker = cv2.TrackerCSRT_create()` |
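Note that in opencv-contrib-python 4.5.1 and later some of these constructors (MOSSE in particular) moved to the `cv2.legacy` namespace. A small helper, sketched defensively since the exact layout depends on the installed version:

```python
import cv2

def create_tracker(name="KCF"):
    """Create a tracker by name, falling back to cv2.legacy on newer builds."""
    factory = f"Tracker{name}_create"
    if hasattr(cv2, factory):
        return getattr(cv2, factory)()
    # In opencv-contrib-python >= 4.5.1, MOSSE (among others) lives here
    return getattr(cv2.legacy, factory)()

tracker = create_tracker("MOSSE")
```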
The example below lets you select ROIs interactively and then tracks each one with its own KCF tracker:

```python
import cv2

# Open the video
cap = cv2.VideoCapture("multiple_objects.mp4")
trackers = []  # list of active trackers

# Select initial ROIs (press 's' to add one, ESC to start tracking)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow("Select ROI", frame)
    key = cv2.waitKey(1)
    if key == ord('s'):  # 's' selects an ROI on the current frame
        roi = cv2.selectROI("Select ROI", frame, fromCenter=False, showCrosshair=True)
        tracker = cv2.TrackerKCF_create()
        tracker.init(frame, roi)
        trackers.append(tracker)
    elif key == 27:  # ESC starts tracking
        break

# Main tracking loop
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    for tracker in trackers:
        success, roi = tracker.update(frame)
        if success:
            x, y, w, h = map(int, roi)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Multi-object Tracking", frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()
```
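opencv-contrib also ships a MultiTracker convenience wrapper (under `cv2.legacy` in recent builds) that bundles the per-object update calls. A sketch, assuming `frame` and a list of `(x, y, w, h)` boxes named `rois` already exist (both names are placeholders here):

```python
import cv2

multi_tracker = cv2.legacy.MultiTracker_create()
# `frame` is the frame the ROIs were selected on, `rois` a list of (x, y, w, h)
for roi in rois:
    multi_tracker.add(cv2.legacy.TrackerKCF_create(), frame, roi)

# Per frame: one call updates every tracked box
ok, boxes = multi_tracker.update(frame)
for x, y, w, h in boxes:
    cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2)
```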
The following example combines background subtraction (to detect new targets) with KCF trackers (to follow them):

```python
import cv2
import numpy as np

# Initialize background subtractor and tracker list
back_sub = cv2.createBackgroundSubtractorMOG2(history=100)
trackers = []

# Open the video
cap = cv2.VideoCapture("surveillance.mp4")

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # 1. Motion detection
    fg_mask = back_sub.apply(frame)
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))

    # 2. Detect new targets (contour detection)
    contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) > 1000:  # treat large regions as new targets
            x, y, w, h = cv2.boundingRect(cnt)
            tracker = cv2.TrackerKCF_create()
            tracker.init(frame, (x, y, w, h))
            trackers.append(tracker)

    # 3. Multi-object tracking
    for tracker in trackers[:]:  # iterate over a copy so we can remove while looping
        success, roi = tracker.update(frame)
        if success:
            x, y, w, h = map(int, roi)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        else:
            trackers.remove(tracker)  # drop trackers that lost their target

    # Show the result
    cv2.imshow("Surveillance", frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()
```
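As written, step 2 will also re-initialize trackers on objects that are already being tracked, so the tracker list grows every frame. One simple fix, sketched here (the `tracked_boxes` list and the 0.3 threshold are choices made for illustration, not part of the original), is to skip detections that overlap an existing tracked box:

```python
def iou(box_a, box_b):
    """Intersection-over-union of two (x, y, w, h) boxes."""
    ax, ay, aw, ah = box_a
    bx, by, bw, bh = box_b
    x1, y1 = max(ax, bx), max(ay, by)
    x2, y2 = min(ax + aw, bx + bw), min(ay + ah, by + bh)
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    union = aw * ah + bw * bh - inter
    return inter / union if union > 0 else 0.0

# Before creating a new tracker for (x, y, w, h), check against the boxes
# drawn by the existing trackers in this frame (kept in `tracked_boxes`):
# if all(iou((x, y, w, h), b) < 0.3 for b in tracked_boxes):
#     ...create and append the tracker...
```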
The example below combines skin-color segmentation with LK optical flow for simple gesture tracking from a webcam:

```python
import cv2
import numpy as np

# Open the camera
cap = cv2.VideoCapture(0)

# Skin-color range (HSV)
lower_skin = np.array([0, 20, 70])
upper_skin = np.array([30, 255, 255])

# Optical-flow state
prev_gray = None
prev_pts = None

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # 1. Skin-color segmentation
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))

    # 2. Detect feature points (only inside the mask)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_and(gray, gray, mask=mask)
    curr_pts = cv2.goodFeaturesToTrack(gray, maxCorners=20, qualityLevel=0.3, minDistance=7)

    # 3. Optical-flow tracking
    if prev_pts is not None and curr_pts is not None:
        curr_pts, status, _ = cv2.calcOpticalFlowPyrLK(prev_gray, gray, prev_pts, None)
        good_prev = prev_pts[status == 1]
        good_curr = curr_pts[status == 1]
        # Draw the trajectories
        for (x1, y1), (x2, y2) in zip(good_prev, good_curr):
            cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)

    # Update the previous frame and points
    prev_gray = gray.copy()
    prev_pts = curr_pts

    # Show the result
    cv2.imshow("Gesture Recognition", frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()
```
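`goodFeaturesToTrack` also accepts the mask directly through its `mask` parameter, which avoids the `bitwise_and` step above. A short sketch (reusing `frame` and `mask` from the loop above):

```python
# Equivalent to the bitwise_and step: detect corners on the full grayscale
# image, but only where mask > 0
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
curr_pts = cv2.goodFeaturesToTrack(gray, maxCorners=20, qualityLevel=0.3,
                                   minDistance=7, mask=mask)
```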
- `back_sub.setHistory(0)`: adapt to rapid lighting changes (`history=0` means adaptive).
- Optical flow: `maxLevel=3` balances speed and accuracy.
- Tracking: use the `success` flag returned by `update()` to remove failed trackers.
- Run video capture in a separate thread (`threading.Thread`); see the sketch after the table below.

| Task | Recommended Algorithm | Typical Parameters | Frame Rate (FPS) | Memory Footprint |
|---|---|---|---|---|
| Motion detection | MOG2 background subtraction | `history=200`, `detectShadows=False` | 25-30 | Medium |
| Single-object tracking | KCF tracker | initialize with an ROI | 30-50 | Low |
| Multi-object tracking | CSRT tracker (one instance per object) | initialize each ROI separately | 15-25 | High |
| Trajectory analysis | LK optical flow + feature detection | `maxCorners=100` | 20-25 | Low |
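A minimal threaded-capture sketch for the `threading.Thread` tip above (the queue size, source path, and function name are choices made for illustration):

```python
import queue
import threading
import cv2

frame_queue = queue.Queue(maxsize=2)  # small buffer between capture and processing

def capture_worker(src):
    """Read frames in a background thread and hand them to the main loop."""
    cap = cv2.VideoCapture(src)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if not frame_queue.full():
            frame_queue.put(frame)
    cap.release()

threading.Thread(target=capture_worker, args=("surveillance.mp4",), daemon=True).start()

# Main loop: process frames as they arrive
while True:
    frame = frame_queue.get()
    # ...background subtraction / tracking on `frame` goes here...
```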
A Kalman filter can smooth and predict a tracked target's trajectory:

```python
import cv2
import numpy as np

# Kalman filter: 4 state variables (x, y, vx, vy), 2 measurements (x, y)
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1,0,0,0],[0,1,0,0]], np.float32)
kalman.transitionMatrix = np.array([[1,0,1,0],[0,1,0,1],[0,0,1,0],[0,0,0,1]], np.float32)

# Per frame: predict, then correct with the measured position (x, y),
# e.g. the center of the currently tracked bounding box
predicted = kalman.predict()
measurement = np.array([[x], [y]], np.float32)
kalman.correct(measurement)
```
Anomaly detection based on fluctuations of the moving-region area:

```python
# Runs inside the per-frame loop, after contour detection
areas = []
for cnt in contours:
    areas.append(cv2.contourArea(cnt))
if len(areas) > 5 and np.std(areas[-5:]) > 1000:  # large area fluctuation -> anomaly
    cv2.putText(frame, "Anomaly Detected!", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
```
Simple video stabilization by compensating the global motion estimated from optical flow:

```python
# Estimate the global motion vector from tracked feature points
prev_pts = cv2.goodFeaturesToTrack(prev_gray, 100, 0.3, 7)
curr_pts, status, _ = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_pts, None)
# Global translation: mean displacement of the tracked points
dx = np.mean(curr_pts[:, 0, 0] - prev_pts[:, 0, 0])
dy = np.mean(curr_pts[:, 0, 1] - prev_pts[:, 0, 1])
# Compensate by shifting the frame back
M = np.float32([[1, 0, -dx], [0, 1, -dy]])
stable_frame = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))
```
```plaintext
Video module learning path:
  Basics → background subtraction → optical flow → object tracking → advanced applications (trajectory prediction, anomaly detection)

Suggested projects:
  1. Smart surveillance system (motion detection + multi-object tracking)
  2. Gesture-controlled robot (optical-flow trajectory recognition)
  3. Video stabilizer (optical flow + motion compensation)

Performance tuning:
  - Background subtraction: reset periodically (back_sub = cv2.createBackgroundSubtractorMOG2()) to adapt to scene changes
  - Trackers: check that the ROI is valid (e.g. inside the frame) before calling update()
  - Optical flow: restrict feature detection to a region of interest (the mask parameter of goodFeaturesToTrack) to cut computation
```
```python
# Combined examples for the video module
import cv2

# 1. Background subtraction (MOG2)
back_sub = cv2.createBackgroundSubtractorMOG2()
cap = cv2.VideoCapture("motion.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    mask = back_sub.apply(frame)
    cv2.imshow("MOG2", mask)
    if cv2.waitKey(1) == 27:
        break

# 2. Optical flow (LK tracking)
cap = cv2.VideoCapture("tracking.mp4")
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
pts = cv2.goodFeaturesToTrack(gray, 100, 0.3, 7)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray_next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    pts_next, status, _ = cv2.calcOpticalFlowPyrLK(gray, gray_next, pts, None)
    for i in range(len(pts)):
        if status[i]:
            x1, y1 = pts[i].ravel()
            x2, y2 = pts_next[i].ravel()
            cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    # Carry the current frame and points into the next iteration
    gray, pts = gray_next, pts_next
    cv2.imshow("LK Flow", frame)
    if cv2.waitKey(1) == 27:
        break

# 3. Object tracking (KCF)
cap = cv2.VideoCapture("car.mp4")
ret, frame = cap.read()
roi = cv2.selectROI("Track", frame)
tracker = cv2.TrackerKCF_create()
tracker.init(frame, roi)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    success, roi = tracker.update(frame)
    if success:
        x, y, w, h = map(int, roi)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("KCF Tracking", frame)
    if cv2.waitKey(1) == 27:
        break

cap.release()
cv2.destroyAllWindows()
```
```bash
# Install OpenCV with the contrib modules (needed for the trackers)
pip install opencv-contrib-python

# Verify the installation
python -c "import cv2; print(cv2.__version__)"  # 4.5+ recommended (contrib trackers)
```
This guide covers the core features of the video module, from motion detection to more advanced tracking algorithms, with hands-on projects such as smart surveillance and gesture recognition, so you can quickly build video-analysis solutions. Each code example can be run on its own and reused or extended in real projects. For deeper study, consult the official documentation for the video module and the tracking literature (e.g. *Learning Visual Tracking*).