Python: testing LoFTR feature matching with different parts of the same image

Below is complete code for testing LoFTR matching between a crop of an image and the full image, as a sanity check that the model is working. If the model and code are correct, a cropped region should match a large number of feature points back to the original image:

Download the weights from https://drive.google.com/drive/folders/1DOcOPZb3-5cWxLqn256AhwUVjBPifhuf
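Before wiring the checkpoint into the model, it is worth a quick sanity check that the download contains a 'state_dict' entry, since the loading code below relies on it. A minimal sketch (the path matches the one used later in this post):

import torch

ckpt = torch.load("./weights/indoor_ds_new.ckpt", map_location="cpu")
print(ckpt.keys())                    # expect a 'state_dict' key among them
print(list(ckpt["state_dict"])[:5])   # first few parameter names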

import cv2
import torch
import kornia as K
import matplotlib.pyplot as plt
from kornia.feature.loftr import LoFTR as LoFTR_model
import numpy as np
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Matplotlib font setup (only needed if you keep Chinese labels)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Select the device (prefer GPU)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

# Keep preprocessing identical for both images
def load_tensor_image(img_path, resize=None, device="cuda"):
    """Load an image as a (1, 1, H, W) tensor, optionally resizing it."""
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError(f"Cannot read image: {img_path}")
    if resize is not None:
        # Aspect-ratio-preserving resize
        h, w = img.shape[:2]
        scale = min(resize[0] / h, resize[1] / w)
        # LoFTR's coarse stage works at 1/8 resolution, so keep dims divisible by 8
        new_w, new_h = int(w * scale) // 8 * 8, int(h * scale) // 8 * 8
        img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    img_tensor = K.image_to_tensor(img, False).float() / 255.0  # (1, 1, H, W) in [0, 1]
    return img_tensor.to(device)
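When loading with resize=None (method two below), the raw image dimensions may not be divisible by 8 either. A minimal padding helper for that case (the name pad_to_multiple is my own, not part of the original code):

import torch.nn.functional as F

def pad_to_multiple(img_tensor, multiple=8):
    """Zero-pad a (1, 1, H, W) tensor on the bottom/right so H and W are multiples of `multiple`."""
    _, _, h, w = img_tensor.shape
    pad_h = (multiple - h % multiple) % multiple
    pad_w = (multiple - w % multiple) % multiple
    # F.pad pads the last two dims in the order (left, right, top, bottom)
    return F.pad(img_tensor, (0, pad_w, 0, pad_h))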

def run_loftr_matching(loftr, img0, img1, conf_thresh=0.1):
    """Run LoFTR and drop low-confidence matches."""
    with torch.no_grad():
        correspondences = loftr({"image0": img0, "image1": img1})

    mkpts0 = correspondences["keypoints0"].cpu().numpy()
    mkpts1 = correspondences["keypoints1"].cpu().numpy()
    conf = correspondences["confidence"].cpu().numpy()

    # Filter out low-confidence matches
    valid = conf > conf_thresh
    return mkpts0[valid], mkpts1[valid]
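The confidence threshold alone still lets some outliers through. For this self-matching test the true transform between crop and original is a pure translation, which a homography subsumes, so RANSAC-based geometric verification is a natural extra filter. A sketch (my addition, not part of the original post; cv2.findHomography needs at least 4 matches):

def filter_matches_ransac(mkpts0, mkpts1, reproj_thresh=3.0):
    """Keep only matches consistent with a single homography (RANSAC)."""
    if len(mkpts0) < 4:
        return mkpts0, mkpts1
    _, mask = cv2.findHomography(mkpts0, mkpts1, cv2.RANSAC, reproj_thresh)
    inliers = mask.ravel().astype(bool)
    return mkpts0[inliers], mkpts1[inliers]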

def load_loftr_model(ckpt_path, device="cuda"):
    """Load LoFTR from a local checkpoint."""
    model = LoFTR_model(pretrained=None)
    checkpoint = torch.load(ckpt_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    model.eval()
    return model
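If you would rather not manage the checkpoint file yourself, recent kornia releases can fetch pretrained weights directly; the available names ('outdoor', 'indoor_new', ...) depend on your kornia version, so treat this as an alternative sketch:

# Alternative: let kornia download the weights (requires network access)
# loftr = LoFTR_model(pretrained="indoor_new").to(device).eval()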

def visualize_matches(img0, img1, mkpts0, mkpts1):
    """Visualize the matches side by side."""
    h0, w0 = img0.shape[:2]
    h1, w1 = img1.shape[:2]

    # Stitch the two images onto one canvas
    composite = np.zeros((max(h0, h1), w0 + w1), dtype=np.uint8)
    composite[:h0, :w0] = img0
    composite[:h1, w0:w0+w1] = img1

    plt.figure(figsize=(16, 8))
    plt.imshow(composite, cmap='gray')

    # Draw match lines (at most 100)
    for i in range(min(100, len(mkpts0))):
        x0, y0 = mkpts0[i]
        x1, y1 = mkpts1[i]
        plt.plot([x0, x1 + w0], [y0, y1], 'y-', linewidth=1, alpha=0.3)

    # Draw the matched keypoints
    plt.scatter(mkpts0[:, 0], mkpts0[:, 1], c='cyan', s=10, label="crop keypoints")
    plt.scatter(mkpts1[:, 0] + w0, mkpts1[:, 1], c='orange', s=10, label="matches in full image")

    plt.legend()
    plt.title(f"Self-matching test - number of matches: {len(mkpts0)}", fontsize=14)
    plt.axis('off')
    plt.show()

# 1. Load the model
ckpt_path = "./weights/indoor_ds_new.ckpt"
loftr = load_loftr_model(ckpt_path, device)

# 2. Load the full image
img_path = "./files/a.jpg"  # replace with your own image path
img_large = load_tensor_image(img_path, resize=(1024, 1024), device=device)

# 3. Prepare the small image to match.
# Method one: crop a region from the large image (simulates the query image)
# h, w = img_large.shape[2], img_large.shape[3]
# Crop region
# img_small = img_large[:, :, h//3:h//3*2, w//6:w//6*2].clone()
# Convert to NumPy and save
# img_small_np = K.tensor_to_image(img_small * 255.).astype(np.uint8)
# cv2.imwrite("./files/cropped_small.png", img_small_np)
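With method one the crop origin is known exactly, so the matches can be checked against ground truth: each point in the crop should map to itself plus the crop offset. A sketch of that check (my addition, commented out like the method-one code it depends on):

# Ground-truth check for method one (crop origin is (w//6, h//3) in x, y)
# offset = np.array([w//6, h//3])
# err = np.linalg.norm(mkpts1 - (mkpts0 + offset), axis=1)
# print(f"Median reprojection error: {np.median(err):.2f} px")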

# Method two: load a separately saved crop. Manual resizing can introduce errors,
# so for an exact match the small image must be a strict subset of the large one;
# use resize=None or scale it while preserving the aspect ratio.
img_small_path = "./files/c.jpg"
img_small = load_tensor_image(img_small_path, resize=None, device=device)  # note resize=None


# 4. Run the matching
mkpts0, mkpts1 = run_loftr_matching(loftr, img_small, img_large, conf_thresh=0.1)
print(f"Number of self-matches: {len(mkpts0)}")

# 5. Visualize
img0 = K.tensor_to_image(img_small * 255).astype(np.uint8)  # small image back to NumPy
img1 = K.tensor_to_image(img_large * 255).astype(np.uint8)  # large image back to NumPy
visualize_matches(img0, img1, mkpts0, mkpts1)
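Even without a known crop offset, a self-match should be dominated by one consistent translation (assuming the crop and the full image are at the same scale), so the displacements mkpts1 - mkpts0 should cluster tightly. A small verification sketch, my addition on top of the variables above:

disp = mkpts1 - mkpts0                           # per-match displacement (x, y)
med = np.median(disp, axis=0)                    # dominant translation
spread = np.median(np.abs(disp - med), axis=0)   # MAD around that translation
print(f"Dominant translation: {med}, MAD spread: {spread}")
# A healthy self-match has many points and a spread of only a few pixels.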
