diff --git a/image_fusion/Image_Registration_test.py b/image_fusion/Image_Registration_test.py
index e5e5b71..7161906 100644
--- a/image_fusion/Image_Registration_test.py
+++ b/image_fusion/Image_Registration_test.py
@@ -49,6 +49,10 @@ def Images_matching(img_base, img_target):
     """
     start = time.time()
     orb = cv2.ORB_create()
+
+    # 对可见光图像进行对比度拉伸
+    # img_base = truncated_linear_stretch(img_base)
+
     img_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
     sift = cv2.SIFT_create()
     # 使用sift算子计算特征点和特征点周围的特征向量
@@ -56,7 +60,9 @@ def Images_matching(img_base, img_target):
     kp1, des1 = sift.detectAndCompute(img_base, None)  # 1136 1136, 64
     kp2, des2 = sift.detectAndCompute(img_target, None)
     en1 = time.time()
+
     # print(en1 - st1, "特征提取")
+
     # 进行KNN特征匹配
     # FLANN_INDEX_KDTREE = 0  # 建立FLANN匹配器的参数
     # indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # 配置索引,密度树的数量为5
@@ -68,6 +74,7 @@ def Images_matching(img_base, img_target):
     # search_params = dict(checks=50)
     # flann = cv2.FlannBasedMatcher(index_params, search_params)
     # matches = flann.knnMatch(des1, des2, k=2)
+
    st2 = time.time()
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(des1, des2, k=2)
@@ -143,7 +150,7 @@ def main(matchimg_vi, matchimg_in):
         return 0, None, 0
     else:
         matched_ni = cv2.warpPerspective(orimg_in, H, (w, h))
-        # matched_ni,left,right,top,bottom=removeBlackBorder(matched_ni)
+        matched_ni, left, right, top, bottom = removeBlackBorder(matched_ni)
         # fusion = fusions(orimg_vi[left:right, top:bottom], matched_ni)
         fusion = fusions(orimg_vi, matched_ni)
 