Image fusion module
This commit is contained in:
parent 1cfc280f34
commit ca275ba74b

243 image_fusion/Image_Registration_test.py Normal file
@@ -0,0 +1,243 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   :
# @Author :
# @File   : Image_Registration_test.py

import time

import cv2
import numpy as np


def sift_registration(img1, img2):
    img1gray = cv2.normalize(img1, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)
    img2gray = img2

    sift = cv2.SIFT_create()
    # Find the keypoints and descriptors with SIFT.
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)
    # FLANN parameters.
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    # Lowe's ratio test: keep a match only if the nearest neighbour is clearly
    # closer than the second-nearest one.
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    MIN_MATCH_COUNT = 4
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    else:
        print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
        M = np.array([[1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1]], dtype=np.float64)
    if M is None:
        # findHomography can fail and return None; fall back to the identity.
        M = np.array([[1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1]], dtype=np.float64)
    return 1, M, len(pts2)
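
# --- Illustrative sketch (editor's addition, not part of the original file).
# Minimal usage of sift_registration, assuming OpenCV >= 4.4 (SIFT lives in the
# main package); 'visible.png' and 'infrared.png' are hypothetical placeholder
# paths, not files shipped with this repository.
def _demo_sift_registration(path1='visible.png', path2='infrared.png'):
    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    if img1 is None or img2 is None:
        return  # placeholder paths: nothing to demo without real images
    flag, M, n_matches = sift_registration(img1, img2)
    print('matches kept by the ratio test:', n_matches)
    # M maps img1 coordinates onto img2, so warp img1 into img2's frame.
    aligned = cv2.warpPerspective(img1, M, (img2.shape[1], img2.shape[0]))
    cv2.imwrite('aligned.png', aligned)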

# Truncated linear RGB contrast stretch: discard values below the lower
# percentile (default 2%) and above the upper percentile (default 98%; the two
# cut percentiles are usually the same), then map the rest onto the output range.
def truncated_linear_stretch(image, truncated_value=2, maxout=255, min_out=0):
    """
    :param image: BGR image to stretch
    :param truncated_value: percentile cut at both ends, in percent
    :param maxout: upper bound of the output range
    :param min_out: lower bound of the output range
    :return: contrast-stretched BGR image
    """

    def gray_process(gray, maxout=maxout, minout=min_out):
        truncated_down = np.percentile(gray, truncated_value)
        truncated_up = np.percentile(gray, 100 - truncated_value)
        # Map [truncated_down, truncated_up] linearly onto [minout, maxout].
        gray_new = ((maxout - minout) / (truncated_up - truncated_down)) * (gray - truncated_down) + minout
        gray_new[gray_new < minout] = minout
        gray_new[gray_new > maxout] = maxout
        return np.uint8(gray_new)

    (b, g, r) = cv2.split(image)
    b = gray_process(b)
    g = gray_process(g)
    r = gray_process(r)
    result = cv2.merge((b, g, r))  # merge the channels back
    return result
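
# --- Illustrative sketch (editor's addition). A synthetic sanity check of the
# percentile stretch above: after stretching, the output should span roughly
# the full [min_out, maxout] range even when the input does not.
def _demo_truncated_stretch():
    rng = np.random.default_rng(0)
    # Low-contrast image: values confined to [80, 150).
    img = rng.integers(80, 150, (64, 64, 3), dtype=np.uint8)
    stretched = truncated_linear_stretch(img, truncated_value=2)
    print('input range: ', img.min(), img.max())
    print('output range:', stretched.min(), stretched.max())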

# RGB image registration: takes a daytime visible-light image and an infrared
# grayscale image, finds their shared SIFT feature points, and estimates the
# homography between them.
def Images_matching(img_base, img_target):
    """
    :param img_base: reference image (visible light, BGR)
    :param img_target: image to be aligned (infrared, grayscale)
    :return: (flag, homography matrix, number of matched points)
    """
    start = time.time()
    img_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT_create()
    # Compute SIFT keypoints and their descriptors.
    st1 = time.time()
    kp1, des1 = sift.detectAndCompute(img_base, None)
    kp2, des2 = sift.detectAndCompute(img_target, None)
    en1 = time.time()
    # print(en1 - st1, "feature extraction")
    # KNN feature matching. A FLANN matcher is an alternative to the
    # brute-force matcher used below:
    # FLANN_INDEX_KDTREE = 1
    # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # 5 KD-trees
    # search_params = dict(checks=50)                             # recursion depth
    # flann = cv2.FlannBasedMatcher(index_params, search_params)
    # matches = flann.knnMatch(des1, des2, k=2)
    st2 = time.time()
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(des1, des2, k=2)
    de2 = time.time()
    # print(de2 - st2, "feature matching")
    good = []
    # Keep only the good matches: a match survives if the nearest distance is
    # less than 0.75 times the second-nearest distance.
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # points in the query image
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # points in the train (template) image
    if len(src_pts) <= 4:
        return 0, None, 0
    else:
        # print(len(dst_pts), len(src_pts), "registration points")
        H = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 4)  # H[0]: 3x3 matrix, H[1]: inlier mask
        end = time.time()
        times = end - start
        # print("registration time", times)
        return 1, H[0], len(dst_pts)
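
# --- Illustrative sketch (editor's addition). The commented-out FLANN matcher
# above and the brute-force matcher actually used are drop-in replacements for
# float32 SIFT descriptors; this runs both on synthetic descriptors.
def _demo_matchers():
    rng = np.random.default_rng(0)
    des1 = rng.random((200, 128), dtype=np.float32)
    des2 = rng.random((200, 128), dtype=np.float32)
    bf_matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)
    flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
    flann_matches = flann.knnMatch(des1, des2, k=2)
    print(len(bf_matches), len(flann_matches))  # 200 200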

def fusions(img_vl, img_inf):
    """
    :param img_vl: visible-light image (3 channels)
    :param img_inf: infrared image (single channel)
    :return: fused image
    """
    img_YUV = cv2.cvtColor(img_vl, cv2.COLOR_RGB2YUV)
    y, u, v = cv2.split(img_YUV)  # split the channels to get the Y (luma) channel
    Yf = y * 0.5 + img_inf * 0.5  # 50/50 blend of visible luma and infrared
    Yf = Yf.astype(np.uint8)
    fusion = cv2.cvtColor(cv2.merge((Yf, u, v)), cv2.COLOR_YUV2RGB)
    return fusion
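
# --- Illustrative sketch (editor's addition). The Y-channel blend in fusions
# is a 50/50 weighted sum; cv2.addWeighted computes the same thing (up to
# rounding) with saturation handled by OpenCV. Synthetic data only.
def _demo_y_blend():
    rng = np.random.default_rng(0)
    y = rng.integers(0, 256, (48, 64), dtype=np.uint8)
    ir = rng.integers(0, 256, (48, 64), dtype=np.uint8)
    blended = cv2.addWeighted(y, 0.5, ir, 0.5, 0)
    print(blended.shape, blended.dtype)  # (48, 64) uint8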

def removeBlackBorder(gray):
    """
    Remove the extra black border left on a stitched image.
    Input:
        gray: 2-D numpy array, the image to crop
    Output:
        the cropped image and the crop bounds
    """
    threshold = 40  # intensity threshold separating border from content
    nrow = gray.shape[0]  # image size
    ncol = gray.shape[1]
    rowc = gray[:, ncol // 2]  # middle column; cannot handle a black area covering more than half the image
    colc = gray[nrow // 2, :]  # middle row
    rowflag = np.argwhere(rowc > threshold)
    colflag = np.argwhere(colc > threshold)
    left, bottom, right, top = rowflag[0, 0], colflag[-1, 0], rowflag[-1, 0], colflag[0, 0]
    # cv2.imshow('name', gray[left:right, top:bottom])  # preview
    # cv2.waitKey(1)
    return gray[left:right, top:bottom], left, right, top, bottom
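
# --- Illustrative sketch (editor's addition). A synthetic frame with a black
# border shows what removeBlackBorder recovers; the threshold of 40 and the
# middle row/column probes are the function's own assumptions.
def _demo_remove_black_border():
    frame = np.zeros((100, 120), dtype=np.uint8)
    frame[10:90, 15:105] = 200  # bright content surrounded by a black border
    cropped, left, right, top, bottom = removeBlackBorder(frame)
    print(cropped.shape, left, right, top, bottom)  # (79, 89) 10 89 15 104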

def main(matchimg_vi, matchimg_in):
    """
    :param matchimg_vi: visible-light image
    :param matchimg_in: infrared image
    :return: the fused image
    """
    orimg_vi = matchimg_vi
    orimg_in = matchimg_in
    h, w = orimg_vi.shape[:2]  # e.g. 480 x 640
    flag, H, dot = Images_matching(matchimg_vi, matchimg_in)  # (3, 3) homography from the matched points
    if flag == 0:
        return 0, 0, 0
    else:
        matched_ni = cv2.warpPerspective(orimg_in, H, (w, h))
        # matched_ni, left, right, top, bottom = removeBlackBorder(matched_ni)
        # fusion = fusions(orimg_vi[left:right, top:bottom], matched_ni)
        fusion = fusions(orimg_vi, matched_ni)
        # print(fusion.shape)
        return 1, fusion, dot
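
# --- Illustrative sketch (editor's addition). Running the fusion pipeline on a
# single visible/infrared frame pair instead of the videos below; the paths are
# hypothetical placeholders.
def _demo_single_pair(path_vis='vis_frame.png', path_inf='ir_frame.png'):
    vis = cv2.imread(path_vis)  # BGR visible-light frame
    inf = cv2.imread(path_inf, cv2.IMREAD_GRAYSCALE)  # single-channel infrared
    if vis is None or inf is None:
        return  # placeholder paths: nothing to demo without real frames
    flag, fusion, dot = main(vis, inf)
    if flag:
        cv2.imwrite('fusion.png', fusion)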

if __name__ == '__main__':
    time_all = 0
    dots = 0
    i = 0
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    capture = cv2.VideoCapture("video/20190926_141816_1_8/20190926_141816_1_8/infrared.mp4")
    capture2 = cv2.VideoCapture("video/20190926_141816_1_8/20190926_141816_1_8/visible.mp4")
    fps = capture.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter('output2.mp4', fourcc, fps, (640, 480))
    # Keep reading frames from both videos.
    while True:
        start = time.time()
        read_code, frame = capture.read()
        read_code2, frame2 = capture2.read()
        if not read_code or not read_code2:
            break
        i += 1
        # frame = cv2.resize(frame, (1920, 1080))
        # frame2 = cv2.resize(frame2, (640, 512))

        # frame = frame[80:512, 0:640]
        # frame2 = frame2[200:1080, 0:1920]
        cv2.imshow("color", frame2)
        cv2.imshow("gray", frame)
        # Press 'q' to exit the loop.
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
        h1, w1 = frame.shape[:2]
        h2, w2 = frame2.shape[:2]
        print(h1, w1, h2, w2)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        flag, fusion, dot = main(frame2, frame_gray)
        if flag == 0:
            continue
        end = time.time()
        # print(dot)
        dots += dot
        cv2.imshow("fusion", fusion)
        # cv2.imshow("color", frame2)
        # cv2.imshow("gray", frame)
        out.write(fusion)
        use_time = end - start
        time_all += use_time
        if cv2.waitKey(1) == ord('q'):
            break
    # Release resources.
    capture.release()
    capture2.release()
    out.release()
    cv2.destroyAllWindows()
    ave = time_all / i
    print(ave, "average time")
147 image_fusion/Img_Registration.py Normal file
@@ -0,0 +1,147 @@
# -*- coding: utf-8 -*-
# @Time   :
# @Author :
import cv2
import numpy as np

sift = cv2.SIFT_create()


def compuerSift2GetPts(img1, img2):
    # SIFT: find the keypoints and their descriptors.
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    matcher = cv2.BFMatcher()
    raw_matches = matcher.knnMatch(des1, des2, k=2)
    good_matches = []
    ratio = 0.75
    for m1, m2 in raw_matches:
        # Lowe's ratio test: if the nearest match is clearly closer than the
        # second-nearest one, keep it and treat the pair as a good_match.
        if m1.distance < ratio * m2.distance:
            good_matches.append([m1])
    matches = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_matches, None, flags=2)
    ptsA = np.float32([kp1[m[0].queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    ptsB = np.float32([kp2[m[0].trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

    ransacReprojThreshold = 4
    # A homography can align one image with another through rotation,
    # translation, and so on.
    # print(len(ptsA), len(ptsB))
    if len(ptsA) == 0:
        return ptsA, ptsB, 0
    H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
    cv2.imshow("matcher", matches)
    cv2.waitKey(100)

    return ptsA, ptsB, 1

def findBestDistanceAndPts(ptsA, ptsB):
    # Histogram the per-match integer displacements and keep the mode
    # (the most common x and y offsets between matched points).
    x_dct = {}
    y_dct = {}
    best_x, best_y = int(ptsA[0][0][0] - ptsB[0][0][0]), int(ptsA[0][0][1] - ptsB[0][0][1])
    x_cnt, y_cnt = 0, 0
    for i in range(len(ptsA)):
        x_dis = int(ptsA[i][0][0] - ptsB[i][0][0])
        y_dis = int(ptsA[i][0][1] - ptsB[i][0][1])
        if x_dis in x_dct:
            x_dct.update({x_dis: int(x_dct.get(x_dis) + 1)})
            if x_dct.get(x_dis) > x_cnt:
                best_x = x_dis
                x_cnt = x_dct.get(x_dis)
        else:
            x_dct.update({x_dis: 1})
        if y_dis in y_dct:
            y_dct.update({y_dis: int(y_dct.get(y_dis) + 1)})
            if y_dct.get(y_dis) > y_cnt:
                best_y = y_dis
                y_cnt = y_dct.get(y_dis)
        else:
            y_dct.update({y_dis: 1})
    print(best_x, best_y)

    # Keep the points whose x-displacement equals the most common one.
    pt = []
    for i in range(len(ptsA)):
        x_dis = int(ptsA[i][0][0] - ptsB[i][0][0])
        if x_dis == best_x:
            pt.append([ptsA[i][0][0], ptsA[i][0][1]])
    # print(pt)
    return pt, best_x, best_y
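
# --- Illustrative sketch (editor's addition). findBestDistanceAndPts takes the
# mode of the per-match integer displacements; collections.Counter expresses
# the same idea compactly, shown here on synthetic points with a known offset.
def _demo_mode_displacement():
    from collections import Counter
    rng = np.random.default_rng(1)
    ptsB = (rng.random((50, 1, 2)) * 100).astype(np.float32)
    ptsA = ptsB + np.float32([12, 7])  # constant true offset (dx=12, dy=7)
    dx = Counter(int(round(float(a[0][0] - b[0][0]))) for a, b in zip(ptsA, ptsB))
    dy = Counter(int(round(float(a[0][1] - b[0][1]))) for a, b in zip(ptsA, ptsB))
    print(dx.most_common(1), dy.most_common(1))  # [(12, 50)] [(7, 50)]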

def minDistanceHasXy(ptsA, ptsB):
    # Histogram the rounded (dx, dy) displacement pairs and report the mode.
    dct = {}
    cnt = 0
    best = 's'
    for i in range(len(ptsA)):
        disx = int(ptsA[i][0][0] - ptsB[i][0][0] + 0.5)  # round to nearest int
        disy = int(ptsA[i][0][1] - ptsB[i][0][1] + 0.5)
        s = str(disx) + ',' + str(disy)
        if s in dct:
            dct.update({s: int(dct.get(s) + 1)})
            if dct.get(s) >= cnt:
                cnt = dct.get(s)
                best = s
        else:
            dct.update({s: int(1)})
    for i, j in dct.items():
        print(i, j)
    print(best)

def detectImg(img1, img2, pta, best_x, best_y):
    # Crop both images to the overlapping region implied by the kept points
    # and the best (x, y) displacement.
    min_x = int(min(x[0] for x in pta))
    max_x = int(max(x[0] for x in pta))
    min_y = int(min(x[1] for x in pta))
    max_y = int(max(x[1] for x in pta))
    newimg1 = img1[min_y: max_y, min_x: max_x]
    newimg2 = img2[min_y - best_y: max_y - best_y, min_x - best_x: max_x - best_x]
    # cv2.imshow("newimg1", newimg1)
    # cv2.imshow("newimg2", newimg2)
    # cv2.waitKey(0)
    return newimg1, newimg2

if __name__ == '__main__':
    j = 0
    for i in range(20, 4771, 1):
        print(i)
        path1 = './data/907dat/gray/camera1-' + str(i) + '.png'
        path2 = './data/907dat/color/camera0-' + str(i) + '.png'
        img1 = cv2.imread(path1)
        img2 = cv2.imread(path2)
        if img1 is None or img2 is None:
            continue
        PtsA, PtsB, f = compuerSift2GetPts(img1, img2)
        if f == 0:
            continue
        pt, best_x, best_y = findBestDistanceAndPts(PtsA, PtsB)
        newimg1, newimg2 = detectImg(img1, img2, pt, best_x, best_y)
        if newimg1.shape[0] < 10 or newimg1.shape[1] < 10:
            continue
        print(newimg1.shape, newimg2.shape)
        # newimg1 = cv2.resize(newimg1, (320, 240))
        # newimg2 = cv2.resize(newimg2, (320, 240))
        wirtePath1 = './result/dat_result_2/gray/camera1-' + str(j) + '.png'
        wirtePath2 = './result/dat_result_2/color/camera0-' + str(j) + '.png'
        if newimg1.shape[0] > 255 and newimg1.shape[1] > 255 and newimg1.shape == newimg2.shape:
            # cv2.imwrite(wirtePath1, newimg1)
            # cv2.imwrite(wirtePath2, newimg2)
            j += 1
            cv2.imshow("newimg1", newimg1)
            cv2.imshow("newimg2", newimg2)
            cv2.waitKey()
    print(j)
0 image_fusion/__init__.py Normal file