Integrate YOLOv8

myh 2025-04-18 22:51:46 +08:00
parent ca275ba74b
commit 65ee0565c2


@@ -9,6 +9,12 @@ import time
 import cv2
 import numpy as np
+from ultralytics import YOLO
+
+# Initialize the YOLOv8 model
+yolo_model = YOLO("yolov8n.pt")  # can be swapped for yolov8s/m/l, etc.
+yolo_model.to('cuda')  # enable GPU acceleration (optional)
 
 def sift_registration(img1, img2):
     img1gray = cv2.normalize(img1, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)
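The model is loaded once at module level and moved to the GPU unconditionally, which raises an error on machines without a CUDA-enabled PyTorch build. A minimal sketch of a more defensive initialization (the device check is a suggestion, not part of this commit):

import torch
from ultralytics import YOLO

yolo_model = YOLO("yolov8n.pt")  # can be swapped for yolov8s/m/l, etc.
# Fall back to the CPU when no CUDA device is available (assumes torch ships with the ultralytics install)
device = "cuda" if torch.cuda.is_available() else "cpu"
yolo_model.to(device)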
@@ -129,15 +135,14 @@ def Images_matching(img_base, img_target):
         return 1, H[0], len(dst_pts)
-#
 def fusions(img_vl, img_inf):
     """
     :param img_vl: visible-light image
     :param img_inf: infrared image
     :return:
     """
-    img_YUV = cv2.cvtColor(img_vl, cv2.COLOR_RGB2YUV)
+    img_YUV = cv2.cvtColor(img_vl, cv2.COLOR_BGR2YUV)  # convert if the input is BGR (OpenCV default)
+    # img_YUV = cv2.cvtColor(img_vl, cv2.COLOR_RGB2YUV)
     y, u, v = cv2.split(img_YUV)  # split channels and take the Y (luminance) channel
     Yf = y * 0.5 + img_inf * 0.5
     Yf = Yf.astype(np.uint8)
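The hunk only shows the start of fusions(): the visible-light frame is converted to YUV and the Y (luminance) channel is averaged 50/50 with the registered infrared image. A self-contained sketch of that fusion idea, including a merge-back step that is assumed rather than visible in this diff:

import cv2
import numpy as np

def fuse_yuv(img_vl_bgr, img_inf_gray):
    # Split the visible-light frame into Y, U, V channels
    y, u, v = cv2.split(cv2.cvtColor(img_vl_bgr, cv2.COLOR_BGR2YUV))
    # Blend luminance with the registered infrared image (50/50 weights, as in the diff)
    y_fused = (y * 0.5 + img_inf_gray * 0.5).astype(np.uint8)
    # Merge back and return a BGR image (assumed final step, not shown above)
    return cv2.cvtColor(cv2.merge([y_fused, u, v]), cv2.COLOR_YUV2BGR)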
@@ -170,22 +175,29 @@ def main(matchimg_vi, matchimg_in):
     """
     :param matchimg_vi: visible-light image
     :param matchimg_in: infrared image
-    :return: the fused image
+    :return: the fused image (with detection results)
     """
-    orimg_vi = matchimg_vi
-    orimg_in = matchimg_in
-    h, w = orimg_vi.shape[:2]  # 480 640
-    flag, H, dot = Images_matching(matchimg_vi, matchimg_in)  # (3, 3) // get the registration homography
-    if flag == 0:
-        return 0, 0, 0
-    else:
-        matched_ni = cv2.warpPerspective(orimg_in, H, (w, h))
-        # matched_ni,left,right,top,bottom=removeBlackBorder(matched_ni)
-        # fusion = fusions(orimg_vi[left:right, top:bottom], matched_ni)
-        fusion = fusions(orimg_vi, matched_ni)
-        # print(fusion.shape)
-        return 1, fusion, dot
+    try:
+        orimg_vi = matchimg_vi
+        orimg_in = matchimg_in
+        h, w = orimg_vi.shape[:2]  # 480 640
+        flag, H, dot = Images_matching(matchimg_vi, matchimg_in)  # (3, 3) // get the registration homography
+        if flag == 0:
+            return 0, None, 0
+        else:
+            matched_ni = cv2.warpPerspective(orimg_in, H, (w, h))
+            # matched_ni,left,right,top,bottom=removeBlackBorder(matched_ni)
+            # fusion = fusions(orimg_vi[left:right, top:bottom], matched_ni)
+            fusion = fusions(orimg_vi, matched_ni)
+
+            # YOLOv8 object detection
+            results = yolo_model(fusion)  # run inference on the fused image
+            annotated_image = results[0].plot()  # draw the detection boxes
+            return 1, annotated_image, dot  # return the image with detections drawn
+    except Exception as e:
+        print(f"Error in fusion/detection: {e}")
+        return 0, None, 0
 
 
 if __name__ == '__main__':
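plot() only returns an annotated image; if the detections themselves are needed downstream (counting, tracking, logging), they can be read from the same Results object. A sketch using the standard Ultralytics result attributes (the printing loop is illustrative, not part of this commit):

results = yolo_model(fusion)
boxes = results[0].boxes  # detected boxes for the fused frame
for xyxy, conf, cls_id in zip(boxes.xyxy, boxes.conf, boxes.cls):
    x1, y1, x2, y2 = [int(v) for v in xyxy.tolist()]
    label = yolo_model.names[int(cls_id)]
    print(f"{label}: {float(conf):.2f} at ({x1}, {y1}, {x2}, {y2})")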
@@ -199,39 +211,25 @@ if __name__ == '__main__':
     out = cv2.VideoWriter('output2.mp4', fourcc, fps, (640, 480))
     # keep reading frames from the cameras
     while True:
-        # read frames
-        start = time.time()
-        read_code, frame = capture.read()
-        read_code2, frame2 = capture2.read()
+        read_code, frame = capture.read()     # infrared frame
+        read_code2, frame2 = capture2.read()  # visible-light frame
         if not read_code:
             break
         i += 1
         # frame = cv2.resize(frame, (1920, 1080))
         # frame2 = cv2.resize(frame2, (640, 512))
-        # frame = frame[80:512, 0:640]
-        # frame2 = frame2[200:1080, 0:1920]
-        cv2.imshow("color", frame2)
-        cv2.imshow("gray", frame)
-        # press 'q' to exit the loop
-        if cv2.waitKey(25) & 0xFF == ord('q'):
-            break
-        h1, w1 = frame.shape[:2]
-        h2, w2 = frame2.shape[:2]
-        print(h1, w1, h2, w2)
+        # convert the infrared frame to grayscale
         frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        # call main() to run fusion and detection
         flag, fusion, dot = main(frame2, frame_gray)
-        if flag == 0:
-            continue
-        end = time.time()
-        # print(dot)
-        dots += dot
-        cv2.imshow("fusion", fusion)
-        # cv2.imshow("color", frame2)
-        # cv2.imshow("gray", frame)
+
+        if flag == 1:
+            # show the fused image with detection results
+            cv2.imshow("Fusion with YOLOv8 Detection", fusion)
             out.write(fusion)
-        use_time = end - start
-        time_all += use_time
         if cv2.waitKey(1) == ord('q'):
             break
     # release resources
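The diff ends at the resource-release comment; the teardown it refers to is presumably the standard OpenCV cleanup (a sketch, not code shown in this commit):

capture.release()
capture2.release()
out.release()
cv2.destroyAllWindows()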