Compare commits

...

4 Commits

Author  SHA1        Message                    Date
myh     65ee0565c2  Integrate YOLOv8           2025-04-18 22:51:46 +08:00
myh     ca275ba74b  Image fusion module        2025-04-18 22:15:37 +08:00
myh     1cfc280f34  Federated learning module  2025-04-18 22:15:25 +08:00
myh     f5e527e02e  Exclude the .idea folder   2025-04-18 22:06:27 +08:00
10 changed files with 1188 additions and 2 deletions

.gitignore vendored
View File

@@ -178,7 +178,7 @@ cython_debug/
# ---> JetBrains
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
.idea/
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
@@ -275,7 +275,8 @@ fabric.properties
.LSOverride
# Icon must end with two \r
Icon
Icon
# Thumbnails
._*

View File

View File

@@ -0,0 +1,155 @@
import argparse
import torch
import os
from torch import optim
from torch.optim import lr_scheduler
from util.data_utils import get_data
from util.model_utils import get_model
from util.train_utils import train_model, validate_model, update_model_weights, v3_update_model_weights
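# Federated-style training flow: three local models (a, b, c) are trained on three
# disjoint splits of the training set; after each epoch their weights are averaged
# into a global model, and a local model adopts the global weights whenever the
# global validation AUC beats its own threshold (see v3_update_model_weights).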
def main(args):
device = torch.device(args.device)
    # Data loaders: three disjoint training splits plus a shared validation loader
loader1, loader2, loader3, subset_len, val_loader = get_data(
args.train_path, args.val_path, args.batch_size, args.number_workers
)
    # Models; e.g. get_model(name='ResNet', number_class=2, device=device, resnet_type='resnet18')
model_a = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
model_b = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
model_c = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
    # Global model that receives the aggregated weights
global_model = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
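    # The global model is never trained directly; it only receives the averaged
    # weights inside v3_update_model_weights.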
if args.resume_training:
model_a.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_a.pth')))
model_b.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_b.pth')))
model_c.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_c.pth')))
print("已加载之前保存的模型参数继续训练")
    # Optimizers and loss function
criterion = torch.nn.BCEWithLogitsLoss().to(device)
optimizer_a = optim.Adam(model_a.parameters(), lr=args.lr, weight_decay=5e-4)
optimizer_b = optim.Adam(model_b.parameters(), lr=args.lr, weight_decay=5e-4)
optimizer_c = optim.Adam(model_c.parameters(), lr=args.lr, weight_decay=5e-4)
scheduler_a = lr_scheduler.ReduceLROnPlateau(optimizer_a, mode='min', factor=0.5, patience=2, verbose=True)
scheduler_b = lr_scheduler.ReduceLROnPlateau(optimizer_b, mode='min', factor=0.5, patience=2, verbose=True)
scheduler_c = lr_scheduler.ReduceLROnPlateau(optimizer_c, mode='min', factor=0.5, patience=2, verbose=True)
    # Initialize the best validation losses and the save directory
best_val_loss_a = float('inf')
best_val_loss_b = float('inf')
best_val_loss_c = float('inf')
save_dir = args.save_dir
os.makedirs(save_dir, exist_ok=True)
    # Training and validation loop
for epoch in range(args.epochs):
print(f'Epoch {epoch + 1}/{args.epochs}')
        # Train each local model on its own split
loss_a = train_model(device, model_a, loader1, optimizer_a, criterion, epoch, 'model_a')
loss_b = train_model(device, model_b, loader2, optimizer_b, criterion, epoch, 'model_b')
loss_c = train_model(device, model_c, loader3, optimizer_c, criterion, epoch, 'model_c')
        # Validate each local model on the shared validation set
val_loss_a, val_acc_a, val_auc_a = validate_model(device, model_a, val_loader, criterion, epoch, 'model_a')
val_loss_b, val_acc_b, val_auc_b = validate_model(device, model_b, val_loader, criterion, epoch, 'model_b')
val_loss_c, val_acc_c, val_auc_c = validate_model(device, model_c, val_loader, criterion, epoch, 'model_c')
if args.save_model and val_loss_a < best_val_loss_a:
best_val_loss_a = val_loss_a
torch.save(model_a.state_dict(), os.path.join(save_dir, 'best_model_a.pth'))
print(f"Best model_a saved with val_loss: {best_val_loss_a:.4f}")
if args.save_model and val_loss_b < best_val_loss_b:
best_val_loss_b = val_loss_b
torch.save(model_b.state_dict(), os.path.join(save_dir, 'best_model_b.pth'))
print(f"Best model_b saved with val_loss: {best_val_loss_b:.4f}")
if args.save_model and val_loss_c < best_val_loss_c:
best_val_loss_c = val_loss_c
torch.save(model_c.state_dict(), os.path.join(save_dir, 'best_model_c.pth'))
print(f"Best model_c saved with val_loss: {best_val_loss_c:.4f}")
print(
f'Model A - Loss: {loss_a:.4f}, Val Loss: {val_loss_a:.4f}, Val Acc: {val_acc_a:.4f}, AUC: {val_auc_a:.4f}')
print(
f'Model B - Loss: {loss_b:.4f}, Val Loss: {val_loss_b:.4f}, Val Acc: {val_acc_b:.4f}, AUC: {val_auc_b:.4f}')
print(
f'Model C - Loss: {loss_c:.4f}, Val Loss: {val_loss_c:.4f}, Val Acc: {val_acc_c:.4f}, AUC: {val_auc_c:.4f}')
        # Update model A's weights via global aggregation (update_frequency=1, i.e. every epoch)
val_acc_a, val_auc_a, val_acc_a_threshold = v3_update_model_weights(
epoch=epoch,
model_to_update=model_a,
other_models=[model_a, model_b, model_c],
global_model=global_model,
losses=[loss_a, loss_b, loss_c],
val_loader=val_loader,
device=device,
val_auc_threshold=val_auc_a,
validate_model=validate_model,
criterion=criterion,
update_frequency=1
)
        # Update model B's weights (update_frequency=1)
val_acc_b, val_auc_b, val_acc_b_threshold = v3_update_model_weights(
epoch=epoch,
model_to_update=model_b,
other_models=[model_a, model_b, model_c],
global_model=global_model,
losses=[loss_a, loss_b, loss_c],
val_loader=val_loader,
device=device,
val_auc_threshold=val_auc_b,
validate_model=validate_model,
criterion=criterion,
update_frequency=1
)
        # Update model C's weights (update_frequency=1)
val_acc_c, val_auc_c, val_acc_c_threshold = v3_update_model_weights(
epoch=epoch,
model_to_update=model_c,
other_models=[model_a, model_b, model_c],
global_model=global_model,
losses=[loss_a, loss_b, loss_c],
val_loader=val_loader,
device=device,
val_auc_threshold=val_auc_c,
validate_model=validate_model,
criterion=criterion,
update_frequency=1
)
print("Training complete! Best models saved.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='resnet18_psa', help='Model name')
parser.add_argument('--deep_backbone', type=str, default='*', help='deeplab backbone')
parser.add_argument('--train_path', type=str, default='/media/terminator/实验&代码/yhs/FF++/c40/total/train')
parser.add_argument('--val_path', type=str, default='/media/terminator/实验&代码/yhs/FF++/c40/total/val')
# parser.add_argument('--train_path', type=str, default='/media/terminator/实验&代码/yhs/FF++_mask_sample/c23/df/train')
# parser.add_argument('--val_path', type=str, default='/media/terminator/实验&代码/yhs/FF++_mask_sample/c23/df/val')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--number_workers', type=int, default=8)
parser.add_argument('--number_class', type=int, default=1)
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--lr', type=float, default=0.00005)
parser.add_argument('--save_dir', type=str,
default='/media/terminator/实验&代码/yhs/output/work2/resnet18_psa/c40/total/e10',
help='Directory to save best models')
    parser.add_argument('--save_model', type=bool, default=True, help='Whether to save the best models')
    parser.add_argument('--resume_training', type=bool, default=False, help='Whether to resume training from saved weights')
    # NOTE: argparse's type=bool treats any non-empty string (including "False") as True.
args = parser.parse_args()
main(args)
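# Example invocation (the file name train_federated.py is hypothetical; adjust the paths to your setup):
#   python train_federated.py --model_name resnet18_psa --epochs 10 --batch_size 16 --device cuda:0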

View File

util/data_utils.py
View File

@@ -0,0 +1,217 @@
import os
from collections import Counter

from PIL import Image
import torch
from torch.utils.data import DataLoader, Dataset, Subset, random_split
from torchvision import transforms, datasets
from sklearn.model_selection import train_test_split
class CustomImageDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.transform = transform
self.image_paths = []
self.labels = []
        # Walk the 0/ and 1/ subfolders under root_dir; the folder name is the label
for label in [0, 1]:
folder_path = os.path.join(root_dir, str(label))
if os.path.isdir(folder_path):
for img_name in os.listdir(folder_path):
img_path = os.path.join(folder_path, img_name)
self.image_paths.append(img_path)
self.labels.append(label)
        # Debug printing of loaded image paths and labels:
        # print("Loaded image paths and labels:")
        # for path, label in zip(self.image_paths[:5], self.labels[:5]):
        #     print(f"Path: {path}, Label: {label}")
        # print(f"Total samples: {len(self.image_paths)}\n")
def __len__(self):
return len(self.image_paths)
def __getitem__(self, idx):
img_path = self.image_paths[idx]
label = self.labels[idx]
image = Image.open(img_path).convert("RGB")
if self.transform:
image = self.transform(image)
return image, label
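# CustomImageDataset expects this directory layout (the folder name is the binary label):
#   root_dir/
#     0/   images labeled 0 (e.g. real)
#     1/   images labeled 1 (e.g. fake)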
def get_test_data(test_image_path, batch_size, nw):
data_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# test_dataset = datasets.ImageFolder(root=test_image_path, transform=data_transform)
test_dataset = CustomImageDataset(root_dir=test_image_path, transform=data_transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=nw)
return test_loader
def get_Onedata(train_image_path, val_image_path, batch_size, num_workers):
"""
加载完整的训练数据集和验证数据集
"""
data_transform = {
"train": transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
"val": transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
    # Build the training and validation datasets
train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])
    # Build the data loaders
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return train_loader, val_loader
def get_data(train_image_path, val_image_path, batch_size, num_workers):
data_transform = {
"train": transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
"val": transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
"test": transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])
    # Truncate so the length is divisible by 3, then split into three equal subsets (one per simulated client)
train_len = (len(train_dataset) // 3) * 3
train_dataset_truncated = torch.utils.data.Subset(train_dataset, range(train_len))
subset_len = train_len // 3
dataset1, dataset2, dataset3 = random_split(train_dataset_truncated, [subset_len] * 3)
loader1 = DataLoader(dataset1, batch_size=batch_size, shuffle=True, num_workers=num_workers)
loader2 = DataLoader(dataset2, batch_size=batch_size, shuffle=True, num_workers=num_workers)
loader3 = DataLoader(dataset3, batch_size=batch_size, shuffle=True, num_workers=num_workers)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return loader1, loader2, loader3, subset_len, val_loader
def get_Fourdata(train_path, val_path, batch_size, num_workers):
"""
加载训练集和验证集
包括 4 个客户端验证集dff2ffsnt 1 个全局验证集
Args:
train_path (str): 训练数据路径
val_path (str): 验证数据路径
batch_size (int): 批量大小
num_workers (int): DataLoader 的工作线程数
Returns:
tuple: 包含 4 个客户端训练和验证加载器以及全局验证加载器
"""
    # Data preprocessing
train_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
val_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
    # The 4 client dataset subdirectories
client_names = ['df', 'f2f', 'fs', 'nt']
client_train_loaders = []
client_val_loaders = []
for client in client_names:
client_train_path = os.path.join(train_path, client)
client_val_path = os.path.join(val_path, client)
        # Client training data
train_dataset = datasets.ImageFolder(root=client_train_path, transform=train_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        # Client validation data
val_dataset = datasets.ImageFolder(root=client_val_path, transform=val_transform)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
client_train_loaders.append(train_loader)
client_val_loaders.append(val_loader)
    # Global validation set
global_val_dataset = datasets.ImageFolder(root=val_path, transform=val_transform)
global_val_loader = DataLoader(global_val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return (*client_train_loaders, *client_val_loaders, global_val_loader)
def main():
    # Parameters
train_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/train"
val_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/val"
batch_size = 4
num_workers = 2
    # Get the data loaders
loader1, loader2, loader3, subset_len, val_loader = get_data(train_image_path, val_image_path, batch_size,
num_workers)
    # Count label frequencies and label types
train_labels = []
for dataset in [loader1, loader2, loader3]:
for _, labels in dataset:
train_labels.extend(labels.tolist())
val_labels = []
for _, labels in val_loader:
val_labels.extend(labels.tolist())
    # Tally the labels with Counter
train_label_counts = Counter(train_labels)
val_label_counts = Counter(val_labels)
    # Print the statistics
print("Training Dataset - Label Counts:", train_label_counts)
print("Validation Dataset - Label Counts:", val_label_counts)
print("Label Types in Training:", set(train_labels))
print("Label Types in Validation:", set(val_labels))
if __name__ == "__main__":
main()

util/model_utils.py
View File

@@ -0,0 +1,57 @@
import torch
from torch import nn
from torchvision import models
from Deeplab.deeplab import DeepLab_F
from Deeplab.resnet_psa import BasicBlockWithPSA
from Deeplab.resnet_psa_v2 import ResNet
from model_base.efNet_base_model import DeepLab
from model_base.efficientnet import EfficientNet
from model_base.resnet_more import CustomResNet
from model_base.xcption import Xception
def get_model(name, number_class, device, backbone):
"""
根据指定的模型名称加载模型并根据任务类别数调整最后的分类层
Args:
name (str): 模型名称 ('Vgg', 'ResNet', 'EfficientNet', 'Xception')
number_class (int): 分类类别数
device (torch.device): 设备 ('cuda' or 'cpu')
resnet_type (str): ResNet类型 ('resnet18', 'resnet34', 'resnet50', 'resnet101', etc.)
Returns:
nn.Module: 经过修改的模型
"""
if name == 'Vgg':
model = models.vgg16_bn(pretrained=True).to(device)
model.classifier[6] = nn.Linear(model.classifier[6].in_features, number_class)
elif name == 'ResNet18':
model = CustomResNet(resnet_type='resnet18', num_classes=number_class, pretrained=True).to(device)
elif name == 'ResNet34':
model = CustomResNet(resnet_type='resnet34', num_classes=number_class, pretrained=True).to(device)
elif name == 'ResNet50':
model = CustomResNet(resnet_type='resnet50', num_classes=number_class, pretrained=True).to(device)
elif name == 'ResNet101':
model = CustomResNet(resnet_type='resnet101', num_classes=number_class, pretrained=True).to(device)
elif name == 'ResNet152':
model = CustomResNet(resnet_type='resnet152', num_classes=number_class, pretrained=True).to(device)
elif name == 'EfficientNet':
        # Load EfficientNet through the custom DeepLab wrapper
model = DeepLab(backbone='efficientnet', num_classes=number_class).to(device)
elif name == 'Xception':
model = Xception(
in_planes=3,
num_classes=number_class,
pretrained=True,
pretrained_path="/home/terminator/1325/yhs/fedLeaning/pre_model/xception-43020ad28.pth"
).to(device)
    elif name == 'DeepLab':
        # Custom DeepLab_F model with the given backbone
        model = DeepLab_F(num_classes=1, backbone=backbone).to(device)
    elif name == 'resnet18_psa':
        model = ResNet(BasicBlockWithPSA, [2, 2, 2, 2], number_class).to(device)
else:
raise ValueError(f"Model {name} is not supported.")
return model
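# Usage sketch (mirrors the training script's defaults; backbone is only read for 'DeepLab'):
#   device = torch.device('cuda:0')
#   model = get_model('resnet18_psa', number_class=1, device=device, backbone='*')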

util/train_utils.py
View File

@@ -0,0 +1,368 @@
import numpy as np
import torch
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score
import copy
import torch.nn.functional as F
import random
def train_deepmodel(device, model, loader, optimizer, criterion, epoch, model_name):
model.train()
running_loss = 0.0
corrects = 0.0
    alpha = 1    # weight of the classification loss
    beta = 0.1   # weight of the L1 reconstruction loss
for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
        inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the tensors are on the device
optimizer.zero_grad()
outputs, re_img = model(inputs)
loss = criterion(outputs.squeeze(), labels.float())
loss_F1 = F.l1_loss(re_img, inputs)
loss = alpha * loss + beta * loss_F1
loss.backward()
optimizer.step()
running_loss += loss.item()
avg_loss = running_loss / len(loader)
print(f'{model_name} Training Loss: {avg_loss:.4f}')
return avg_loss
def validate_deepmodel(device, model, loader, criterion, epoch, model_name):
model.eval()
running_loss = 0.0
correct, total = 0, 0
all_labels, all_preds = [], []
val_corrects = 0.0
    alpha = 1    # weight of the classification loss
    beta = 0.1   # weight of the L1 reconstruction loss
with torch.no_grad():
for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the tensors are on the device
outputs, re_img = model(inputs)
            # Convert logits to probabilities
predicted = torch.sigmoid(outputs).data
all_preds.extend(predicted.cpu().numpy())
all_labels.extend(labels.cpu().numpy())
            loss = criterion(outputs.squeeze(), labels.float())
loss_F1 = F.l1_loss(re_img, inputs)
loss = alpha * loss + beta * loss_F1
running_loss += loss.item()
auc = roc_auc_score(all_labels, all_preds)
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # threshold probabilities at 0.5
acc = accuracy_score(all_labels, predicted_labels)
avg_loss = running_loss / len(loader)
print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
return avg_loss, acc, auc
def test_deepmodel(device, model, loader):
model.eval()
all_labels, all_preds = [], []
with torch.no_grad():
for inputs, labels in tqdm(loader, desc=f'Testing', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the tensors are on the device
outputs, re_img = model(inputs)
            predicted = torch.sigmoid(outputs).data  # convert logits to probabilities
            # Collect predictions and ground-truth labels
all_preds.extend(predicted.cpu().numpy())
all_labels.extend(labels.cpu().numpy())
    # Binarize the predictions
predicted_labels = (np.array(all_preds) >= 0.5).astype(int)
    # Compute accuracy and AUC
acc = accuracy_score(all_labels, predicted_labels)
auc = roc_auc_score(all_labels, all_preds)
print(f'Test Accuracy: {acc:.4f}, Test AUC: {auc:.4f}')
return acc, auc
# Previous train_model with extra debug printing, kept for reference:
# def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
#     model.train()
#     running_loss = 0.0
#     for i, (inputs, labels) in enumerate(tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch')):
#         inputs, labels = inputs.float().to(device), labels.float().to(device)  # make sure the data format is correct
#         optimizer.zero_grad()
#
#         outputs = model(inputs)
#         loss = criterion(outputs.squeeze(), labels)
#
#         # Print a sample output and label every 10 batches to sanity-check the formats
#         if i % 10 == 0:
#             print(f"Batch {i} - Sample Output: {outputs[0].item():.4f}, Sample Label: {labels[0].item()}")
#
#         # Check for anomalous loss values
#         if loss.item() < 0:
#             print(f"Warning: Negative loss detected at batch {i}. Loss: {loss.item()}")
#
#         loss.backward()
#         optimizer.step()
#
#         running_loss += loss.item()
#
#     avg_loss = running_loss / len(loader)
#     print(f'{model_name} Training Loss: {avg_loss:.4f}')
#     return avg_loss
def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
model.train()
running_loss = 0.0
for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
        inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the tensors are on the device
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs.squeeze(), labels.float())
loss.backward()
optimizer.step()
running_loss += loss.item()
avg_loss = running_loss / len(loader)
print(f'{model_name} Training Loss: {avg_loss:.4f}')
return avg_loss
def validate_model(device, model, loader, criterion, epoch, model_name):
model.eval()
running_loss = 0.0
correct, total = 0, 0
all_labels, all_preds = [], []
with torch.no_grad():
for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the tensors are on the device
outputs = model(inputs)
            # Convert logits to probabilities
predicted = torch.sigmoid(outputs).data
all_preds.extend(predicted.cpu().numpy())
all_labels.extend(labels.cpu().numpy())
            loss = criterion(outputs.squeeze(), labels.float())
running_loss += loss.item()
auc = roc_auc_score(all_labels, all_preds)
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # threshold probabilities at 0.5
acc = accuracy_score(all_labels, predicted_labels)
avg_loss = running_loss / len(loader)
print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
return avg_loss, acc, auc
# Weight aggregation (federated averaging)
def aggregate_weights(weights_list, alpha=1 / 3, beta=1 / 3, gamma=1 / 3):
    new_state_dict = copy.deepcopy(weights_list[0])  # copy the weight structure from the first model
for key in new_state_dict.keys():
new_state_dict[key] = (alpha * weights_list[0][key] +
beta * weights_list[1][key] +
gamma * weights_list[2][key])
return new_state_dict
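# Standard federated averaging with fixed coefficients, computed per parameter tensor:
#   theta_global[k] = alpha * theta_a[k] + beta * theta_b[k] + gamma * theta_c[k]
# with alpha = beta = gamma = 1/3, i.e. an unweighted mean of the three models.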
def v3_update_model_weights(
epoch,
model_to_update,
other_models,
global_model,
losses,
val_loader,
device,
        val_auc_threshold,  # current validation AUC threshold of the model being updated
validate_model,
criterion,
update_frequency
):
"""
根据给定的条件更新模型的权重
参数:
epoch (int): 当前训练轮次
model_to_update: 需要更新的模型
other_models (list): 其他模型列表用于计算全局模型权重
global_model: 全局模型
losses (list): 各模型的损失值列表
val_loader: 验证数据的 DataLoader
device: 设备 ('cuda' 'cpu')
val_auc_threshold (float): 当前需要更新模型的验证 AUC
aggregate_weights (function): 权重聚合函数
validate_model (function): 验证模型的函数
update_frequency (int): 权重更新的频率
返回:
val_acc (float): 全局模型的验证精度
val_auc (float): 全局模型的验证 AUC
updated_val_auc_threshold (float): 更新后的验证 AUC
"""
if (epoch + 1) % update_frequency == 0:
        # Collect every model's weights
all_weights = [model.state_dict() for model in other_models]
        avg_weights = aggregate_weights(all_weights)  # aggregate the weights
        # Load the aggregated weights into the global model
global_model.load_state_dict(avg_weights)
        # Weighted average loss (0.33 ~ 1/3 per model)
        weighted_loss = sum(loss * 0.33 for loss in losses)
print(f"Weighted Average Loss: {weighted_loss:.4f}")
        # Validate the global model
val_loss, val_acc, val_auc = validate_model(device, global_model, val_loader, criterion, epoch, 'global_model')
print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')
        # Adopt the global weights if the global model's AUC is higher
if val_auc > val_auc_threshold:
print(f'Updating model at epoch {epoch + 1}')
model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # update the AUC threshold
return val_acc, val_auc, val_auc_threshold
return None, None, val_auc_threshold
def update_model_weights(
epoch,
model_to_update,
other_models,
global_model,
losses,
val_loader,
device,
        val_auc_threshold,  # current validation AUC threshold of the model being updated
validate_model,
criterion,
update_frequency
):
"""
根据给定的条件更新模型的权重
参数:
epoch (int): 当前训练轮次
model_to_update: 需要更新的模型
other_models (list): 其他模型列表用于计算全局模型权重
global_model: 全局模型
losses (list): 各模型的损失值列表
val_loader: 验证数据的 DataLoader
device: 设备 ('cuda' 'cpu')
val_auc_threshold (float): 当前需要更新模型的验证 AUC
aggregate_weights (function): 权重聚合函数
validate_model (function): 验证模型的函数
update_frequency (int): 权重更新的频率
返回:
val_acc (float): 全局模型的验证精度
val_auc (float): 全局模型的验证 AUC
updated_val_auc_threshold (float): 更新后的验证 AUC
"""
if (epoch + 1) % update_frequency == 0:
        # Collect every model's weights
all_weights = [model.state_dict() for model in other_models]
        avg_weights = aggregate_weights(all_weights)  # aggregate the weights
        # Load the aggregated weights into the global model
global_model.load_state_dict(avg_weights)
        # Weighted average loss (0.33 ~ 1/3 per model)
        weighted_loss = sum(loss * 0.33 for loss in losses)
print(f"Weighted Average Loss: {weighted_loss:.4f}")
        # Validate the global model (deep variant with the reconstruction loss)
val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
'global_model')
print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')
        # Adopt the global weights if the global model's AUC is higher
if val_auc > val_auc_threshold:
print(f'Updating model at epoch {epoch + 1}')
model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # update the AUC threshold
return val_acc, val_auc, val_auc_threshold
return None, None, val_auc_threshold
def f_update_model_weights(
epoch,
model_to_update,
other_models,
global_model,
losses,
val_loader,
device,
        val_auc_threshold,  # current validation AUC threshold of the model being updated
        aggregate_weights,  # weight aggregation function
validate_model,
criterion,
update_frequency
):
"""
根据给定的条件更新模型的权重
参数:
epoch (int): 当前训练轮次
model_to_update: 需要更新的模型
other_models (list): 其他模型列表用于计算全局模型权重
global_model: 全局模型
losses (list): 各模型的损失值列表
val_loader: 验证数据的 DataLoader
device: 设备 ('cuda' 'cpu')
val_auc_threshold (float): 当前需要更新模型的验证 AUC 阈值
aggregate_weights (function): 权重聚合函数
validate_model (function): 验证模型的函数
criterion: 损失函数
update_frequency (int): 权重更新的频率
返回:
val_acc (float): 全局模型的验证精度
val_auc (float): 全局模型的验证 AUC
updated_val_auc_threshold (float): 更新后的验证 AUC 阈值
"""
    # Update the model weights every update_frequency epochs
if (epoch + 1) % update_frequency == 0:
print(f"\n[Epoch {epoch + 1}] Updating global model weights...")
        # Collect the other models' weights
all_weights = [model.state_dict() for model in other_models]
        # Compute the global weights with the aggregation function
avg_weights = aggregate_weights(all_weights)
print("Global model weights aggregated.")
        # Load the aggregated weights into the global model
global_model.load_state_dict(avg_weights)
        # Weighted average loss (equal weights)
        weighted_loss = sum(loss * (1 / len(losses)) for loss in losses)
print(f"Weighted Average Loss: {weighted_loss:.4f}")
        # Validate the global model's performance
val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
'global_model')
print(f"[Global Model] Validation Loss: {val_loss:.4f}, Accuracy: {val_acc:.4f}, AUC: {val_auc:.4f}")
        # If the global AUC beats the threshold, adopt the global weights in the target model
if val_auc > val_auc_threshold:
print(f"Global model AUC improved ({val_auc:.4f} > {val_auc_threshold:.4f}). Updating target model.")
model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # update the AUC threshold
else:
print(
f"Global model AUC did not improve ({val_auc:.4f} <= {val_auc_threshold:.4f}). No update to target model.")
return val_acc, val_auc, val_auc_threshold
    # Not an update epoch: return the current AUC threshold unchanged
return None, None, val_auc_threshold

Image_Registration_test.py
View File

@@ -0,0 +1,241 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time :
# @Author :
# @File : Image_Registration_test.py
import time
import cv2
import numpy as np
from ultralytics import YOLO
# Initialize the YOLOv8 model
yolo_model = YOLO("yolov8n.pt")  # can be swapped for yolov8s/m/l, etc.
yolo_model.to('cuda')  # optional GPU acceleration
def sift_registration(img1, img2):
img1gray = cv2.normalize(img1, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)
img2gray = img2
sift = cv2.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1gray, None)
kp2, des2 = sift.detectAndCompute(img2gray, None)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = []
pts1 = []
pts2 = []
for i, (m, n) in enumerate(matches):
if m.distance < 0.75 * n.distance:
good.append(m)
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
MIN_MATCH_COUNT = 4
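    # cv2.findHomography needs at least 4 point correspondences, hence the minimum here.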
if len(good) > MIN_MATCH_COUNT:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
else:
print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
M = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], dtype=np.float64)
if M is None:
M = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], dtype=np.float64)
    return 1, M, len(pts2)  # return the full 3x3 homography
# Truncated linear RGB contrast stretch: clip below the 2nd percentile and above the 98th
# (the lower and upper percentiles are usually equal), then rescale to the output limits.
def truncated_linear_stretch(image, truncated_value=2, maxout=255, min_out=0):
"""
:param image:
:param truncated_value:
:param maxout:
:param min_out:
:return:
"""
def gray_process(gray, maxout=maxout, minout=min_out):
truncated_down = np.percentile(gray, truncated_value)
truncated_up = np.percentile(gray, 100 - truncated_value)
gray_new = ((maxout - minout) / (truncated_up - truncated_down)) * gray
gray_new[gray_new < minout] = minout
gray_new[gray_new > maxout] = maxout
return np.uint8(gray_new)
(b, g, r) = cv2.split(image)
b = gray_process(b)
g = gray_process(g)
r = gray_process(r)
    result = cv2.merge((b, g, r))  # merge the channels back
return result
# RGB image registration: use the daytime visible-light image and the infrared grayscale image
# to compute the homography between their shared SIFT feature points.
def Images_matching(img_base, img_target):
"""
:param img_base:
:param img_target:匹配图像
:return: 返回仿射矩阵
"""
start = time.time()
    orb = cv2.ORB_create()  # unused; SIFT is used below
img_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create()
    # Compute SIFT keypoints and their surrounding descriptors
st1 = time.time()
kp1, des1 = sift.detectAndCompute(img_base, None) # 1136 1136, 64
kp2, des2 = sift.detectAndCompute(img_target, None)
en1 = time.time()
    # print(en1 - st1, "feature extraction time")
    # KNN feature matching. A FLANN-based matcher is an alternative:
    # FLANN_INDEX_KDTREE = 1
    # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # 5 index trees
    # search_params = dict(checks=50)  # number of recursive checks
    # flann = cv2.FlannBasedMatcher(index_params, search_params)
    # matches = flann.knnMatch(des1, des2, k=2)
st2 = time.time()
matcher = cv2.BFMatcher()
matches = matcher.knnMatch(des1, des2, k=2)
de2 = time.time()
    # print(de2 - st2, "feature matching time")
good = []
    # Keep the good matches (Lowe's ratio test)
for m, n in matches:
        if m.distance < 0.75 * n.distance:  # keep if the nearest distance is below 0.75x the second-nearest
            good.append(m)  # 134
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # keypoint coordinates in the query image  # (134, 2)
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # keypoint coordinates in the train (template) image
if len(src_pts) <= 4:
return 0, None, 0
else:
        # print(len(dst_pts), len(src_pts), "registration points")
        H = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 4)  # H[0]: (3, 3) homography, H[1]: inlier mask
end = time.time()
times = end - start
# print("配准时间", times)
return 1, H[0], len(dst_pts)
def fusions(img_vl, img_inf):
"""
:param img_vl: 原图像
:param img_inf: 红外图像
:return:
"""
    img_YUV = cv2.cvtColor(img_vl, cv2.COLOR_BGR2YUV)  # input is BGR, so convert from BGR
    # img_YUV = cv2.cvtColor(img_vl, cv2.COLOR_RGB2YUV)
    y, u, v = cv2.split(img_YUV)  # split the channels to get the Y (luminance) channel
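    # Equal-weight luminance blend: the fused Y channel is the mean of the
    # visible-light luminance and the infrared intensity.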
Yf = y * 0.5 + img_inf * 0.5
Yf = Yf.astype(np.uint8)
fusion = cv2.cvtColor(cv2.merge((Yf, u, v)), cv2.COLOR_YUV2RGB)
return fusion
def removeBlackBorder(gray):
"""
移除缝合后的图像的多余黑边
输入
image三维numpy矩阵待处理图像
输出
裁剪后的图像
"""
    threshold = 40  # intensity threshold
    nrow = gray.shape[0]  # image height
    ncol = gray.shape[1]  # image width
    rowc = gray[:, int(1 / 2 * ncol)]  # middle column; cannot handle a black region covering more than half the image
    colc = gray[int(1 / 2 * nrow), :]  # middle row
rowflag = np.argwhere(rowc > threshold)
colflag = np.argwhere(colc > threshold)
left, bottom, right, top = rowflag[0, 0], colflag[-1, 0], rowflag[-1, 0], colflag[0, 0]
    # cv2.imshow('name', gray[left:right, top:bottom])  # preview
cv2.waitKey(1)
return gray[left:right, top:bottom], left, right, top, bottom
def main(matchimg_vi, matchimg_in):
"""
:param matchimg_vi: 可见光图像
:param matchimg_in: 红外图像
:return: 融合好的图像带检测结果
"""
try:
orimg_vi = matchimg_vi
orimg_in = matchimg_in
        h, w = orimg_vi.shape[:2]  # e.g. 480 x 640
        flag, H, dot = Images_matching(matchimg_vi, matchimg_in)  # (3, 3) homography from the matched registration points
if flag == 0:
return 0, None, 0
else:
matched_ni = cv2.warpPerspective(orimg_in, H, (w, h))
# matched_ni,left,right,top,bottom=removeBlackBorder(matched_ni)
# fusion = fusions(orimg_vi[left:right, top:bottom], matched_ni)
fusion = fusions(orimg_vi, matched_ni)
            # YOLOv8 object detection
            results = yolo_model(fusion)  # run detection on the fused image
            annotated_image = results[0].plot()  # draw the detection boxes
            return 1, annotated_image, dot  # return the image with detection results
except Exception as e:
print(f"Error in fusion/detection: {e}")
return 0, None, 0
if __name__ == '__main__':
time_all = 0
dots = 0
i = 0
fourcc = cv2.VideoWriter_fourcc(*'XVID')
capture = cv2.VideoCapture("video/20190926_141816_1_8/20190926_141816_1_8/infrared.mp4")
capture2 = cv2.VideoCapture("video/20190926_141816_1_8/20190926_141816_1_8/visible.mp4")
fps = capture.get(cv2.CAP_PROP_FPS)
out = cv2.VideoWriter('output2.mp4', fourcc, fps, (640, 480))
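    # NOTE: cv2.VideoWriter only writes frames whose size matches (640, 480);
    # mismatched frames are silently dropped, so resize the fused frame first if the inputs differ.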
    # Read frames continuously
while True:
        read_code, frame = capture.read()  # infrared frame
        read_code2, frame2 = capture2.read()  # visible-light frame
        if not read_code or not read_code2:
break
i += 1
# frame = cv2.resize(frame, (1920, 1080))
# frame2 = cv2.resize(frame2, (640, 512))
        # Convert the infrared frame to grayscale
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Call main() to fuse the frames and run detection
flag, fusion, dot = main(frame2, frame_gray)
if flag == 1:
            # Show the fused image with detection results
cv2.imshow("Fusion with YOLOv8 Detection", fusion)
out.write(fusion)
if cv2.waitKey(1) == ord('q'):
break
    # Release resources
capture.release()
capture2.release()
cv2.destroyAllWindows()
    if i:
        ave = time_all / i  # NOTE: time_all is never accumulated above, so this stays 0
        print(ave, "average time per frame")

View File

@@ -0,0 +1,147 @@
# -*- coding: utf-8 -*-
# @Time :
# @Author :
import cv2
import numpy as np
sift = cv2.SIFT_create()
def computeSift2GetPts(img1, img2):
    # SIFT: detect keypoints and compute their descriptors
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
matcher = cv2.BFMatcher()
raw_matches = matcher.knnMatch(des1, des2, k=2)
good_matches = []
ratio = 0.75
for m1, m2 in raw_matches:
        # Lowe's ratio test: keep the nearest match when its distance is below ratio x the second-nearest distance
        if m1.distance < ratio * m2.distance:
good_matches.append([m1])
matches = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_matches, None, flags=2)
ptsA = np.float32([kp1[m[0].queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
ptsB = np.float32([kp2[m[0].trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
ransacReprojThreshold = 4
    # The homography can align one image with the other through rotation, translation, etc.
    # print(len(ptsA), len(ptsB))
if len(ptsA) == 0: return ptsA, ptsB, 0
H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
cv2.imshow("matcher", matches)
cv2.waitKey(100)
return ptsA, ptsB, 1
def findBestDistanceAndPts(ptsA, ptsB):
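    # Histogram the integer x/y displacements between matched keypoint pairs and take
    # the mode as the dominant translation (a crude, rotation-free alignment estimate).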
x_dct = {}
y_dct = {}
best_x, best_y = int(ptsA[0][0][0] - ptsB[0][0][0]), int(ptsA[0][0][1] - ptsB[0][0][1])
x_cnt, y_cnt = 0, 0
for i in range(len(ptsA)):
# print(ptsA[i], ' ', ptsB[i])
x_dis = int(ptsA[i][0][0] - ptsB[i][0][0])
y_dis = int(ptsA[i][0][1] - ptsB[i][0][1])
# print(x_dis)
if x_dis in x_dct:
x_dct.update({x_dis: int(x_dct.get(x_dis) + 1)})
if x_dct.get(x_dis) > x_cnt:
best_x = x_dis
x_cnt = x_dct.get(x_dis)
# print(x_dct.get(x_dis))
else:
x_dct.update({x_dis: 1})
# print(x_dct.get(x_dis))
# print(y_dis)
if y_dis in y_dct:
y_dct.update({y_dis: int(y_dct.get(y_dis) + 1)})
if y_dct.get(y_dis) > y_cnt:
best_y = y_dis
y_cnt = y_dct.get(y_dis)
# print(y_dct.get(y_dis))
else:
y_dct.update({y_dis: 1})
# print(y_dct.get(y_dis))
print(best_x, best_y)
pt = []
ptb = []
for i in range(len(ptsA)):
x_dis = int(ptsA[i][0][0] - ptsB[i][0][0])
y_dis = int(ptsA[i][0][1] - ptsB[i][0][1])
if abs(best_x - x_dis) <= 0:
pt.append([ptsA[i][0][0], ptsA[i][0][1]])
# print(pt)
return pt, best_x, best_y
def minDistanceHasXy(ptsA, ptsB):
dct = {}
cnt = 0
best = 's'
for i in range(len(ptsA)):
disx = int(ptsA[i][0][0] - ptsB[i][0][0] + 0.5)
disy = int(ptsA[i][0][1] - ptsB[i][0][1] + 0.5)
s = str(disx) + ',' + str(disy)
# print(s)
if s in dct:
            dct.update({s: int(dct.get(s) + 1)})
if dct.get(s) >= cnt:
cnt = dct.get(s)
best = s
print(s)
else:
dct.update({s: int(1)})
for i, j in dct.items():
print(i, j)
print(best)
def detectImg(img1, img2, pta, best_x, best_y):
# print(pta)
min_x = int(min(x[0] for x in pta))
max_x = int(max(x[0] for x in pta))
min_y = int(min(x[1] for x in pta))
max_y = int(max(x[1] for x in pta))
# print(min_x, max_x)
# print(min_x - best_x, max_x - best_x)
# print(min_y, max_y)
# print(min_y - best_y, max_y - best_y)
newimg1 = img1[min_y: max_y, min_x: max_x]
newimg2 = img2[min_y - best_y: max_y - best_y, min_x - best_x: max_x - best_x]
# cv2.imshow("newimg1", newimg1)
# cv2.imshow("newimg2", newimg2)
# cv2.waitKey(0)
return newimg1, newimg2
if __name__ == '__main__':
j = 0
for i in range(20, 4771, 1):
print(i)
path1 = './data/907dat/gray/camera1-' + str(i) + '.png'
path2 = './data/907dat/color/camera0-' + str(i) + '.png'
img1 = cv2.imread(path1)
img2 = cv2.imread(path2)
if (img1 is None or img2 is None): continue
        PtsA, PtsB, f = computeSift2GetPts(img1, img2)
if (f == 0): continue
pt, best_x, best_y = findBestDistanceAndPts(PtsA, PtsB)
newimg1, newimg2 = detectImg(img1, img2, pt, best_x, best_y)
if newimg1.shape[0] < 10 or newimg1.shape[1] < 10: continue
print(newimg1.shape, newimg2.shape)
# newimg1 = cv2.resize(newimg1, (320, 240))
# newimg2 = cv2.resize(newimg2, (320, 240))
        writePath1 = './result/dat_result_2/gray/camera1-' + str(j) + '.png'
        writePath2 = './result/dat_result_2/color/camera0-' + str(j) + '.png'
        if newimg1.shape[0] > 255 and newimg1.shape[1] > 255 and newimg1.shape == newimg2.shape:
            # cv2.imwrite(writePath1, newimg1)
            # cv2.imwrite(writePath2, newimg2)
j += 1
cv2.imshow("newimg1", newimg1)
cv2.imshow("newimg2", newimg2)
cv2.waitKey()
print(j)
pass

image_fusion/__init__.py Normal file
View File