Compare commits
41 commits, comparing 65ee0565c2...main
2e7cf69512
76240a12e6
98321aa7d5
d39aa31651
f127ae2852
3a65d89315
2a3e5b17e7
c57c8f3552
310131d876
ba4508507b
89d8f4c0df
d1ed958db5
abd033b831
69482e6a3f
9f827af58e
338a5e07e8
9d99b00e55
dd0e0d869c
8cd6df4527
132ed64136
be1e3627e7
d139f5afcf
428790ab91
65e10f3e7d
960b66a692
ef3d521e4a
3b80f237fa
f320e79702
34a5247dd2
1930e1b96b
5095dbe6c0
554c7e6083
0d84bba234
c81de41b3e
b8ffb902b3
da36a8fc09
45db741f35
5df0e15baf
5e72ac28cc
5b61b48d50
160bb2e365
.gitignore (vendored, 7 lines changed)
@@ -297,3 +297,10 @@ Network Trash Folder
Temporary Items
.apdisk

# project files
/whl_packages/
runs/
*.pt
*.cache
.vscode/
*.json
README.md (34 lines changed)
@@ -1,3 +1,35 @@
# Graduation-Project

Graduation project: a UAV detection system based on YOLO and image fusion, with research on its security.

Run federated training on Linux:

```bash
cd federated_learning
```

```bash
nohup python -u yolov8_fed.py > runtime.log 2>&1 &
```

Run centralized training on Linux:

```bash
cd yolov8
```

```bash
nohup python -u yolov8_train.py > runtime.log 2>&1 &
```

Monitor the log file in real time:

```bash
tail -f runtime.log
```

Run the image fusion and registration code:

```bash
cd image_fusion
```

```bash
python Image_Registration_test.py
```
dataset/train1/images/6.jpg (new binary file, 145 KiB)

dataset/train1/images/7.jpg (new binary file, 97 KiB)

dataset/train1/labels/6.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
0 0.5375 0.37395833333333334 0.253125 0.16458333333333333
0 0.2890625 0.5833333333333334 0.196875 0.1125

dataset/train1/labels/7.txt (new file, 1 line)
@@ -0,0 +1 @@
0 0.36328125 0.525 0.7109375 0.8083333333333333

dataset/train1/train1.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
train: ./images
val: ../val
nc: 1
names: ['uav']
dataset/train2/images/000007.JPG (new binary file, 136 KiB)

dataset/train2/images/02.jpg (new binary file, 1.5 MiB)

dataset/train2/labels/000007.txt (new file, 1 line)
@@ -0,0 +1 @@
0 0.6934895833333333 0.6527777777777778 0.008854166666666666 0.018518518518518517

dataset/train2/labels/02.txt (new file, 1 line)
@@ -0,0 +1 @@
0 0.423698 0.593519 0.061979 0.029630

dataset/train2/train2.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
train: ./images
val: ../val
nc: 1
names: ['uav']
dataset/val/images/VS_P65.jpg (new binary file, 77 KiB)

dataset/val/images/VS_P66.jpg (new binary file, 74 KiB)

dataset/val/labels/VS_P65.txt (new file, 1 line)
@@ -0,0 +1 @@
0 0.5109375 0.5322916666666667 0.125 0.13958333333333334

dataset/val/labels/VS_P66.txt (new file, 1 line)
@@ -0,0 +1 @@
0 0.55078125 0.296875 0.0890625 0.08958333333333333
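The label files above use the standard YOLO annotation format: one object per line, written as `class x_center y_center width height`, with every value normalized by the image dimensions (class 0 is 'uav' in these configs). A minimal parsing sketch for such files (the helper name is illustrative, not part of the repository):

```python
from pathlib import Path

def read_yolo_labels(path):
    """Parse a YOLO label file: one 'class x_c y_c w h' row per object, all values in [0, 1]."""
    boxes = []
    for line in Path(path).read_text().splitlines():
        if line.strip():
            cls, xc, yc, w, h = line.split()
            boxes.append((int(cls), float(xc), float(yc), float(w), float(h)))
    return boxes

# dataset/train1/labels/6.txt above holds two class-0 (uav) boxes.
print(read_yolo_labels("dataset/train1/labels/6.txt"))
```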
federated_learning/GenerateTestdata.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
# Create the test directory structure
mkdir -p ./test_data/{client1,client2}/{train,val}/images
mkdir -p ./test_data/{client1,client2}/{train,val}/labels

# Generate dummy data (each client only needs 2 images)
for client in client1 client2; do
    for split in train val; do
        # Create blank images (128x128 RGB)
        magick -size 128x128 xc:white test_data/${client}/${split}/images/img1.jpg
        magick -size 128x128 xc:black test_data/${client}/${split}/images/img2.jpg

        # Create sample label files
        echo "0 0.5 0.5 0.2 0.2" > test_data/${client}/${split}/labels/img1.txt
        echo "1 0.3 0.3 0.4 0.4" > test_data/${client}/${split}/labels/img2.txt
    done
done
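GenerateTestdata.sh assumes ImageMagick 7's `magick` binary is installed. Where it is not, the same fixtures can be produced with Pillow; this is a sketch under that assumption, not part of the repository:

```python
from pathlib import Path
from PIL import Image

for client in ("client1", "client2"):
    for split in ("train", "val"):
        img_dir = Path("test_data") / client / split / "images"
        lbl_dir = Path("test_data") / client / split / "labels"
        img_dir.mkdir(parents=True, exist_ok=True)
        lbl_dir.mkdir(parents=True, exist_ok=True)
        # Blank 128x128 RGB images, mirroring the `magick ... xc:white/xc:black` calls.
        Image.new("RGB", (128, 128), "white").save(img_dir / "img1.jpg")
        Image.new("RGB", (128, 128), "black").save(img_dir / "img2.jpg")
        # Sample labels in YOLO format.
        (lbl_dir / "img1.txt").write_text("0 0.5 0.5 0.2 0.2\n")
        (lbl_dir / "img2.txt").write_text("1 0.3 0.3 0.4 0.4\n")
```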
federated_learning/config/client1_data.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
train: ../test_data/client1/train/images
val: ../test_data/client1/val/images
nc: 2
names: [ 'class0', 'class1' ]

federated_learning/config/client2_data.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
train: ../test_data/client2/train/images
val: ../test_data/client2/val/images
nc: 2
names: [ 'class0', 'class1' ]
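The `train:` and `val:` entries in these client configs are relative paths; the federated script later in this diff resolves them against the YAML's own directory. A small sanity-check sketch built on that assumption (the function name is illustrative, not part of the repository):

```python
import os
import yaml

def check_client_config(path):
    """Verify that the train/val image directories referenced by a client data YAML exist."""
    with open(path) as f:
        cfg = yaml.safe_load(f)
    base = os.path.dirname(path)
    for split in ("train", "val"):
        img_dir = os.path.normpath(os.path.join(base, cfg[split]))
        if not os.path.isdir(img_dir):
            raise FileNotFoundError(f"{path}: '{split}' directory missing: {img_dir}")

for cfg_path in ("federated_learning/config/client1_data.yaml",
                 "federated_learning/config/client2_data.yaml"):
    check_client_config(cfg_path)
```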
Deleted file (155 lines):
@@ -1,155 +0,0 @@
import argparse
import torch
import os
from torch import optim
from torch.optim import lr_scheduler
from util.data_utils import get_data
from util.model_utils import get_model
from util.train_utils import train_model, validate_model, update_model_weights, v3_update_model_weights


def main(args):
    device = torch.device(args.device)

    # Data loaders
    loader1, loader2, loader3, subset_len, val_loader = get_data(
        args.train_path, args.val_path, args.batch_size, args.number_workers
    )

    # Models, e.g. get_model(name='ResNet', number_class=2, device=device, resnet_type='resnet18')
    model_a = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
    model_b = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
    model_c = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
    # Global model
    global_model = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)

    if args.resume_training:
        model_a.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_a.pth')))
        model_b.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_b.pth')))
        model_c.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_c.pth')))
        print("Loaded previously saved model parameters; resuming training")

    # Optimizers and loss function
    criterion = torch.nn.BCEWithLogitsLoss().to(device)

    optimizer_a = optim.Adam(model_a.parameters(), lr=args.lr, weight_decay=5e-4)
    optimizer_b = optim.Adam(model_b.parameters(), lr=args.lr, weight_decay=5e-4)
    optimizer_c = optim.Adam(model_c.parameters(), lr=args.lr, weight_decay=5e-4)
    scheduler_a = lr_scheduler.ReduceLROnPlateau(optimizer_a, mode='min', factor=0.5, patience=2, verbose=True)
    scheduler_b = lr_scheduler.ReduceLROnPlateau(optimizer_b, mode='min', factor=0.5, patience=2, verbose=True)
    scheduler_c = lr_scheduler.ReduceLROnPlateau(optimizer_c, mode='min', factor=0.5, patience=2, verbose=True)

    # Initialize the best validation losses and model paths
    best_val_loss_a = float('inf')
    best_val_loss_b = float('inf')
    best_val_loss_c = float('inf')

    save_dir = args.save_dir
    os.makedirs(save_dir, exist_ok=True)

    # Training and validation
    for epoch in range(args.epochs):
        print(f'Epoch {epoch + 1}/{args.epochs}')

        # Train the models
        loss_a = train_model(device, model_a, loader1, optimizer_a, criterion, epoch, 'model_a')
        loss_b = train_model(device, model_b, loader2, optimizer_b, criterion, epoch, 'model_b')
        loss_c = train_model(device, model_c, loader3, optimizer_c, criterion, epoch, 'model_c')

        # Validate the models
        val_loss_a, val_acc_a, val_auc_a = validate_model(device, model_a, val_loader, criterion, epoch, 'model_a')
        val_loss_b, val_acc_b, val_auc_b = validate_model(device, model_b, val_loader, criterion, epoch, 'model_b')
        val_loss_c, val_acc_c, val_auc_c = validate_model(device, model_c, val_loader, criterion, epoch, 'model_c')

        if args.save_model and val_loss_a < best_val_loss_a:
            best_val_loss_a = val_loss_a
            torch.save(model_a.state_dict(), os.path.join(save_dir, 'best_model_a.pth'))
            print(f"Best model_a saved with val_loss: {best_val_loss_a:.4f}")

        if args.save_model and val_loss_b < best_val_loss_b:
            best_val_loss_b = val_loss_b
            torch.save(model_b.state_dict(), os.path.join(save_dir, 'best_model_b.pth'))
            print(f"Best model_b saved with val_loss: {best_val_loss_b:.4f}")

        if args.save_model and val_loss_c < best_val_loss_c:
            best_val_loss_c = val_loss_c
            torch.save(model_c.state_dict(), os.path.join(save_dir, 'best_model_c.pth'))
            print(f"Best model_c saved with val_loss: {best_val_loss_c:.4f}")

        print(
            f'Model A - Loss: {loss_a:.4f}, Val Loss: {val_loss_a:.4f}, Val Acc: {val_acc_a:.4f}, AUC: {val_auc_a:.4f}')
        print(
            f'Model B - Loss: {loss_b:.4f}, Val Loss: {val_loss_b:.4f}, Val Acc: {val_acc_b:.4f}, AUC: {val_auc_b:.4f}')
        print(
            f'Model C - Loss: {loss_c:.4f}, Val Loss: {val_loss_c:.4f}, Val Acc: {val_acc_c:.4f}, AUC: {val_auc_c:.4f}')

        # Update model A's weights (originally every 3 epochs; update_frequency is 1 here)
        val_acc_a, val_auc_a, val_acc_a_threshold = v3_update_model_weights(
            epoch=epoch,
            model_to_update=model_a,
            other_models=[model_a, model_b, model_c],
            global_model=global_model,
            losses=[loss_a, loss_b, loss_c],
            val_loader=val_loader,
            device=device,
            val_auc_threshold=val_auc_a,
            validate_model=validate_model,
            criterion=criterion,
            update_frequency=1
        )

        # Update model B's weights (originally every 5 epochs; update_frequency is 1 here)
        val_acc_b, val_auc_b, val_acc_b_threshold = v3_update_model_weights(
            epoch=epoch,
            model_to_update=model_b,
            other_models=[model_a, model_b, model_c],
            global_model=global_model,
            losses=[loss_a, loss_b, loss_c],
            val_loader=val_loader,
            device=device,
            val_auc_threshold=val_auc_b,
            validate_model=validate_model,
            criterion=criterion,
            update_frequency=1
        )

        # Update model C's weights (originally every 2 epochs; update_frequency is 1 here)
        val_acc_c, val_auc_c, val_acc_c_threshold = v3_update_model_weights(
            epoch=epoch,
            model_to_update=model_c,
            other_models=[model_a, model_b, model_c],
            global_model=global_model,
            losses=[loss_a, loss_b, loss_c],
            val_loader=val_loader,
            device=device,
            val_auc_threshold=val_auc_c,
            validate_model=validate_model,
            criterion=criterion,
            update_frequency=1
        )

    print("Training complete! Best models saved.")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='resnet18_psa', help='Model name')
    parser.add_argument('--deep_backbone', type=str, default='*', help='deeplab backbone')
    parser.add_argument('--train_path', type=str, default='/media/terminator/实验&代码/yhs/FF++/c40/total/train')
    parser.add_argument('--val_path', type=str, default='/media/terminator/实验&代码/yhs/FF++/c40/total/val')
    # parser.add_argument('--train_path', type=str, default='/media/terminator/实验&代码/yhs/FF++_mask_sample/c23/df/train')
    # parser.add_argument('--val_path', type=str, default='/media/terminator/实验&代码/yhs/FF++_mask_sample/c23/df/val')
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--number_workers', type=int, default=8)
    parser.add_argument('--number_class', type=int, default=1)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--lr', type=float, default=0.00005)
    parser.add_argument('--save_dir', type=str,
                        default='/media/terminator/实验&代码/yhs/output/work2/resnet18_psa/c40/total/e10',
                        help='Directory to save best models')
    parser.add_argument('--save_model', type=bool, default=True, help='Whether to save the best models')
    parser.add_argument('--resume_training', type=bool, default=False, help='Whether to resume training from saved parameters')
    args = parser.parse_args()

    main(args)
Deleted file (217 lines):
@@ -1,217 +0,0 @@
import os
from PIL import Image
import torch
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset, random_split
from collections import Counter
from torch.utils.data import DataLoader, Subset
from torchvision import transforms, datasets
import os
from sklearn.model_selection import train_test_split


class CustomImageDataset(Dataset):
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.image_paths = []
        self.labels = []

        # Walk the subfolders 0 and 1 under root_dir
        for label in [0, 1]:
            folder_path = os.path.join(root_dir, str(label))
            if os.path.isdir(folder_path):
                for img_name in os.listdir(folder_path):
                    img_path = os.path.join(folder_path, img_name)
                    self.image_paths.append(img_path)
                    self.labels.append(label)

        # Debug prints of image paths and labels
        # print("Loaded image paths and labels:")
        # for path, label in zip(self.image_paths[:5], self.labels[:5]):
        #     print(f"Path: {path}, Label: {label}")
        # print(f"Total samples: {len(self.image_paths)}\n")

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        label = self.labels[idx]
        image = Image.open(img_path).convert("RGB")

        if self.transform:
            image = self.transform(image)

        return image, label


def get_test_data(test_image_path, batch_size, nw):
    data_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # test_dataset = datasets.ImageFolder(root=test_image_path, transform=data_transform)

    test_dataset = CustomImageDataset(root_dir=test_image_path, transform=data_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=nw)
    return test_loader


def get_Onedata(train_image_path, val_image_path, batch_size, num_workers):
    """
    Load the full training and validation datasets.
    """
    data_transform = {
        "train": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "val": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    # Build the training and validation datasets
    train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
    val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])

    # Build the data loaders
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return train_loader, val_loader


def get_data(train_image_path, val_image_path, batch_size, num_workers):
    data_transform = {
        "train": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "val": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "test": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
    val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])

    # Split the training set into three equal parts
    train_len = (len(train_dataset) // 3) * 3
    train_dataset_truncated = torch.utils.data.Subset(train_dataset, range(train_len))
    subset_len = train_len // 3
    dataset1, dataset2, dataset3 = random_split(train_dataset_truncated, [subset_len] * 3)

    loader1 = DataLoader(dataset1, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    loader2 = DataLoader(dataset2, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    loader3 = DataLoader(dataset3, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return loader1, loader2, loader3, subset_len, val_loader


def get_Fourdata(train_path, val_path, batch_size, num_workers):
    """
    Load the training and validation sets:
    4 client validation sets (df, f2f, fs, nt) plus 1 global validation set.

    Args:
        train_path (str): training data path
        val_path (str): validation data path
        batch_size (int): batch size
        num_workers (int): number of DataLoader workers

    Returns:
        tuple: the 4 client train and validation loaders, plus the global validation loader
    """
    # Preprocessing
    train_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    val_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Paths of the 4 client datasets
    client_names = ['df', 'f2f', 'fs', 'nt']
    client_train_loaders = []
    client_val_loaders = []

    for client in client_names:
        client_train_path = os.path.join(train_path, client)
        client_val_path = os.path.join(val_path, client)

        # Load the client's training data
        train_dataset = datasets.ImageFolder(root=client_train_path, transform=train_transform)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

        # Load the client's validation data
        val_dataset = datasets.ImageFolder(root=client_val_path, transform=val_transform)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

        client_train_loaders.append(train_loader)
        client_val_loaders.append(val_loader)

    # Global validation set
    global_val_dataset = datasets.ImageFolder(root=val_path, transform=val_transform)
    global_val_loader = DataLoader(global_val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return (*client_train_loaders, *client_val_loaders, global_val_loader)


def main():
    # Settings
    train_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/train"
    val_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/val"
    batch_size = 4
    num_workers = 2

    # Build the data loaders
    loader1, loader2, loader3, subset_len, val_loader = get_data(train_image_path, val_image_path, batch_size,
                                                                 num_workers)

    # Count the label types and frequencies
    train_labels = []
    for dataset in [loader1, loader2, loader3]:
        for _, labels in dataset:
            train_labels.extend(labels.tolist())

    val_labels = []
    for _, labels in val_loader:
        val_labels.extend(labels.tolist())

    # Tally the labels with Counter
    train_label_counts = Counter(train_labels)
    val_label_counts = Counter(val_labels)

    # Print the statistics
    print("Training Dataset - Label Counts:", train_label_counts)
    print("Validation Dataset - Label Counts:", val_label_counts)
    print("Label Types in Training:", set(train_labels))
    print("Label Types in Validation:", set(val_labels))


if __name__ == "__main__":
    main()
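`get_data` above shards the training set into three equal, disjoint subsets, one per simulated client, truncating the dataset so it divides evenly by three. The splitting step in isolation, on a stand-in dataset:

```python
import torch
from torch.utils.data import Subset, TensorDataset, random_split

# Stand-in dataset of 10 samples; 10 // 3 * 3 = 9 are kept so the shards come out equal.
full = TensorDataset(torch.arange(10).float().unsqueeze(1), torch.zeros(10))
usable_len = (len(full) // 3) * 3
truncated = Subset(full, range(usable_len))

shard_len = usable_len // 3
shard1, shard2, shard3 = random_split(truncated, [shard_len] * 3)
print(len(shard1), len(shard2), len(shard3))  # 3 3 3
```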
Deleted file (57 lines):
@@ -1,57 +0,0 @@
import torch
from torch import nn
from torchvision import models

from Deeplab.deeplab import DeepLab_F
from Deeplab.resnet_psa import BasicBlockWithPSA
from Deeplab.resnet_psa_v2 import ResNet
from model_base.efNet_base_model import DeepLab
from model_base.efficientnet import EfficientNet
from model_base.resnet_more import CustomResNet
from model_base.xcption import Xception


def get_model(name, number_class, device, backbone):
    """
    Load a model by name and adjust the final classification layer to the number of classes.

    Args:
        name (str): model name ('Vgg', 'ResNet', 'EfficientNet', 'Xception').
        number_class (int): number of classes.
        device (torch.device): device ('cuda' or 'cpu').
        resnet_type (str): ResNet variant ('resnet18', 'resnet34', 'resnet50', 'resnet101', etc.).

    Returns:
        nn.Module: the adjusted model.
    """
    if name == 'Vgg':
        model = models.vgg16_bn(pretrained=True).to(device)
        model.classifier[6] = nn.Linear(model.classifier[6].in_features, number_class)
    elif name == 'ResNet18':
        model = CustomResNet(resnet_type='resnet18', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet34':
        model = CustomResNet(resnet_type='resnet34', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet50':
        model = CustomResNet(resnet_type='resnet50', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet101':
        model = CustomResNet(resnet_type='resnet101', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet152':
        model = CustomResNet(resnet_type='resnet152', num_classes=number_class, pretrained=True).to(device)
    elif name == 'EfficientNet':
        # Load EfficientNet through the custom DeepLab class
        model = DeepLab(backbone='efficientnet', num_classes=number_class).to(device)
    elif name == 'Xception':
        model = Xception(
            in_planes=3,
            num_classes=number_class,
            pretrained=True,
            pretrained_path="/home/terminator/1325/yhs/fedLeaning/pre_model/xception-43020ad28.pth"
        ).to(device)
    elif name == 'DeepLab':
        # Load DeepLab_F with the given backbone
        model = DeepLab_F(num_classes=1, backbone=backbone).to(device)
    elif name == 'resnet18_psa':
        model = ResNet(BasicBlockWithPSA, [2, 2, 2, 2], number_class)
    else:
        raise ValueError(f"Model {name} is not supported.")
    return model
Deleted file (368 lines):
@@ -1,368 +0,0 @@
import numpy as np
import torch
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score
import copy
import torch.nn.functional as F
import random


def train_deepmodel(device, model, loader, optimizer, criterion, epoch, model_name):
    model.train()
    running_loss = 0.0
    corrects = 0.0
    alpha = 1
    beta = 0.1
    for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
        inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on the device
        optimizer.zero_grad()

        outputs, re_img = model(inputs)
        loss = criterion(outputs.squeeze(), labels.float())
        loss_F1 = F.l1_loss(re_img, inputs)
        loss = alpha * loss + beta * loss_F1
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    avg_loss = running_loss / len(loader)
    print(f'{model_name} Training Loss: {avg_loss:.4f}')
    return avg_loss


def validate_deepmodel(device, model, loader, criterion, epoch, model_name):
    model.eval()
    running_loss = 0.0
    correct, total = 0, 0
    all_labels, all_preds = [], []
    val_corrects = 0.0
    alpha = 1
    beta = 0.1

    with torch.no_grad():
        for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on the device

            outputs, re_img = model(inputs)

            # Convert the logits to predictions
            predicted = torch.sigmoid(outputs).data
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            loss = criterion(outputs.squeeze(), labels.float())
            loss_F1 = F.l1_loss(re_img, inputs)
            loss = alpha * loss + beta * loss_F1
            running_loss += loss.item()

    auc = roc_auc_score(all_labels, all_preds)
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # make sure this is a NumPy array
    acc = accuracy_score(all_labels, predicted_labels)
    avg_loss = running_loss / len(loader)
    print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
    return avg_loss, acc, auc


def test_deepmodel(device, model, loader):
    model.eval()
    all_labels, all_preds = [], []

    with torch.no_grad():
        for inputs, labels in tqdm(loader, desc='Testing', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on the device
            outputs, re_img = model(inputs)
            predicted = torch.sigmoid(outputs).data  # convert the logits to predictions

            # Collect the predictions and ground-truth labels
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    # Binarize the predictions
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)

    # Compute accuracy and AUC
    acc = accuracy_score(all_labels, predicted_labels)
    auc = roc_auc_score(all_labels, all_preds)

    print(f'Test Accuracy: {acc:.4f}, Test AUC: {auc:.4f}')
    return acc, auc


# def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
#     model.train()
#     running_loss = 0.0
#     for i, (inputs, labels) in enumerate(tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch')):
#         inputs, labels = inputs.float().to(device), labels.float().to(device)  # make sure the data format is right
#         optimizer.zero_grad()
#
#         outputs = model(inputs)
#         loss = criterion(outputs.squeeze(), labels)
#
#         # Occasionally print an output and label to sanity-check the format
#         if i % 10 == 0:  # print every 10 batches
#             print(f"Batch {i} - Sample Output: {outputs[0].item():.4f}, Sample Label: {labels[0].item()}")
#
#         # Check for abnormal loss values
#         if loss.item() < 0:
#             print(f"Warning: Negative loss detected at batch {i}. Loss: {loss.item()}")
#
#         loss.backward()
#         optimizer.step()
#
#         running_loss += loss.item()
#
#     avg_loss = running_loss / len(loader)
#     print(f'{model_name} Training Loss: {avg_loss:.4f}')
#     return avg_loss
def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
    model.train()
    running_loss = 0.0
    for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
        inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on the device
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs.squeeze(), labels.float())
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    avg_loss = running_loss / len(loader)
    print(f'{model_name} Training Loss: {avg_loss:.4f}')
    return avg_loss


def validate_model(device, model, loader, criterion, epoch, model_name):
    model.eval()
    running_loss = 0.0
    correct, total = 0, 0
    all_labels, all_preds = [], []

    with torch.no_grad():
        for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on the device

            outputs = model(inputs)

            # Convert the logits to predictions
            predicted = torch.sigmoid(outputs).data
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            loss = criterion(outputs.squeeze(), labels.float())
            running_loss += loss.item()
    auc = roc_auc_score(all_labels, all_preds)
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # make sure this is a NumPy array
    acc = accuracy_score(all_labels, predicted_labels)
    avg_loss = running_loss / len(loader)
    print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
    return avg_loss, acc, auc


# Weight aggregation function
def aggregate_weights(weights_list, alpha=1 / 3, beta=1 / 3, gamma=1 / 3):
    new_state_dict = copy.deepcopy(weights_list[0])  # copy the weight structure from model a
    for key in new_state_dict.keys():
        new_state_dict[key] = (alpha * weights_list[0][key] +
                               beta * weights_list[1][key] +
                               gamma * weights_list[2][key])
    return new_state_dict


def v3_update_model_weights(
        epoch,
        model_to_update,
        other_models,
        global_model,
        losses,
        val_loader,
        device,
        val_auc_threshold,  # current validation-AUC threshold of the model being updated
        validate_model,
        criterion,
        update_frequency
):
    """
    Update a model's weights when the given conditions are met.

    Args:
        epoch (int): current training epoch.
        model_to_update: the model to update.
        other_models (list): the other models used to compute the global weights.
        global_model: the global model.
        losses (list): per-model loss values.
        val_loader: validation DataLoader.
        device: device ('cuda' or 'cpu').
        val_auc_threshold (float): current validation AUC of the model being updated.
        validate_model (function): validation function.
        update_frequency (int): how often (in epochs) to update the weights.

    Returns:
        val_acc (float): validation accuracy of the global model.
        val_auc (float): validation AUC of the global model.
        updated_val_auc_threshold (float): the updated validation AUC threshold.
    """
    if (epoch + 1) % update_frequency == 0:
        # Gather all models' weights
        all_weights = [model.state_dict() for model in other_models]
        avg_weights = aggregate_weights(all_weights)  # aggregate the weights

        # Update the global model's weights
        global_model.load_state_dict(avg_weights)

        # Weighted average loss
        weighted_loss = sum(loss * 0.33 for loss in losses)
        print(f"Weighted Average Loss: {weighted_loss:.4f}")

        # Validate the global model
        val_loss, val_acc, val_auc = validate_model(device, global_model, val_loader, criterion, epoch, 'global_model')
        print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')

        # If the global model's AUC is higher, update the target model
        if val_auc > val_auc_threshold:
            print(f'Updating model at epoch {epoch + 1}')
            model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # raise the AUC threshold

        return val_acc, val_auc, val_auc_threshold
    return None, None, val_auc_threshold


def update_model_weights(
        epoch,
        model_to_update,
        other_models,
        global_model,
        losses,
        val_loader,
        device,
        val_auc_threshold,  # current validation-AUC threshold of the model being updated
        validate_model,
        criterion,
        update_frequency
):
    """
    Update a model's weights when the given conditions are met.

    Args:
        epoch (int): current training epoch.
        model_to_update: the model to update.
        other_models (list): the other models used to compute the global weights.
        global_model: the global model.
        losses (list): per-model loss values.
        val_loader: validation DataLoader.
        device: device ('cuda' or 'cpu').
        val_auc_threshold (float): current validation AUC of the model being updated.
        validate_model (function): validation function.
        update_frequency (int): how often (in epochs) to update the weights.

    Returns:
        val_acc (float): validation accuracy of the global model.
        val_auc (float): validation AUC of the global model.
        updated_val_auc_threshold (float): the updated validation AUC threshold.
    """
    if (epoch + 1) % update_frequency == 0:
        # Gather all models' weights
        all_weights = [model.state_dict() for model in other_models]
        avg_weights = aggregate_weights(all_weights)  # aggregate the weights

        # Update the global model's weights
        global_model.load_state_dict(avg_weights)

        # Weighted average loss
        weighted_loss = sum(loss * 0.33 for loss in losses)
        print(f"Weighted Average Loss: {weighted_loss:.4f}")

        # Validate the global model
        val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
                                                        'global_model')
        print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')

        # If the global model's AUC is higher, update the target model
        if val_auc > val_auc_threshold:
            print(f'Updating model at epoch {epoch + 1}')
            model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # raise the AUC threshold

        return val_acc, val_auc, val_auc_threshold
    return None, None, val_auc_threshold


def f_update_model_weights(
        epoch,
        model_to_update,
        other_models,
        global_model,
        losses,
        val_loader,
        device,
        val_auc_threshold,  # current validation-AUC threshold of the model being updated
        aggregate_weights,  # weight aggregation function
        validate_model,
        criterion,
        update_frequency
):
    """
    Update a model's weights when the given conditions are met.

    Args:
        epoch (int): current training epoch.
        model_to_update: the model to update.
        other_models (list): the other models used to compute the global weights.
        global_model: the global model.
        losses (list): per-model loss values.
        val_loader: validation DataLoader.
        device: device ('cuda' or 'cpu').
        val_auc_threshold (float): current validation-AUC threshold of the model being updated.
        aggregate_weights (function): weight aggregation function.
        validate_model (function): validation function.
        criterion: loss function.
        update_frequency (int): how often (in epochs) to update the weights.

    Returns:
        val_acc (float): validation accuracy of the global model.
        val_auc (float): validation AUC of the global model.
        updated_val_auc_threshold (float): the updated validation-AUC threshold.
    """
    # Update the model weights every `update_frequency` epochs
    if (epoch + 1) % update_frequency == 0:
        print(f"\n[Epoch {epoch + 1}] Updating global model weights...")

        # Gather the other models' weights
        all_weights = [model.state_dict() for model in other_models]

        # Compute the global weights with the aggregation function
        avg_weights = aggregate_weights(all_weights)
        print("Global model weights aggregated.")

        # Update the global model's weights
        global_model.load_state_dict(avg_weights)

        # Weighted average loss
        weighted_loss = sum(loss * (1 / len(losses)) for loss in losses)  # uniform weighting
        print(f"Weighted Average Loss: {weighted_loss:.4f}")

        # Validate the global model
        val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
                                                        'global_model')
        print(f"[Global Model] Validation Loss: {val_loss:.4f}, Accuracy: {val_acc:.4f}, AUC: {val_auc:.4f}")

        # If the global model's AUC beats the threshold, update the target model's weights
        if val_auc > val_auc_threshold:
            print(f"Global model AUC improved ({val_auc:.4f} > {val_auc_threshold:.4f}). Updating target model.")
            model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # raise the AUC threshold
        else:
            print(
                f"Global model AUC did not improve ({val_auc:.4f} <= {val_auc_threshold:.4f}). No update to target model.")

        return val_acc, val_auc, val_auc_threshold

    # Not yet at the update frequency: return the current AUC threshold
    return None, None, val_auc_threshold
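`aggregate_weights` above is a convex combination of three state dicts with uniform 1/3 coefficients by default, i.e. an equal-weight FedAvg step. The arithmetic on a single made-up parameter tensor:

```python
import torch

# Three clients' copies of one parameter tensor (made-up values).
w_a = torch.tensor([1.0, 2.0])
w_b = torch.tensor([3.0, 4.0])
w_c = torch.tensor([5.0, 6.0])

alpha = beta = gamma = 1 / 3
aggregated = alpha * w_a + beta * w_b + gamma * w_c
print(aggregated)  # tensor([3., 4.]), the element-wise mean
```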
federated_learning/yolo11n.pt (new binary file)
federated_learning/yolov8.yaml (new file, 49 lines)
@@ -0,0 +1,49 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

# Ultralytics YOLOv8 object detection model with P3/8 - P5/32 outputs
# Model docs: https://docs.ultralytics.com/models/yolov8
# Task docs: https://docs.ultralytics.com/tasks/detect

# Parameters
nc: 1 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024] # YOLOv8n summary: 129 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPS
  s: [0.33, 0.50, 1024] # YOLOv8s summary: 129 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPS
  m: [0.67, 0.75, 768] # YOLOv8m summary: 169 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPS
  l: [1.00, 1.00, 512] # YOLOv8l summary: 209 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPS
  x: [1.00, 1.25, 512] # YOLOv8x summary: 209 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPS

# YOLOv8.0n backbone
backbone:
  # [from, repeats, module, args]
  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
  - [-1, 3, C2f, [128, True]]
  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
  - [-1, 6, C2f, [256, True]]
  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
  - [-1, 6, C2f, [512, True]]
  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
  - [-1, 3, C2f, [1024, True]]
  - [-1, 1, SPPF, [1024, 5]] # 9

# YOLOv8.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
  - [-1, 3, C2f, [512]] # 12

  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
  - [-1, 3, C2f, [256]] # 15 (P3/8-small)

  - [-1, 1, Conv, [256, 3, 2]]
  - [[-1, 12], 1, Concat, [1]] # cat head P4
  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)

  - [-1, 1, Conv, [512, 3, 2]]
  - [[-1, 9], 1, Concat, [1]] # cat head P5
  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)

  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
federated_learning/yolov8_fed.py (new file, 252 lines)
@@ -0,0 +1,252 @@
import glob
import os
from pathlib import Path
import json
from pydoc import cli
from threading import local

import yaml
from ultralytics import YOLO
import copy
import torch


# ------------ Federated-learning utility functions ------------
def federated_avg(global_model, client_weights):
    """Core federated averaging (FedAvg) algorithm."""
    # Total number of samples
    total_samples = sum(n for _, n in client_weights)
    if total_samples == 0:
        raise ValueError("Total number of samples must be positive.")

    # DEBUG: global_dict
    # print(global_model)

    # Get the underlying PyTorch parameters of the YOLO model
    global_dict = global_model.model.state_dict()
    # Unzip the clients' state_dicts and their sample counts
    state_dicts, sample_counts = zip(*client_weights)

    # Clone the parameters and detach them from the computation graph
    global_dict_copy = {
        k: v.clone().detach().requires_grad_(False) for k, v in global_dict.items()
    }

    # Aggregate parameters that are trainable and present in every client
    for key in global_dict_copy:
        # if global_dict_copy[key].dtype != torch.float32:
        #     continue
        # if any(
        #     x in key for x in ["running_mean", "running_var", "num_batches_tracked"]
        # ):
        #     continue
        # Check that every client has this key
        all_clients_have_key = all(key in sd for sd in state_dicts)
        if all_clients_have_key:
            # Weighted tensor per client
            # weighted_tensors = [
            #     client_state[key].float() * (sample_count / total_samples)
            #     for client_state, sample_count in zip(state_dicts, sample_counts)
            # ]
            weighted_tensors = []
            for client_state, sample_count in zip(state_dicts, sample_counts):
                weight = sample_count / total_samples  # client weight
                weighted_tensor = client_state[key].float() * weight  # weighted tensor
                weighted_tensors.append(weighted_tensor)
            # Sum the weighted tensors to update the global parameter
            global_dict_copy[key] = torch.stack(weighted_tensors, dim=0).sum(dim=0)

        # else:
        #     print(f"Error: key {key} missing from some clients; global parameter kept")
        #     # Abort training or log it
        #     raise KeyError(f"Key {key} missing")

    # Load the result back into the YOLO model
    global_model.model.load_state_dict(global_dict_copy, strict=True)

    # global_model.model.train()
    # with torch.no_grad():
    #     global_model.model.load_state_dict(global_dict_copy, strict=True)

    # Key layers to monitor
    MONITOR_KEYS = [
        "model.0.conv.weight",
        "model.1.conv.weight",
        "model.3.conv.weight",
        "model.5.conv.weight",
        "model.7.conv.weight",
        "model.9.cv1.conv.weight",
        "model.12.cv1.conv.weight",
        "model.15.cv1.conv.weight",
        "model.18.cv1.conv.weight",
        "model.21.cv1.conv.weight",
        "model.22.dfl.conv.weight",
    ]

    with open("aggregation_check.txt", "a") as f:
        f.write("\n=== Parameter aggregation check ===\n")
    for key in MONITOR_KEYS:
        # if key not in global_dict:
        #     continue
        # if not all(key in sd for sd in state_dicts):
        #     continue

        # Post-aggregation mean
        aggregated_mean = global_dict[key].mean().item()

        # Per-client means
        client_means = [sd[key].float().mean().item() for sd in state_dicts]
        with open("aggregation_check.txt", "a") as f:
            f.write(f"Layer '{key}' post-aggregation mean: {aggregated_mean:.6f}\n")
            f.write(f"Per-client means for this layer: {[f'{cm:.6f}' for cm in client_means]}\n")
            f.write(f"Max difference across clients: {max(client_means) - min(client_means):.6f}\n\n")

    return global_model


# ------------ Modified training loop ------------
def federated_train(num_rounds, clients_data):
    # ========== Initialize the metric containers ==========
    metrics = {
        "round": [],
        "val_mAP": [],  # validation-set mAP per round
        # "train_loss": [],  # average training loss per round
        "client_mAPs": [],  # per-client local-model mAP on the validation set
        "communication_cost": [],  # communication cost per round (MB)
    }
    # Initialize the global model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    global_model = (
        YOLO("/home/image1325/DATA/Graduation-Project/federated_learning/yolov8n.yaml")
        .load("/home/image1325/DATA/Graduation-Project/federated_learning/yolov8n.pt")
        .to(device)
    )
    global_model.model.model[-1].nc = 1  # set the number of detection classes to 1
    # global_model.model.train.ema.enabled = False

    # Clone the global model
    local_model = copy.deepcopy(global_model)

    for _ in range(num_rounds):
        client_weights = []
        # Per-client training losses
        # client_losses = []

        # DEBUG: inspect the global model parameters
        # global_dict = global_model.model.state_dict()
        # print(global_dict.keys())

        # Local training on each client
        for data_path in clients_data:
            # Count the local training samples
            with open(data_path, "r") as f:
                config = yaml.safe_load(f)
            # Resolve img_dir relative to the YAML file's location
            yaml_dir = os.path.dirname(data_path)
            img_dir = os.path.join(
                yaml_dir, config.get("train", data_path)
            )  # image directory from the config file

            # print(f"Image directory: {img_dir}")
            num_samples = (
                len(glob.glob(os.path.join(img_dir, "*.jpg")))
                + len(glob.glob(os.path.join(img_dir, "*.png")))
                + len(glob.glob(os.path.join(img_dir, "*.jpeg")))
            )
            # print(f"Number of images: {num_samples}")

            local_model.model.load_state_dict(
                global_model.model.state_dict(), strict=True
            )

            # Local training (keeping the original parameter settings)
            local_model.train(
                name=f"train{_ + 1}",  # current round
                data=data_path,
                # model=local_model,
                epochs=16,  # local epochs per round
                # save_period=16,
                imgsz=768,  # image size
                verbose=False,  # suppress verbose output
                batch=-1,  # batch size
                workers=6,  # number of worker threads
            )

            # Record the client training loss
            # client_loss = results.results_dict['train_loss']
            # client_losses.append(client_loss)

            # Collect the model parameters and sample count
            client_weights.append((local_model.model.state_dict(), num_samples))

        # Aggregate the parameters and update the global model
        global_model = federated_avg(global_model, client_weights)

        # DEBUG: inspect the global model parameters
        # keys = global_model.model.state_dict().keys()

        # ========== Evaluate the global model ==========
        # Copy the global model so evaluation cannot modify its parameters
        val_model = copy.deepcopy(global_model)
        # Evaluate the global model on the validation set
        with torch.no_grad():
            val_results = val_model.val(
                data="/mnt/DATA/uav_dataset_old/UAVdataset/fed_data.yaml",  # validation-set config file
                imgsz=768,  # image size
                batch=16,  # batch size
                verbose=False,  # suppress verbose output
            )
        # Drop the evaluation model
        del val_model

        # DEBUG: inspect the global model parameters
        # if keys != global_model.model.state_dict().keys():
        #     print("Model parameters are inconsistent!")

        val_mAP = val_results.box.map  # get mAP@0.5

        # Average training loss
        # avg_train_loss = sum(client_losses) / len(client_losses)

        # Communication cost (assuming the full set of model parameters is transmitted)
        model_size = sum(p.numel() * 4 for p in global_model.model.parameters()) / (
            1024**2
        )  # MB

        # Record into the metric containers
        metrics["round"].append(_ + 1)
        metrics["val_mAP"].append(val_mAP)
        # metrics['train_loss'].append(avg_train_loss)
        metrics["communication_cost"].append(model_size)
        # Log this round's results
        with open("aggregation_check.txt", "a") as f:
            f.write(f"\n[Round {_ + 1}/{num_rounds}]\n")
            f.write(f"Validation mAP@0.5: {val_mAP:.4f}\n")
            # f.write(f"Average Train Loss: {avg_train_loss:.4f}")
            f.write(f"Communication Cost: {model_size:.2f} MB\n\n")

    return global_model, metrics


if __name__ == "__main__":
    # Federated-training configuration
    clients_config = [
        "/mnt/DATA/uav_fed/train1/train1.yaml",  # client 1 data path
        "/mnt/DATA/uav_fed/train2/train2.yaml",  # client 2 data path
    ]

    # Local test datasets
    # clients_config = [
    #     "/home/image1325/DATA/Graduation-Project/dataset/train1/train1.yaml",
    #     "/home/image1325/DATA/Graduation-Project/dataset/train2/train2.yaml",
    # ]

    # Run federated training
    final_model, metrics = federated_train(num_rounds=10, clients_data=clients_config)

    # Save the final model
    final_model.save("yolov8n_federated.pt")
    # final_model.export(format="onnx")  # export to ONNX

    with open("metrics.json", "w") as f:
        json.dump(metrics, f, indent=4)
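`federated_avg` implements the standard FedAvg update, w_global = sum_k (n_k / n) * w_k, weighting each client's state dict by its sample count. A self-contained sketch of the same arithmetic on plain dicts (tensors and counts are made up):

```python
import torch

def fedavg(state_dicts, sample_counts):
    """Sample-count-weighted average of client state dicts, as in federated_avg."""
    total = sum(sample_counts)
    return {
        key: torch.stack(
            [sd[key].float() * (n / total) for sd, n in zip(state_dicts, sample_counts)]
        ).sum(dim=0)
        for key in state_dicts[0]
    }

# Two clients with 2 and 6 samples: the second contributes three times the weight.
client1 = {"w": torch.tensor([0.0, 0.0])}
client2 = {"w": torch.tensor([4.0, 8.0])}
print(fedavg([client1, client2], [2, 6]))  # {'w': tensor([3., 6.])}
```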
image_fusion/Image_Registration_test.py (modified)
@@ -1,61 +1,46 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time :
# @Author :
# @File : Image_Registration_test.py

import time
import argparse

import cv2
import numpy as np

from ultralytics import YOLO
from skimage.metrics import structural_similarity as ssim

# Initialize the YOLOv8 model
yolo_model = YOLO("yolov8n.pt")  # can be swapped for yolov8s/m/l, etc.
yolo_model.to('cuda')  # enable GPU acceleration (optional)
yolo_model = YOLO("best.pt")  # can be swapped for yolov8s/m/l, etc.
yolo_model.to('cuda')  # enable GPU acceleration


def sift_registration(img1, img2):
    img1gray = cv2.normalize(img1, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)
    img2gray = img2

    sift = cv2.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 0.75 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    MIN_MATCH_COUNT = 4
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    else:
        print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
        M = np.array([[1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1]], dtype=np.float64)
    if M is None:
        M = np.array([[1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1]], dtype=np.float64)
    return 1, M[0], len(pts2)

def calculate_en(img):
    """Information entropy (on a grayscale image)."""
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])
    hist = hist / hist.sum()
    return -np.sum(hist * np.log2(hist + 1e-10))


def calculate_sf(img):
    """Spatial frequency (on a grayscale image)."""
    rf = np.sqrt(np.mean(np.square(np.diff(img, axis=0))))
    cf = np.sqrt(np.mean(np.square(np.diff(img, axis=1))))
    return np.sqrt(rf ** 2 + cf ** 2)


def calculate_mi(img1, img2):
    """Mutual information (on grayscale images)."""
    hist_2d = np.histogram2d(img1.ravel(), img2.ravel(), 256)[0]
    pxy = hist_2d / hist_2d.sum()
    px = np.sum(pxy, axis=1)
    py = np.sum(pxy, axis=0)
    return np.sum(pxy * np.log2(pxy / (px[:, None] * py[None, :] + 1e-10) + 1e-10))


def calculate_ssim(img1, img2):
    """SSIM (on grayscale images)."""
    return ssim(img1, img2, data_range=255)


# Truncated linear RGB contrast stretch: clip values below the 2nd and above the 98th percentile (the two percentiles are usually the same) and set output limits
@@ -93,6 +78,10 @@ def Images_matching(img_base, img_target):
    """
    start = time.time()
    orb = cv2.ORB_create()

    # Contrast-stretch the visible-light image
    # img_base = truncated_linear_stretch(img_base)

    img_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT_create()
    # Compute keypoints and their descriptors with the SIFT operator
@@ -100,7 +89,9 @@ def Images_matching(img_base, img_target):
    kp1, des1 = sift.detectAndCompute(img_base, None)  # 1136 1136, 64
    kp2, des2 = sift.detectAndCompute(img_target, None)
    en1 = time.time()

    # print(en1 - st1, "feature extraction")

    # KNN feature matching
    # FLANN_INDEX_KDTREE = 0  # FLANN matcher parameter
    # indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # index config; 5 density trees
@@ -112,6 +103,7 @@ def Images_matching(img_base, img_target):
    # search_params = dict(checks=50)
    # flann = cv2.FlannBasedMatcher(index_params, search_params)
    # matches = flann.knnMatch(des1, des2, k=2)

    st2 = time.time()
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(des1, des2, k=2)
@@ -125,9 +117,10 @@ def Images_matching(img_base, img_target):
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # descriptor indices in the query image  # 134, 2
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # descriptor indices in the training (template) image
    if len(src_pts) <= 4:
        print("Not enough matches are found - {}/{}".format(len(good), 4))
        return 0, None, 0
    else:
        # print(len(dst_pts), len(src_pts), "registration points")
        print(len(dst_pts), len(src_pts), "registration points")
        H = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 4)  # transformation matrix  H[0]: 3, 3  H[1]: 134, 1
        end = time.time()
        times = end - start
@@ -181,61 +174,181 @@ def main(matchimg_vi, matchimg_in):
        orimg_vi = matchimg_vi
        orimg_in = matchimg_in
        h, w = orimg_vi.shape[:2]  # 480 640
        flag, H, dot = Images_matching(matchimg_vi, matchimg_in)  # (3, 3) // get the matching registration points
        # (3, 3) // get the matching registration points
        flag, H, dot = Images_matching(matchimg_vi, matchimg_in)
        if flag == 0:
            return 0, None, 0
            return 0, None, 0, 0.0, 0.0, 0.0, 0.0
        else:
            # Registration
            matched_ni = cv2.warpPerspective(orimg_in, H, (w, h))
            # matched_ni,left,right,top,bottom=removeBlackBorder(matched_ni)
            matched_ni, left, right, top, bottom = removeBlackBorder(matched_ni)

            # Crop the visible-light image
            # fusion = fusions(orimg_vi[left:right, top:bottom], matched_ni)

            # Do not crop the visible-light image
            fusion = fusions(orimg_vi, matched_ni)

            # Convert to grayscale for the quality metrics
            fusion_gray = cv2.cvtColor(fusion, cv2.COLOR_RGB2GRAY)
            cropped_vi_gray = cv2.cvtColor(orimg_vi, cv2.COLOR_BGR2GRAY)
            matched_ni_gray = matched_ni  # the infrared image is already grayscale

            # Compute the metrics
            en = calculate_en(fusion_gray)
            sf = calculate_sf(fusion_gray)
            mi_visible = calculate_mi(fusion_gray, cropped_vi_gray)
            mi_infrared = calculate_mi(fusion_gray, matched_ni_gray)
            mi_total = mi_visible + mi_infrared

            # SSIM with error tolerance
            try:
                ssim_visible = calculate_ssim(fusion_gray, cropped_vi_gray)
                ssim_infrared = calculate_ssim(fusion_gray, matched_ni_gray)
                ssim_avg = (ssim_visible + ssim_infrared) / 2
            except Exception as ssim_error:
                print(f"SSIM computation error: {ssim_error}")
                ssim_avg = -1  # -1 marks a failed computation

            # YOLOv8 object detection
            results = yolo_model(fusion)  # feed in the fused image
            annotated_image = results[0].plot()  # draw the detection boxes

            return 1, annotated_image, dot  # return the annotated image
            # Return the image with detection results
            return 1, annotated_image, dot, en, sf, mi_total, ssim_avg
    except Exception as e:
        print(f"Error in fusion/detection: {e}")
        return 0, None, 0
        return 0, None, 0, 0.0, 0.0, 0.0, 0.0


def parse_args():
    # Visible and infrared image paths
    visible_image_path = "./test/visible/visibleI0195.jpg"  # visible-light image path
    infrared_image_path = "./test/infrared/infraredI0195.jpg"  # infrared image path
    # Visible and infrared video paths
    visible_video_path = "./test/visible.mp4"  # visible-light video path
    infrared_video_path = "./test/infrared.mp4"  # infrared video path

    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description='Image fusion and object detection')

    parser.add_argument('--mode', type=str, choices=['video', 'image'], default='image',
                        help='input mode: video (video stream) or image (still images)')

    # Camera vs. video file
    parser.add_argument('--source', type=str, choices=['camera', 'file'],
                        help='video input type: camera or file')

    # Video-mode parameters
    parser.add_argument('--video1', type=str, default=visible_video_path,
                        help='visible-light video path (only needed when source=file)')
    parser.add_argument('--video2', type=str, default=infrared_video_path,
                        help='infrared video path (only needed when source=file)')

    # Camera-mode parameters
    parser.add_argument('--camera_id1', type=int, default=0,
                        help='visible-light camera ID (only needed when source=camera, default 0)')
    parser.add_argument('--camera_id2', type=int, default=1,
                        help='infrared camera ID (only needed when source=camera, default 1)')
    parser.add_argument('--output', type=str, default='output.mp4',
                        help='output video path (only needed in video mode)')

    # Image-mode parameters
    parser.add_argument('--visible', type=str, default=visible_image_path,
                        help='visible-light image path (only needed in image mode)')
    parser.add_argument('--infrared', type=str, default=infrared_image_path,
                        help='infrared image path (only needed in image mode)')

    return parser.parse_args()


if __name__ == '__main__':
    time_all = 0
    dots = 0
    i = 0
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    capture = cv2.VideoCapture("video/20190926_141816_1_8/20190926_141816_1_8/infrared.mp4")
    capture2 = cv2.VideoCapture("video/20190926_141816_1_8/20190926_141816_1_8/visible.mp4")
    fps = capture.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter('output2.mp4', fourcc, fps, (640, 480))
    # Keep reading frames
    while True:
        read_code, frame = capture.read()  # infrared frame
        read_code2, frame2 = capture2.read()  # visible-light frame
        if not read_code:
            break
        i += 1
        # frame = cv2.resize(frame, (1920, 1080))
        # frame2 = cv2.resize(frame2, (640, 512))
    args = parse_args()

    if args.mode == 'video':
        if args.source == 'file':
            # ========== Video-stream mode ==========
            if not args.video1 or not args.video2:
                raise ValueError("Video mode requires --video1 and --video2")
            capture = cv2.VideoCapture(args.video2)
            capture2 = cv2.VideoCapture(args.video1)
        elif args.source == 'camera':
            # ========== Camera mode ==========
            capture = cv2.VideoCapture(args.camera_id1)
            capture2 = cv2.VideoCapture(args.camera_id2)
        else:
            raise ValueError("--source must be specified (camera or file)")

        # Shared video-processing logic
        fps = capture.get(cv2.CAP_PROP_FPS) if args.source == 'file' else 30
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(args.output, fourcc, fps, (640, 480))

        while True:
            ret1, frame_vi = capture.read()  # visible-light frame
            ret2, frame_ir = capture2.read()  # infrared frame
            if not ret1 or not ret2:
                break

            # Convert the infrared frame to grayscale
            frame_ir_gray = cv2.cvtColor(frame_ir, cv2.COLOR_BGR2GRAY)

            # Run fusion and detection
            flag, fusion, _ = main(frame_vi, frame_ir_gray)

            if flag == 1:
                cv2.imshow("Fusion with YOLOv8 Detection", fusion)
                out.write(fusion)

            if cv2.waitKey(1) == ord('q'):
                break

        # Release resources
        capture.release()
        capture2.release()
        out.release()
        cv2.destroyAllWindows()

    elif args.mode == 'image':
        # ========== Image mode ==========
        if not args.infrared or not args.visible:
            raise ValueError("Image mode requires --visible and --infrared")

        # Read the images
        img_visible = cv2.imread(args.visible)
        img_infrared = cv2.imread(args.infrared)

        if img_visible is None or img_infrared is None:
            print("Error: failed to load the images; please check the paths!")
            exit()

        # Convert to grayscale (infrared preprocessing)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        img_inf_gray = cv2.cvtColor(img_infrared, cv2.COLOR_BGR2GRAY)

        # Call main for fusion and detection
        flag, fusion, dot = main(frame2, frame_gray)
        # Run fusion and detection
        flag, fusion_result, dot, en, sf, mi, ssim_val = main(img_visible, img_inf_gray)

        if flag == 1:
            # Show the fused image with detections
            cv2.imshow("Fusion with YOLOv8 Detection", fusion)
            out.write(fusion)

            if cv2.waitKey(1) == ord('q'):
                break
        # Release resources
        capture.release()
        capture2.release()
        cv2.destroyAllWindows()
        ave = time_all / i
        print(ave, "average time")
        cv2.destroyAllWindows()
            # Show the quality metrics
            print("\n======== Fusion quality evaluation ========")
            print(f"Entropy (EN): {en:.2f}")
            print(f"Spatial frequency (SF): {sf:.2f}")
            print(f"Mutual information (MI): {mi:.2f}")

            # Show SSIM only when it was computed
            if ssim_val >= 0:
                print(f"Structural similarity (SSIM): {ssim_val:.4f}")
            else:
                print("Structural similarity (SSIM): computation failed (skipped)")

            print(f"Registration points: {dot}")
            # Show and save the result
            # cv2.imshow("Fusion with Detection", fusion_result)
            cv2.imwrite("output/fusion_result.jpg", fusion_result)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
        else:
            print("Fusion failed!")
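`calculate_en` above is the Shannon entropy of the 8-bit grayscale histogram, EN = -sum_i p_i log2 p_i, so a constant image scores near 0 bits and a uniformly random one near 8. A quick check on synthetic images:

```python
import cv2
import numpy as np

def entropy(img):
    """Shannon entropy of an 8-bit grayscale image, matching calculate_en."""
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])
    hist = hist / hist.sum()
    return float(-np.sum(hist * np.log2(hist + 1e-10)))

flat = np.full((64, 64), 128, dtype=np.uint8)                 # single gray level -> ~0 bits
noisy = np.random.randint(0, 256, (64, 64), dtype=np.uint8)   # near-uniform -> ~8 bits
print(entropy(flat), entropy(noisy))
```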
image_fusion/output/fusion_result.jpg (new binary file, 152 KiB)

image_fusion/test/infrared.jpg (new binary file, 28 KiB)

image_fusion/test/visible.jpg (new binary file, 67 KiB)
requirements.txt (new file, 41 lines)
@@ -0,0 +1,41 @@
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.2
cycler==0.12.1
filelock==3.18.0
fonttools==4.57.0
fsspec==2025.3.2
idna==3.10
Jinja2==3.1.6
kiwisolver==1.4.8
MarkupSafe==3.0.2
matplotlib==3.10.1
mpmath==1.3.0
networkx==3.4.2
numpy==2.1.1
opencv-python==4.11.0.86
packaging==24.2
pandas==2.2.3
pillow==11.2.1
psutil==7.0.0
py-cpuinfo==9.0.0
pyparsing==3.2.3
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
scipy==1.15.2
seaborn==0.13.2
setuptools==78.1.0
six==1.17.0
sympy==1.13.1
torch==2.6.0+cu124
torchaudio==2.6.0+cu124
torchvision==0.21.0+cu124
tqdm==4.67.1
typing_extensions==4.13.2
tzdata==2025.2
ultralytics==8.3.111
ultralytics-thop==2.0.14
urllib3==2.4.0
yolov8/yolov8.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
train: /mnt/DATA/dataset/uav_dataset/train/images/
val: /mnt/DATA/dataset/uav_dataset/val/images/
test: /mnt/DATA/dataset/test2/images/
# number of classes
nc: 1
names: ['uav']
yolov8/yolov8_train.py (new file, 13 lines)
@@ -0,0 +1,13 @@
from ultralytics import YOLO

# Load the pretrained model
model = YOLO('../yolov8n.pt')

# Start training
model.train(
    data='./yolov8.yaml',  # dataset config file path
    epochs=320,  # number of training epochs
    batch=-1,  # batch size
    imgsz=640,  # input image size
    device=0  # device to use (0 = GPU, 'cpu' = CPU)
)
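Once training finishes, the resulting checkpoint can be evaluated with the same `model.val()` API the federated script uses; a hedged sketch, assuming the default Ultralytics output layout (`runs/detect/train/weights/best.pt`):

```python
from ultralytics import YOLO

# Load the best checkpoint written by model.train() (default Ultralytics layout).
model = YOLO('runs/detect/train/weights/best.pt')

# Validate against the same dataset config used for training.
metrics = model.val(data='./yolov8.yaml', imgsz=640, batch=16)
print(metrics.box.map50, metrics.box.map)  # mAP@0.5 and mAP@0.5:0.95
```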