Federated learning module
federated_learning/utils/__init__.py (new file, 0 lines)

federated_learning/utils/data_utils.py (new file, 217 lines)
@@ -0,0 +1,217 @@
import os
from collections import Counter

from PIL import Image
from torch.utils.data import DataLoader, Dataset, Subset, random_split
from torchvision import transforms, datasets


class CustomImageDataset(Dataset):
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.image_paths = []
        self.labels = []

        # Walk the "0" and "1" subfolders under root_dir.
        for label in [0, 1]:
            folder_path = os.path.join(root_dir, str(label))
            if os.path.isdir(folder_path):
                for img_name in os.listdir(folder_path):
                    img_path = os.path.join(folder_path, img_name)
                    self.image_paths.append(img_path)
                    self.labels.append(label)

        # Debug output of the loaded image paths and labels:
        # print("Loaded image paths and labels:")
        # for path, label in zip(self.image_paths[:5], self.labels[:5]):
        #     print(f"Path: {path}, Label: {label}")
        # print(f"Total samples: {len(self.image_paths)}\n")

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        label = self.labels[idx]
        image = Image.open(img_path).convert("RGB")

        if self.transform:
            image = self.transform(image)

        return image, label

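# CustomImageDataset assumes a binary folder layout with one subfolder per
# label (illustrative sketch; what the two classes mean, e.g. real vs. fake
# frames, is whatever the dataset encodes and is not fixed by this file):
#
#     root_dir/
#         0/   images assigned label 0
#         1/   images assigned label 1
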
def get_test_data(test_image_path, batch_size, nw):
    data_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # test_dataset = datasets.ImageFolder(root=test_image_path, transform=data_transform)

    test_dataset = CustomImageDataset(root_dir=test_image_path, transform=data_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=nw)
    return test_loader


def get_Onedata(train_image_path, val_image_path, batch_size, num_workers):
    """
    Load the full training and validation datasets.
    """
    data_transform = {
        "train": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "val": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    # Build the training and validation datasets.
    train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
    val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])

    # Build the data loaders.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return train_loader, val_loader


def get_data(train_image_path, val_image_path, batch_size, num_workers):
    data_transform = {
        "train": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "val": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "test": transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
    val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])

    # Truncate to a multiple of 3, then split the training set into three equal client subsets.
    train_len = (len(train_dataset) // 3) * 3
    train_dataset_truncated = Subset(train_dataset, range(train_len))
    subset_len = train_len // 3
    dataset1, dataset2, dataset3 = random_split(train_dataset_truncated, [subset_len] * 3)

    loader1 = DataLoader(dataset1, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    loader2 = DataLoader(dataset2, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    loader3 = DataLoader(dataset3, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return loader1, loader2, loader3, subset_len, val_loader


def get_Fourdata(train_path, val_path, batch_size, num_workers):
    """
    Load the training and validation sets:
    4 client validation sets (df, f2f, fs, nt) plus 1 global validation set.

    Args:
        train_path (str): path to the training data
        val_path (str): path to the validation data
        batch_size (int): batch size
        num_workers (int): number of DataLoader worker processes

    Returns:
        tuple: the 4 client training loaders, the 4 client validation loaders,
        and the global validation loader
    """
    # Data preprocessing
    train_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    val_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # The four client sub-datasets (one subdirectory per forgery method).
    client_names = ['df', 'f2f', 'fs', 'nt']
    client_train_loaders = []
    client_val_loaders = []

    for client in client_names:
        client_train_path = os.path.join(train_path, client)
        client_val_path = os.path.join(val_path, client)

        # Client training data
        train_dataset = datasets.ImageFolder(root=client_train_path, transform=train_transform)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

        # Client validation data
        val_dataset = datasets.ImageFolder(root=client_val_path, transform=val_transform)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

        client_train_loaders.append(train_loader)
        client_val_loaders.append(val_loader)

    # Global validation set (note: ImageFolder treats the immediate
    # subfolders of val_path as its classes here).
    global_val_dataset = datasets.ImageFolder(root=val_path, transform=val_transform)
    global_val_loader = DataLoader(global_val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return (*client_train_loaders, *client_val_loaders, global_val_loader)


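# Usage sketch (the paths are placeholders): get_Fourdata returns a flat
# nine-element tuple, 4 client train loaders and 4 client val loaders followed
# by the global val loader, so positional unpacking keeps them straight:
#     (df_train, f2f_train, fs_train, nt_train,
#      df_val, f2f_val, fs_val, nt_val,
#      global_val_loader) = get_Fourdata("FF++/train", "FF++/val", 4, 2)
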
def main():
    # Parameters
    train_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/train"
    val_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/val"
    batch_size = 4
    num_workers = 2

    # Build the data loaders.
    loader1, loader2, loader3, subset_len, val_loader = get_data(train_image_path, val_image_path, batch_size,
                                                                 num_workers)

    # Collect the labels to check their counts and types.
    train_labels = []
    for loader in [loader1, loader2, loader3]:
        for _, labels in loader:
            train_labels.extend(labels.tolist())

    val_labels = []
    for _, labels in val_loader:
        val_labels.extend(labels.tolist())

    # Count the labels with Counter.
    train_label_counts = Counter(train_labels)
    val_label_counts = Counter(val_labels)

    # Print the statistics.
    print("Training Dataset - Label Counts:", train_label_counts)
    print("Validation Dataset - Label Counts:", val_label_counts)
    print("Label Types in Training:", set(train_labels))
    print("Label Types in Validation:", set(val_labels))


if __name__ == "__main__":
    main()


federated_learning/utils/model_utils.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import torch
from torch import nn
from torchvision import models

from Deeplab.deeplab import DeepLab_F
from Deeplab.resnet_psa import BasicBlockWithPSA
from Deeplab.resnet_psa_v2 import ResNet
from model_base.efNet_base_model import DeepLab
from model_base.efficientnet import EfficientNet
from model_base.resnet_more import CustomResNet
from model_base.xcption import Xception


def get_model(name, number_class, device, backbone):
    """
    Load the model selected by name and adapt its final classification layer
    to the number of task classes.

    Args:
        name (str): model name ('Vgg', 'ResNet18'..'ResNet152', 'EfficientNet',
            'Xception', 'DeepLab', 'resnet18_psa').
        number_class (int): number of output classes.
        device (torch.device): target device ('cuda' or 'cpu').
        backbone (str): backbone name, consulted only by the 'DeepLab' model.

    Returns:
        nn.Module: the adapted model.
    """
    if name == 'Vgg':
        model = models.vgg16_bn(pretrained=True)
        # Replace the classifier head before moving to the device so the new layer moves too.
        model.classifier[6] = nn.Linear(model.classifier[6].in_features, number_class)
        model = model.to(device)
    elif name == 'ResNet18':
        model = CustomResNet(resnet_type='resnet18', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet34':
        model = CustomResNet(resnet_type='resnet34', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet50':
        model = CustomResNet(resnet_type='resnet50', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet101':
        model = CustomResNet(resnet_type='resnet101', num_classes=number_class, pretrained=True).to(device)
    elif name == 'ResNet152':
        model = CustomResNet(resnet_type='resnet152', num_classes=number_class, pretrained=True).to(device)
    elif name == 'EfficientNet':
        # Load EfficientNet through the custom DeepLab wrapper.
        model = DeepLab(backbone='efficientnet', num_classes=number_class).to(device)
    elif name == 'Xception':
        model = Xception(
            in_planes=3,
            num_classes=number_class,
            pretrained=True,
            pretrained_path="/home/terminator/1325/yhs/fedLeaning/pre_model/xception-43020ad28.pth"
        ).to(device)
    elif name == 'DeepLab':
        # Load the custom DeepLab_F model with the requested backbone.
        model = DeepLab_F(num_classes=1, backbone=backbone).to(device)
    elif name == 'resnet18_psa':
        model = ResNet(BasicBlockWithPSA, [2, 2, 2, 2], number_class).to(device)
    else:
        raise ValueError(f"Model {name} is not supported.")
    return model
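
# Usage sketch (the model name and class count are illustrative; `backbone`
# is only consulted by the 'DeepLab' branch, so None is fine elsewhere):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = get_model('ResNet18', 2, device, backbone=None)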

federated_learning/utils/train_utils.py (new file, 368 lines)
@@ -0,0 +1,368 @@
import copy

import numpy as np
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, accuracy_score
from tqdm import tqdm


def train_deepmodel(device, model, loader, optimizer, criterion, epoch, model_name):
    model.train()
    running_loss = 0.0
    alpha = 1    # weight of the classification loss
    beta = 0.1   # weight of the L1 reconstruction loss
    for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
        inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on `device`
        optimizer.zero_grad()

        outputs, re_img = model(inputs)
        loss = criterion(outputs.squeeze(), labels.float())
        loss_F1 = F.l1_loss(re_img, inputs)
        loss = alpha * loss + beta * loss_F1
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    avg_loss = running_loss / len(loader)
    print(f'{model_name} Training Loss: {avg_loss:.4f}')
    return avg_loss


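# Wiring sketch: train_deepmodel expects the model to return a
# (logits, reconstructed_image) pair. Pairing it with a logits-based binary
# loss is an assumption, made because validation applies sigmoid to the raw
# outputs, e.g.:
#     criterion = torch.nn.BCEWithLogitsLoss()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#     avg_loss = train_deepmodel(device, model, loader, optimizer, criterion, 0, 'client1')
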
def validate_deepmodel(device, model, loader, criterion, epoch, model_name):
    model.eval()
    running_loss = 0.0
    all_labels, all_preds = [], []
    alpha = 1    # weight of the classification loss
    beta = 0.1   # weight of the L1 reconstruction loss

    with torch.no_grad():
        for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on `device`

            outputs, re_img = model(inputs)

            # Turn the logits into probability predictions.
            predicted = torch.sigmoid(outputs).data
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            loss = criterion(outputs.squeeze(), labels.float())
            loss_F1 = F.l1_loss(re_img, inputs)
            loss = alpha * loss + beta * loss_F1
            running_loss += loss.item()

    auc = roc_auc_score(all_labels, all_preds)
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # convert to a NumPy array for thresholding
    acc = accuracy_score(all_labels, predicted_labels)
    avg_loss = running_loss / len(loader)
    print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
    return avg_loss, acc, auc


def test_deepmodel(device, model, loader):
    model.eval()
    all_labels, all_preds = [], []

    with torch.no_grad():
        for inputs, labels in tqdm(loader, desc='Testing', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on `device`
            outputs, re_img = model(inputs)
            predicted = torch.sigmoid(outputs).data  # turn the logits into probability predictions

            # Collect the predictions and ground-truth labels.
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    # Threshold the predictions into binary labels.
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)

    # Compute accuracy and AUC.
    acc = accuracy_score(all_labels, predicted_labels)
    auc = roc_auc_score(all_labels, all_preds)

    print(f'Test Accuracy: {acc:.4f}, Test AUC: {auc:.4f}')
    return acc, auc


# An earlier train_model variant, kept for reference, with extra debug checks:
# def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
#     model.train()
#     running_loss = 0.0
#     for i, (inputs, labels) in enumerate(tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch')):
#         inputs, labels = inputs.float().to(device), labels.float().to(device)  # make sure the data format is correct
#         optimizer.zero_grad()
#
#         outputs = model(inputs)
#         loss = criterion(outputs.squeeze(), labels)
#
#         # Print a sample output and label to check the format.
#         if i % 10 == 0:  # print every 10 batches
#             print(f"Batch {i} - Sample Output: {outputs[0].item():.4f}, Sample Label: {labels[0].item()}")
#
#         # Check for abnormal loss values.
#         if loss.item() < 0:
#             print(f"Warning: Negative loss detected at batch {i}. Loss: {loss.item()}")
#
#         loss.backward()
#         optimizer.step()
#
#         running_loss += loss.item()
#
#     avg_loss = running_loss / len(loader)
#     print(f'{model_name} Training Loss: {avg_loss:.4f}')
#     return avg_loss
def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
    model.train()
    running_loss = 0.0
    for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
        inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on `device`
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs.squeeze(), labels.float())
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    avg_loss = running_loss / len(loader)
    print(f'{model_name} Training Loss: {avg_loss:.4f}')
    return avg_loss


def validate_model(device, model, loader, criterion, epoch, model_name):
    model.eval()
    running_loss = 0.0
    all_labels, all_preds = [], []

    with torch.no_grad():
        for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
            inputs, labels = inputs.float().to(device), labels.to(device)  # make sure the data is on `device`

            outputs = model(inputs)

            # Turn the logits into probability predictions.
            predicted = torch.sigmoid(outputs).data
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            loss = criterion(outputs.squeeze(), labels.float())
            running_loss += loss.item()
    auc = roc_auc_score(all_labels, all_preds)
    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # convert to a NumPy array for thresholding
    acc = accuracy_score(all_labels, predicted_labels)
    avg_loss = running_loss / len(loader)
    print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
    return avg_loss, acc, auc


# Weight aggregation: FedAvg-style weighted average of three state dicts.
def aggregate_weights(weights_list, alpha=1 / 3, beta=1 / 3, gamma=1 / 3):
    new_state_dict = copy.deepcopy(weights_list[0])  # copy the weight structure from the first model
    for key in new_state_dict.keys():
        new_state_dict[key] = (alpha * weights_list[0][key] +
                               beta * weights_list[1][key] +
                               gamma * weights_list[2][key])
    return new_state_dict


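# Sanity-check sketch: with the default 1/3 weights the aggregation reduces to
# plain FedAvg, so three toy "state dicts" simply average element-wise.
w1 = {'w': torch.tensor([0.0])}
w2 = {'w': torch.tensor([3.0])}
w3 = {'w': torch.tensor([6.0])}
assert torch.allclose(aggregate_weights([w1, w2, w3])['w'], torch.tensor([3.0]))
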
def v3_update_model_weights(
        epoch,
        model_to_update,
        other_models,
        global_model,
        losses,
        val_loader,
        device,
        val_auc_threshold,  # current validation-AUC threshold of the model to update
        validate_model,
        criterion,
        update_frequency
):
    """
    Update the model weights when the given conditions are met.

    Args:
        epoch (int): current training epoch.
        model_to_update: the model to update.
        other_models (list): the other models, used to compute the global model weights.
        global_model: the global model.
        losses (list): the per-model loss values.
        val_loader: DataLoader for the validation data.
        device: device ('cuda' or 'cpu').
        val_auc_threshold (float): current validation-AUC threshold of the model to update.
        validate_model (function): function that validates a model.
        criterion: loss function.
        update_frequency (int): how often (in epochs) the weights are updated.

    Returns:
        val_acc (float): validation accuracy of the global model.
        val_auc (float): validation AUC of the global model.
        updated_val_auc_threshold (float): the updated validation-AUC threshold.
    """
    if (epoch + 1) % update_frequency == 0:
        # Gather the weights of all client models.
        all_weights = [model.state_dict() for model in other_models]
        avg_weights = aggregate_weights(all_weights)  # aggregate the weights

        # Update the global model weights.
        global_model.load_state_dict(avg_weights)

        # Compute the (approximately equal) weighted-average loss.
        weighted_loss = sum(loss * 0.33 for loss in losses)
        print(f"Weighted Average Loss: {weighted_loss:.4f}")

        # Validate the global model.
        val_loss, val_acc, val_auc = validate_model(device, global_model, val_loader, criterion, epoch, 'global_model')
        print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')

        # If the global model's AUC is higher, update the target model.
        if val_auc > val_auc_threshold:
            print(f'Updating model at epoch {epoch + 1}')
            model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # raise the AUC threshold

        return val_acc, val_auc, val_auc_threshold
    return None, None, val_auc_threshold


def update_model_weights(
        epoch,
        model_to_update,
        other_models,
        global_model,
        losses,
        val_loader,
        device,
        val_auc_threshold,  # current validation-AUC threshold of the model to update
        validate_model,
        criterion,
        update_frequency
):
    """
    Update the model weights when the given conditions are met.

    Args:
        epoch (int): current training epoch.
        model_to_update: the model to update.
        other_models (list): the other models, used to compute the global model weights.
        global_model: the global model.
        losses (list): the per-model loss values.
        val_loader: DataLoader for the validation data.
        device: device ('cuda' or 'cpu').
        val_auc_threshold (float): current validation-AUC threshold of the model to update.
        validate_model (function): validation function (note: this variant calls
            validate_deepmodel directly instead).
        criterion: loss function.
        update_frequency (int): how often (in epochs) the weights are updated.

    Returns:
        val_acc (float): validation accuracy of the global model.
        val_auc (float): validation AUC of the global model.
        updated_val_auc_threshold (float): the updated validation-AUC threshold.
    """
    if (epoch + 1) % update_frequency == 0:
        # Gather the weights of all client models.
        all_weights = [model.state_dict() for model in other_models]
        avg_weights = aggregate_weights(all_weights)  # aggregate the weights

        # Update the global model weights.
        global_model.load_state_dict(avg_weights)

        # Compute the (approximately equal) weighted-average loss.
        weighted_loss = sum(loss * 0.33 for loss in losses)
        print(f"Weighted Average Loss: {weighted_loss:.4f}")

        # Validate the global model.
        val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
                                                        'global_model')
        print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')

        # If the global model's AUC is higher, update the target model.
        if val_auc > val_auc_threshold:
            print(f'Updating model at epoch {epoch + 1}')
            model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # raise the AUC threshold

        return val_acc, val_auc, val_auc_threshold
    return None, None, val_auc_threshold


def f_update_model_weights(
        epoch,
        model_to_update,
        other_models,
        global_model,
        losses,
        val_loader,
        device,
        val_auc_threshold,  # current validation-AUC threshold of the model to update
        aggregate_weights,  # weight aggregation function
        validate_model,
        criterion,
        update_frequency
):
    """
    Update the model weights when the given conditions are met.

    Args:
        epoch (int): current training epoch.
        model_to_update: the model to update.
        other_models (list): the other models, used to compute the global model weights.
        global_model: the global model.
        losses (list): the per-model loss values.
        val_loader: DataLoader for the validation data.
        device: device ('cuda' or 'cpu').
        val_auc_threshold (float): current validation-AUC threshold of the model to update.
        aggregate_weights (function): weight aggregation function.
        validate_model (function): function that validates a model.
        criterion: loss function.
        update_frequency (int): how often (in epochs) the weights are updated.

    Returns:
        val_acc (float): validation accuracy of the global model.
        val_auc (float): validation AUC of the global model.
        updated_val_auc_threshold (float): the updated validation-AUC threshold.
    """
    # Update the model weights every `update_frequency` epochs.
    if (epoch + 1) % update_frequency == 0:
        print(f"\n[Epoch {epoch + 1}] Updating global model weights...")

        # Gather the weights of the other models.
        all_weights = [model.state_dict() for model in other_models]

        # Compute the global weights with the aggregation function.
        avg_weights = aggregate_weights(all_weights)
        print("Global model weights aggregated.")

        # Update the global model weights.
        global_model.load_state_dict(avg_weights)

        # Compute the weighted-average loss (equal weights).
        weighted_loss = sum(loss * (1 / len(losses)) for loss in losses)
        print(f"Weighted Average Loss: {weighted_loss:.4f}")

        # Validate the global model.
        val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
                                                        'global_model')
        print(f"[Global Model] Validation Loss: {val_loss:.4f}, Accuracy: {val_acc:.4f}, AUC: {val_auc:.4f}")

        # If the global model's AUC beats the threshold, update the target model weights.
        if val_auc > val_auc_threshold:
            print(f"Global model AUC improved ({val_auc:.4f} > {val_auc_threshold:.4f}). Updating target model.")
            model_to_update.load_state_dict(global_model.state_dict())
            val_auc_threshold = val_auc  # raise the AUC threshold
        else:
            print(
                f"Global model AUC did not improve ({val_auc:.4f} <= {val_auc_threshold:.4f}). No update to target model.")

        return val_acc, val_auc, val_auc_threshold

    # Not an update epoch: return the current AUC threshold unchanged.
    return None, None, val_auc_threshold

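
# One-round wiring sketch under assumptions this file does not fix: three
# clients with per-client optimizers and a shared binary criterion. All
# parameter names here are hypothetical; train_model, validate_model, and
# v3_update_model_weights are the functions defined above.
def run_federated_round(epoch, client_models, client_loaders, optimizers,
                        global_model, val_loader, device, criterion,
                        thresholds, update_frequency):
    # Local training on each client.
    losses = [train_model(device, m, ldr, opt, criterion, epoch, f'client{i + 1}')
              for i, (m, ldr, opt) in enumerate(zip(client_models, client_loaders, optimizers))]
    # Aggregate into the global model and push it back to clients it improves.
    for i, m in enumerate(client_models):
        _, _, thresholds[i] = v3_update_model_weights(
            epoch, m, client_models, global_model, losses, val_loader,
            device, thresholds[i], validate_model, criterion, update_frequency)
    return thresholds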