From 9f827af58e4764411045b91542db0dfea3249c52 Mon Sep 17 00:00:00 2001
From: myh <yunhao.meng@outlook.com>
Date: Tue, 22 Apr 2025 14:51:15 +0800
Subject: [PATCH] Remove unused example code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 fed_example/__init__.py          |   0
 fed_example/res18Train.py        | 155 -------------
 fed_example/utils/__init__.py    |   0
 fed_example/utils/data_utils.py  | 239 --------------------
 fed_example/utils/model_utils.py |  65 ------
 fed_example/utils/train_utils.py | 370 -------------------------------
 6 files changed, 829 deletions(-)
 delete mode 100644 fed_example/__init__.py
 delete mode 100644 fed_example/res18Train.py
 delete mode 100644 fed_example/utils/__init__.py
 delete mode 100644 fed_example/utils/data_utils.py
 delete mode 100644 fed_example/utils/model_utils.py
 delete mode 100644 fed_example/utils/train_utils.py

diff --git a/fed_example/__init__.py b/fed_example/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/fed_example/res18Train.py b/fed_example/res18Train.py
deleted file mode 100644
index aef3011..0000000
--- a/fed_example/res18Train.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import argparse
-import torch
-import os
-from torch import optim
-from torch.optim import lr_scheduler
-from fed_example.utils.data_utils import get_data
-from fed_example.utils.model_utils import get_model
-from fed_example.utils.train_utils import train_model, validate_model, v3_update_model_weights
-
-
-def main(args):
-    device = torch.device(args.device)
-    
-    # Data loaders: three training shards plus a shared validation loader
-    loader1, loader2, loader3, subset_len, val_loader = get_data(
-        args.train_path, args.val_path, args.batch_size, args.number_workers
-    )
-    
-    # Client models, e.g. get_model(name='ResNet18', number_class=2, device=device, backbone='*')
-    model_a = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
-    model_b = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
-    model_c = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
-    # Global model that receives the aggregated weights
-    global_model = get_model(args.model_name, args.number_class, device, args.deep_backbone).to(device)
-    
-    if args.resume_training:
-        model_a.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_a.pth'), map_location=device))
-        model_b.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_b.pth'), map_location=device))
-        model_c.load_state_dict(torch.load(os.path.join(args.save_dir, 'best_model_c.pth'), map_location=device))
-        print("Loaded previously saved model weights; resuming training")
-    
-    # Loss function, optimizers, and LR schedulers
-    criterion = torch.nn.BCEWithLogitsLoss().to(device)
-    
-    optimizer_a = optim.Adam(model_a.parameters(), lr=args.lr, weight_decay=5e-4)
-    optimizer_b = optim.Adam(model_b.parameters(), lr=args.lr, weight_decay=5e-4)
-    optimizer_c = optim.Adam(model_c.parameters(), lr=args.lr, weight_decay=5e-4)
-    scheduler_a = lr_scheduler.ReduceLROnPlateau(optimizer_a, mode='min', factor=0.5, patience=2, verbose=True)
-    scheduler_b = lr_scheduler.ReduceLROnPlateau(optimizer_b, mode='min', factor=0.5, patience=2, verbose=True)
-    scheduler_c = lr_scheduler.ReduceLROnPlateau(optimizer_c, mode='min', factor=0.5, patience=2, verbose=True)
-    
-    # Track the best validation loss per client model
-    best_val_loss_a = float('inf')
-    best_val_loss_b = float('inf')
-    best_val_loss_c = float('inf')
-    
-    save_dir = args.save_dir
-    os.makedirs(save_dir, exist_ok=True)
-    
-    # Training and validation loop
-    for epoch in range(args.epochs):
-        print(f'Epoch {epoch + 1}/{args.epochs}')
-        
-        # Train each client model on its own data shard
-        loss_a = train_model(device, model_a, loader1, optimizer_a, criterion, epoch, 'model_a')
-        loss_b = train_model(device, model_b, loader2, optimizer_b, criterion, epoch, 'model_b')
-        loss_c = train_model(device, model_c, loader3, optimizer_c, criterion, epoch, 'model_c')
-        
-        # Validate every client model on the shared validation set
-        val_loss_a, val_acc_a, val_auc_a = validate_model(device, model_a, val_loader, criterion, epoch, 'model_a')
-        val_loss_b, val_acc_b, val_auc_b = validate_model(device, model_b, val_loader, criterion, epoch, 'model_b')
-        val_loss_c, val_acc_c, val_auc_c = validate_model(device, model_c, val_loader, criterion, epoch, 'model_c')
-        
-        # Step the plateau schedulers on each model's validation loss
-        # (the schedulers were created above but never stepped)
-        scheduler_a.step(val_loss_a)
-        scheduler_b.step(val_loss_b)
-        scheduler_c.step(val_loss_c)
-        
-        if args.save_model and val_loss_a < best_val_loss_a:
-            best_val_loss_a = val_loss_a
-            torch.save(model_a.state_dict(), os.path.join(save_dir, 'best_model_a.pth'))
-            print(f"Best model_a saved with val_loss: {best_val_loss_a:.4f}")
-        
-        if args.save_model and val_loss_b < best_val_loss_b:
-            best_val_loss_b = val_loss_b
-            torch.save(model_b.state_dict(), os.path.join(save_dir, 'best_model_b.pth'))
-            print(f"Best model_b saved with val_loss: {best_val_loss_b:.4f}")
-        
-        if args.save_model and val_loss_c < best_val_loss_c:
-            best_val_loss_c = val_loss_c
-            torch.save(model_c.state_dict(), os.path.join(save_dir, 'best_model_c.pth'))
-            print(f"Best model_c saved with val_loss: {best_val_loss_c:.4f}")
-        
-        print(
-            f'Model A - Loss: {loss_a:.4f}, Val Loss: {val_loss_a:.4f}, Val Acc: {val_acc_a:.4f}, AUC: {val_auc_a:.4f}')
-        print(
-            f'Model B - Loss: {loss_b:.4f}, Val Loss: {val_loss_b:.4f}, Val Acc: {val_acc_b:.4f}, AUC: {val_auc_b:.4f}')
-        print(
-            f'Model C - Loss: {loss_c:.4f}, Val Loss: {val_loss_c:.4f}, Val Acc: {val_acc_c:.4f}, AUC: {val_auc_c:.4f}')
-        
-        # Aggregate and possibly update model A (update_frequency=1: aggregate every epoch)
-        val_acc_a, val_auc_a, val_auc_a_threshold = v3_update_model_weights(
-            epoch=epoch,
-            model_to_update=model_a,
-            other_models=[model_a, model_b, model_c],
-            global_model=global_model,
-            losses=[loss_a, loss_b, loss_c],
-            val_loader=val_loader,
-            device=device,
-            val_auc_threshold=val_auc_a,
-            validate_model=validate_model,
-            criterion=criterion,
-            update_frequency=1
-        )
-        
-        # Aggregate and possibly update model B (update_frequency=1)
-        val_acc_b, val_auc_b, val_auc_b_threshold = v3_update_model_weights(
-            epoch=epoch,
-            model_to_update=model_b,
-            other_models=[model_a, model_b, model_c],
-            global_model=global_model,
-            losses=[loss_a, loss_b, loss_c],
-            val_loader=val_loader,
-            device=device,
-            val_auc_threshold=val_auc_b,
-            validate_model=validate_model,
-            criterion=criterion,
-            update_frequency=1
-        )
-        
-        # Aggregate and possibly update model C (update_frequency=1)
-        val_acc_c, val_auc_c, val_auc_c_threshold = v3_update_model_weights(
-            epoch=epoch,
-            model_to_update=model_c,
-            other_models=[model_a, model_b, model_c],
-            global_model=global_model,
-            losses=[loss_a, loss_b, loss_c],
-            val_loader=val_loader,
-            device=device,
-            val_auc_threshold=val_auc_c,
-            validate_model=validate_model,
-            criterion=criterion,
-            update_frequency=1
-        )
-    
-    print("Training complete! Best models saved.")
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--model_name', type=str, default='resnet18_psa', help='Model name')
-    parser.add_argument('--deep_backbone', type=str, default='*', help='deeplab backbone')
-    parser.add_argument('--train_path', type=str, default='/media/terminator/实验&代码/yhs/FF++/c40/total/train')
-    parser.add_argument('--val_path', type=str, default='/media/terminator/实验&代码/yhs/FF++/c40/total/val')
-    # parser.add_argument('--train_path', type=str, default='/media/terminator/实验&代码/yhs/FF++_mask_sample/c23/df/train')
-    # parser.add_argument('--val_path', type=str, default='/media/terminator/实验&代码/yhs/FF++_mask_sample/c23/df/val')
-    parser.add_argument('--epochs', type=int, default=10)
-    parser.add_argument('--batch_size', type=int, default=16)
-    parser.add_argument('--number_workers', type=int, default=8)
-    parser.add_argument('--number_class', type=int, default=1)
-    parser.add_argument('--device', type=str, default='cuda:0')
-    parser.add_argument('--lr', type=float, default=0.00005)
-    parser.add_argument('--save_dir', type=str,
-                        default='/media/terminator/实验&代码/yhs/output/work2/resnet18_psa/c40/total/e10',
-                        help='Directory to save best models')
-    # Note: argparse's type=bool treats any non-empty string as True, so use BooleanOptionalAction instead
-    parser.add_argument('--save_model', action=argparse.BooleanOptionalAction, default=True,
-                        help='save the best models')
-    parser.add_argument('--resume_training', action=argparse.BooleanOptionalAction, default=False,
-                        help='resume training from previously saved model weights')
-    args = parser.parse_args()
-    
-    main(args)
diff --git a/fed_example/utils/__init__.py b/fed_example/utils/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/fed_example/utils/data_utils.py b/fed_example/utils/data_utils.py
deleted file mode 100644
index e7f322f..0000000
--- a/fed_example/utils/data_utils.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import os
-from collections import Counter
-
-import torch
-from PIL import Image
-from sklearn.model_selection import train_test_split
-from torch.utils.data import DataLoader
-from torch.utils.data import Dataset, random_split
-from torchvision import transforms, datasets
-
-
-class CustomImageDataset(Dataset):
-    def __init__(self, root_dir, transform=None):
-        self.root_dir = root_dir
-        self.transform = transform
-        self.image_paths = []
-        self.labels = []
-        
-        # Walk the class subfolders 0 and 1 under root_dir
-        for label in [0, 1]:
-            folder_path = os.path.join(root_dir, str(label))
-            if os.path.isdir(folder_path):
-                for img_name in os.listdir(folder_path):
-                    img_path = os.path.join(folder_path, img_name)
-                    self.image_paths.append(img_path)
-                    self.labels.append(label)
-    
-    def __len__(self):
-        return len(self.image_paths)
-    
-    def __getitem__(self, idx):
-        img_path = self.image_paths[idx]
-        label = self.labels[idx]
-        image = Image.open(img_path).convert("RGB")
-        
-        if self.transform:
-            image = self.transform(image)
-        
-        return image, label
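-
-# Expected directory layout for CustomImageDataset (a sketch; the loader above only
-# requires class subfolders named 0 and 1 -- in this FF++ setup they presumably
-# correspond to real vs. fake frames):
-#   root_dir/
-#       0/ img_0001.png ...
-#       1/ img_0001.png ...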
-
-
-def get_test_data(test_image_path, batch_size, nw):
-    data_transform = transforms.Compose([
-        transforms.Resize((256, 256)),
-        transforms.ToTensor(),
-        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-    ])
-    
-    # test_dataset = datasets.ImageFolder(root=test_image_path, transform=data_transform)
-    
-    test_dataset = CustomImageDataset(root_dir=test_image_path, transform=data_transform)
-    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=nw)
-    return test_loader
-
-
-def get_Onedata(train_image_path, val_image_path, batch_size, num_workers):
-    """
-    Load the full training and validation datasets (no client partitioning).
-    """
-    data_transform = {
-        "train": transforms.Compose([
-            transforms.Resize((256, 256)),
-            transforms.RandomHorizontalFlip(),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-        ]),
-        "val": transforms.Compose([
-            transforms.Resize((256, 256)),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-        ]),
-    }
-    
-    # Build the training and validation datasets
-    train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
-    val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])
-    
-    # Build the data loaders
-    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-    
-    return train_loader, val_loader
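-
-# Usage sketch (batch size and worker count are illustrative):
-#   train_loader, val_loader = get_Onedata(train_image_path, val_image_path, 16, 8)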
-
-
-def get_data(train_image_path, val_image_path, batch_size, num_workers):
-    data_transform = {
-        "train": transforms.Compose([
-            transforms.Resize((256, 256)),
-            transforms.RandomHorizontalFlip(),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-        ]),
-        "val": transforms.Compose([
-            transforms.Resize((256, 256)),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-        ]),
-        "test": transforms.Compose([
-            transforms.Resize((256, 256)),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-        ]),
-    }
-    
-    train_dataset = CustomImageDataset(root_dir=train_image_path, transform=data_transform["train"])
-    val_dataset = CustomImageDataset(root_dir=val_image_path, transform=data_transform["val"])
-    
-    # Truncate to a multiple of 3 and split into three equal client subsets
-    # (random_split requires the split sizes to sum to the dataset length)
-    train_len = (len(train_dataset) // 3) * 3
-    train_dataset_truncated = torch.utils.data.Subset(train_dataset, range(train_len))
-    subset_len = train_len // 3
-    dataset1, dataset2, dataset3 = random_split(train_dataset_truncated, [subset_len] * 3)
-    
-    loader1 = DataLoader(dataset1, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-    loader2 = DataLoader(dataset2, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-    loader3 = DataLoader(dataset3, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-    
-    return loader1, loader2, loader3, subset_len, val_loader
-
-
-def get_Fourdata(train_path, val_path, batch_size, num_workers):
-    """
-    加载训练集和验证集。
-    包括 4 个客户端验证集(df、f2f、fs、nt)和 1 个全局验证集。
-
-    Args:
-        train_path (str): 训练数据路径
-        val_path (str): 验证数据路径
-        batch_size (int): 批量大小
-        num_workers (int): DataLoader 的工作线程数
-
-    Returns:
-        tuple: 包含 4 个客户端训练和验证加载器,以及全局验证加载器
-    """
-    # Preprocessing transforms
-    train_transform = transforms.Compose([
-        transforms.Resize((256, 256)),
-        transforms.RandomHorizontalFlip(),
-        transforms.ToTensor(),
-        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-    ])
-    
-    val_transform = transforms.Compose([
-        transforms.Resize((256, 256)),
-        transforms.ToTensor(),
-        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-    ])
-    
-    # The four client subdirectories
-    client_names = ['df', 'f2f', 'fs', 'nt']
-    client_train_loaders = []
-    client_val_loaders = []
-    
-    for client in client_names:
-        client_train_path = os.path.join(train_path, client)
-        client_val_path = os.path.join(val_path, client)
-        
-        # Client training data
-        train_dataset = datasets.ImageFolder(root=client_train_path, transform=train_transform)
-        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-        
-        # Client validation data
-        val_dataset = datasets.ImageFolder(root=client_val_path, transform=val_transform)
-        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-        
-        client_train_loaders.append(train_loader)
-        client_val_loaders.append(val_loader)
-    
-    # Global validation set. ImageFolder(root=val_path) would treat the client
-    # folders df/f2f/fs/nt themselves as class labels, so concatenate the
-    # per-client validation sets instead.
-    global_val_dataset = torch.utils.data.ConcatDataset(
-        [datasets.ImageFolder(os.path.join(val_path, c), val_transform) for c in client_names])
-    global_val_loader = DataLoader(global_val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-    
-    return (*client_train_loaders, *client_val_loaders, global_val_loader)
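-
-# Expected layout for get_Fourdata (a sketch; the class subfolders are whatever
-# ImageFolder discovers under each client directory, e.g. 0/ and 1/):
-#   train_path/{df,f2f,fs,nt}/<class>/*.png
-#   val_path/{df,f2f,fs,nt}/<class>/*.png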
-
-
-def get_federated_data(train_path, val_path, num_clients=3, batch_size=16, num_workers=8):
-    """
-    Partition the training set across multiple clients; each client gets an
-    independent training shard, and all clients share a global validation set.
-    """
-    # Load the full datasets (get_transform was undefined here; reuse the standard preprocessing inline)
-    norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-    train_tf = transforms.Compose(
-        [transforms.Resize((256, 256)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), norm])
-    val_tf = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), norm])
-    full_train_dataset = CustomImageDataset(root_dir=train_path, transform=train_tf)
-    full_val_dataset = CustomImageDataset(root_dir=val_path, transform=val_tf)
-    
-    # Truncate so the random_split sizes sum exactly to the dataset length
-    usable_len = (len(full_train_dataset) // num_clients) * num_clients
-    train_subset = torch.utils.data.Subset(full_train_dataset, range(usable_len))
-    client_train_datasets = random_split(train_subset, [usable_len // num_clients] * num_clients)
-    
-    # Per-client data loaders
-    client_train_loaders = [
-        DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-        for ds in client_train_datasets
-    ]
-    
-    # Shared global validation loader
-    global_val_loader = DataLoader(full_val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-    
-    return client_train_loaders, global_val_loader
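-
-# Usage sketch (paths are illustrative):
-#   client_loaders, global_val_loader = get_federated_data('/data/train', '/data/val', num_clients=3)
-#   # -> one training DataLoader per client plus one shared validation DataLoader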
-
-
-def main():
-    # Configuration
-    train_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/train"
-    val_image_path = "/media/terminator/实验&代码/yhs/FF++_mask/c23/f2f/val"
-    batch_size = 4
-    num_workers = 2
-    
-    # Build the data loaders
-    loader1, loader2, loader3, subset_len, val_loader = get_data(train_image_path, val_image_path, batch_size,
-                                                                 num_workers)
-    
-    # Collect the labels from every shard
-    train_labels = []
-    for dataset in [loader1, loader2, loader3]:
-        for _, labels in dataset:
-            train_labels.extend(labels.tolist())
-    
-    val_labels = []
-    for _, labels in val_loader:
-        val_labels.extend(labels.tolist())
-    
-    # Tally label frequencies with Counter
-    train_label_counts = Counter(train_labels)
-    val_label_counts = Counter(val_labels)
-    
-    # Report the statistics
-    print("Training Dataset - Label Counts:", train_label_counts)
-    print("Validation Dataset - Label Counts:", val_label_counts)
-    print("Label Types in Training:", set(train_labels))
-    print("Label Types in Validation:", set(val_labels))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/fed_example/utils/model_utils.py b/fed_example/utils/model_utils.py
deleted file mode 100644
index 71664ad..0000000
--- a/fed_example/utils/model_utils.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import torch
-from torch import nn
-from torchvision import models
-
-from Deeplab.deeplab import DeepLab_F
-from Deeplab.resnet_psa import BasicBlockWithPSA
-from Deeplab.resnet_psa_v2 import ResNet
-from model_base.efNet_base_model import DeepLab
-from model_base.efficientnet import EfficientNet
-from model_base.resnet_more import CustomResNet
-from model_base.xcption import Xception
-
-
-def get_model(name, number_class, device, backbone):
-    """
-    根据指定的模型名称加载模型,并根据任务类别数调整最后的分类层。
-
-    Args:
-        name (str): 模型名称 ('Vgg', 'ResNet', 'EfficientNet', 'Xception')。
-        number_class (int): 分类类别数。
-        device (torch.device): 设备 ('cuda' or 'cpu')。
-        resnet_type (str): ResNet类型 ('resnet18', 'resnet34', 'resnet50', 'resnet101', etc.)。
-
-    Returns:
-        nn.Module: 经过修改的模型。
-    """
-    if name == 'Vgg':
-        model = models.vgg16_bn(pretrained=True).to(device)
-        model.classifier[6] = nn.Linear(model.classifier[6].in_features, number_class)
-    elif name == 'ResNet18':
-        model = CustomResNet(resnet_type='resnet18', num_classes=number_class, pretrained=True).to(device)
-    elif name == 'ResNet34':
-        model = CustomResNet(resnet_type='resnet34', num_classes=number_class, pretrained=True).to(device)
-    elif name == 'ResNet50':
-        model = CustomResNet(resnet_type='resnet50', num_classes=number_class, pretrained=True).to(device)
-    elif name == 'ResNet101':
-        model = CustomResNet(resnet_type='resnet101', num_classes=number_class, pretrained=True).to(device)
-    elif name == 'ResNet152':
-        model = CustomResNet(resnet_type='resnet152', num_classes=number_class, pretrained=True).to(device)
-    elif name == 'EfficientNet':
-        # Load EfficientNet through the custom DeepLab wrapper
-        model = DeepLab(backbone='efficientnet', num_classes=number_class).to(device)
-    elif name == 'Xception':
-        model = Xception(
-            in_planes=3,
-            num_classes=number_class,
-            pretrained=True,
-            pretrained_path="/home/terminator/1325/yhs/fedLeaning/pre_model/xception-43020ad28.pth"
-        ).to(device)
-    elif name == 'DeepLab':
-        # Custom DeepLab_F model with the requested backbone
-        model = DeepLab_F(num_classes=number_class, backbone=backbone).to(device)
-    elif name == 'resnet18_psa':
-        # Move to device here as well, since get_federated_model relies on it
-        model = ResNet(BasicBlockWithPSA, [2, 2, 2, 2], number_class).to(device)
-    else:
-        raise ValueError(f"Model {name} is not supported.")
-    return model
-
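-# Usage sketch (mirroring the call in get_federated_model below; the device is illustrative):
-#   model = get_model('resnet18_psa', number_class=1, device=torch.device('cuda:0'), backbone='*')
-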
-def get_federated_model(device):
-    """初始化客户端模型和全局模型"""
-    client_models = [
-        get_model("resnet18_psa", 1, device, "*") for _ in range(3)
-    ]
-    global_model = get_model("resnet18_psa", 1, device, "*")
-    return client_models, global_model
\ No newline at end of file
diff --git a/fed_example/utils/train_utils.py b/fed_example/utils/train_utils.py
deleted file mode 100644
index 6992eb2..0000000
--- a/fed_example/utils/train_utils.py
+++ /dev/null
@@ -1,370 +0,0 @@
-import numpy as np
-import torch
-from tqdm import tqdm
-from sklearn.metrics import roc_auc_score, accuracy_score
-import copy
-import torch.nn.functional as F
-
-
-def train_deepmodel(device, model, loader, optimizer, criterion, epoch, model_name):
-    model.train()
-    running_loss = 0.0
-    alpha, beta = 1, 0.1  # weights for the classification and reconstruction terms
-    for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
-        inputs, labels = inputs.float().to(device), labels.to(device)  # move the batch to the device
-        optimizer.zero_grad()
-        
-        outputs, re_img = model(inputs)
-        # Combined objective: classification BCE plus a small L1 reconstruction term
-        loss_cls = criterion(outputs.squeeze(), labels.float())
-        loss_rec = F.l1_loss(re_img, inputs)
-        loss = alpha * loss_cls + beta * loss_rec
-        loss.backward()
-        optimizer.step()
-        
-        running_loss += loss.item()
-    
-    avg_loss = running_loss / len(loader)
-    print(f'{model_name} Training Loss: {avg_loss:.4f}')
-    return avg_loss
-
-
-def validate_deepmodel(device, model, loader, criterion, epoch, model_name):
-    model.eval()
-    running_loss = 0.0
-    all_labels, all_preds = [], []
-    alpha, beta = 1, 0.1  # same loss weighting as in training
-    
-    with torch.no_grad():
-        for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
-            inputs, labels = inputs.float().to(device), labels.to(device)  # move the batch to the device
-            
-            outputs, re_img = model(inputs)
-            
-            # Convert logits to probabilities
-            predicted = torch.sigmoid(outputs)
-            all_preds.extend(predicted.cpu().numpy())
-            all_labels.extend(labels.cpu().numpy())
-            
-            loss_cls = criterion(outputs.squeeze(), labels.float())
-            loss_rec = F.l1_loss(re_img, inputs)
-            loss = alpha * loss_cls + beta * loss_rec
-            running_loss += loss.item()
-    
-    auc = roc_auc_score(all_labels, all_preds)
-    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # threshold probabilities at 0.5
-    acc = accuracy_score(all_labels, predicted_labels)
-    avg_loss = running_loss / len(loader)
-    print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
-    return avg_loss, acc, auc
-
-
-def test_deepmodel(device, model, loader):
-    model.eval()
-    all_labels, all_preds = [], []
-    
-    with torch.no_grad():
-        for inputs, labels in tqdm(loader, desc='Testing', unit='batch'):
-            inputs, labels = inputs.float().to(device), labels.to(device)  # move the batch to the device
-            outputs, re_img = model(inputs)
-            predicted = torch.sigmoid(outputs)  # convert logits to probabilities
-            
-            # Collect predictions and ground-truth labels
-            all_preds.extend(predicted.cpu().numpy())
-            all_labels.extend(labels.cpu().numpy())
-    
-    # Threshold the probabilities into binary labels
-    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)
-    
-    # Compute accuracy and AUC
-    acc = accuracy_score(all_labels, predicted_labels)
-    auc = roc_auc_score(all_labels, all_preds)
-    
-    print(f'Test Accuracy: {acc:.4f}, Test AUC: {auc:.4f}')
-    return acc, auc
-
-
-def train_model(device, model, loader, optimizer, criterion, epoch, model_name):
-    model.train()
-    running_loss = 0.0
-    for inputs, labels in tqdm(loader, desc=f'Training {model_name} Epoch {epoch + 1}', unit='batch'):
-        inputs, labels = inputs.float().to(device), labels.to(device)  # move the batch to the device
-        optimizer.zero_grad()
-        
-        outputs = model(inputs)
-        loss = criterion(outputs.squeeze(), labels.float())
-        loss.backward()
-        optimizer.step()
-        
-        running_loss += loss.item()
-    
-    avg_loss = running_loss / len(loader)
-    print(f'{model_name} Training Loss: {avg_loss:.4f}')
-    return avg_loss
-
-
-def validate_model(device, model, loader, criterion, epoch, model_name):
-    model.eval()
-    running_loss = 0.0
-    all_labels, all_preds = [], []
-    
-    with torch.no_grad():
-        for inputs, labels in tqdm(loader, desc=f'Validating {model_name} Epoch {epoch + 1}', unit='batch'):
-            inputs, labels = inputs.float().to(device), labels.to(device)  # move the batch to the device
-            
-            outputs = model(inputs)
-            
-            # Convert logits to probabilities
-            predicted = torch.sigmoid(outputs)
-            all_preds.extend(predicted.cpu().numpy())
-            all_labels.extend(labels.cpu().numpy())
-            
-            loss = criterion(outputs.squeeze(), labels.float())
-            running_loss += loss.item()
-    auc = roc_auc_score(all_labels, all_preds)
-    predicted_labels = (np.array(all_preds) >= 0.5).astype(int)  # threshold probabilities at 0.5
-    acc = accuracy_score(all_labels, predicted_labels)
-    avg_loss = running_loss / len(loader)
-    print(f'{model_name} Validation Loss: {avg_loss:.4f}, Accuracy: {acc:.4f}, AUC: {auc:.4f}')
-    return avg_loss, acc, auc
-
-
-# Weight aggregation: FedAvg-style weighted average of three state dicts
-def aggregate_weights(weights_list, alpha=1 / 3, beta=1 / 3, gamma=1 / 3):
-    new_state_dict = copy.deepcopy(weights_list[0])  # copy the key structure from the first model
-    for key in new_state_dict.keys():
-        new_state_dict[key] = (alpha * weights_list[0][key] +
-                               beta * weights_list[1][key] +
-                               gamma * weights_list[2][key])
-    return new_state_dict
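-
-# Worked example (a sketch): for every parameter key k,
-#   new[k] = alpha * w_a[k] + beta * w_b[k] + gamma * w_c[k]
-# With the default alpha = beta = gamma = 1/3 this is the element-wise mean of the
-# three client state dicts, i.e. plain FedAvg over three equally weighted clients.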
-
-
-def v3_update_model_weights(
-        epoch,
-        model_to_update,
-        other_models,
-        global_model,
-        losses,
-        val_loader,
-        device,
-        val_auc_threshold,  # current validation-AUC threshold of the model being updated
-        validate_model,
-        criterion,
-        update_frequency
-):
-    """
-    根据给定的条件更新模型的权重。
-
-    参数:
-        epoch (int): 当前训练轮次。
-        model_to_update: 需要更新的模型。
-        other_models (list): 其他模型列表,用于计算全局模型权重。
-        global_model: 全局模型。
-        losses (list): 各模型的损失值列表。
-        val_loader: 验证数据的 DataLoader。
-        device: 设备 ('cuda' 或 'cpu')。
-        val_auc_threshold (float): 当前需要更新模型的验证 AUC。
-        aggregate_weights (function): 权重聚合函数。
-        validate_model (function): 验证模型的函数。
-        update_frequency (int): 权重更新的频率。
-
-    返回:
-        val_acc (float): 全局模型的验证精度。
-        val_auc (float): 全局模型的验证 AUC。
-        updated_val_auc_threshold (float): 更新后的验证 AUC。
-    """
-    if (epoch + 1) % update_frequency == 0:
-        # Collect every client model's weights and aggregate them
-        all_weights = [model.state_dict() for model in other_models]
-        avg_weights = aggregate_weights(all_weights)
-        
-        # Load the aggregate into the global model
-        global_model.load_state_dict(avg_weights)
-        
-        # Report the mean client training loss
-        weighted_loss = sum(losses) / len(losses)
-        print(f"Weighted Average Loss: {weighted_loss:.4f}")
-        
-        # Validate the global model on the shared validation set
-        val_loss, val_acc, val_auc = validate_model(device, global_model, val_loader, criterion, epoch, 'global_model')
-        print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')
-        
-        # Adopt the global weights only if they improve on the client's AUC
-        if val_auc > val_auc_threshold:
-            print(f'Updating model at epoch {epoch + 1}')
-            model_to_update.load_state_dict(global_model.state_dict())
-            val_auc_threshold = val_auc  # raise the threshold to the new AUC
-        
-        return val_acc, val_auc, val_auc_threshold
-    return None, None, val_auc_threshold
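-
-# Update-rule sketch: with update_frequency=1 the client models are aggregated
-# every epoch; if, say, the global model reaches val_auc 0.91 against a client
-# threshold of 0.88, that client adopts the global weights and its threshold
-# rises to 0.91; otherwise the client keeps its own weights.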
-
-
-def update_model_weights(
-        epoch,
-        model_to_update,
-        other_models,
-        global_model,
-        losses,
-        val_loader,
-        device,
-        val_auc_threshold,  # current validation-AUC threshold of the model being updated
-        validate_model,
-        criterion,
-        update_frequency
-):
-    """
-    根据给定的条件更新模型的权重。
-
-    参数:
-        epoch (int): 当前训练轮次。
-        model_to_update: 需要更新的模型。
-        other_models (list): 其他模型列表,用于计算全局模型权重。
-        global_model: 全局模型。
-        losses (list): 各模型的损失值列表。
-        val_loader: 验证数据的 DataLoader。
-        device: 设备 ('cuda' 或 'cpu')。
-        val_auc_threshold (float): 当前需要更新模型的验证 AUC。
-        aggregate_weights (function): 权重聚合函数。
-        validate_model (function): 验证模型的函数。
-        update_frequency (int): 权重更新的频率。
-
-    返回:
-        val_acc (float): 全局模型的验证精度。
-        val_auc (float): 全局模型的验证 AUC。
-        updated_val_auc_threshold (float): 更新后的验证 AUC。
-    """
-    if (epoch + 1) % update_frequency == 0:
-        # Collect every client model's weights and aggregate them
-        all_weights = [model.state_dict() for model in other_models]
-        avg_weights = aggregate_weights(all_weights)
-        
-        # Load the aggregate into the global model
-        global_model.load_state_dict(avg_weights)
-        
-        # Report the mean client training loss
-        weighted_loss = sum(losses) / len(losses)
-        print(f"Weighted Average Loss: {weighted_loss:.4f}")
-        
-        # Validate the global model (deep-model variant with reconstruction loss)
-        val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
-                                                        'global_model')
-        print(f'global_model Validation Accuracy: {val_acc:.4f}, global_model Validation AUC: {val_auc:.4f}')
-        
-        # Adopt the global weights only if they improve on the client's AUC
-        if val_auc > val_auc_threshold:
-            print(f'Updating model at epoch {epoch + 1}')
-            model_to_update.load_state_dict(global_model.state_dict())
-            val_auc_threshold = val_auc  # raise the threshold to the new AUC
-        
-        return val_acc, val_auc, val_auc_threshold
-    return None, None, val_auc_threshold
-
-
-def f_update_model_weights(
-        epoch,
-        model_to_update,
-        other_models,
-        global_model,
-        losses,
-        val_loader,
-        device,
-        val_auc_threshold,  # current validation-AUC threshold of the model being updated
-        aggregate_weights,  # 权重聚合函数
-        validate_model,
-        criterion,
-        update_frequency
-):
-    """
-    根据给定的条件更新模型的权重。
-
-    参数:
-        epoch (int): 当前训练轮次。
-        model_to_update: 需要更新的模型。
-        other_models (list): 其他模型列表,用于计算全局模型权重。
-        global_model: 全局模型。
-        losses (list): 各模型的损失值列表。
-        val_loader: 验证数据的 DataLoader。
-        device: 设备 ('cuda' 或 'cpu')。
-        val_auc_threshold (float): 当前需要更新模型的验证 AUC 阈值。
-        aggregate_weights (function): 权重聚合函数。
-        validate_model (function): 验证模型的函数。
-        criterion: 损失函数。
-        update_frequency (int): 权重更新的频率。
-
-    返回:
-        val_acc (float): 全局模型的验证精度。
-        val_auc (float): 全局模型的验证 AUC。
-        updated_val_auc_threshold (float): 更新后的验证 AUC 阈值。
-    """
-    # Aggregate once every update_frequency epochs
-    if (epoch + 1) % update_frequency == 0:
-        print(f"\n[Epoch {epoch + 1}] Updating global model weights...")
-        
-        # Collect the client weights
-        all_weights = [model.state_dict() for model in other_models]
-        
-        # Aggregate them into the global weights
-        avg_weights = aggregate_weights(all_weights)
-        print("Global model weights aggregated.")
-        
-        # Load the aggregate into the global model
-        global_model.load_state_dict(avg_weights)
-        
-        # Report the mean client training loss
-        weighted_loss = sum(losses) / len(losses)
-        print(f"Weighted Average Loss: {weighted_loss:.4f}")
-        
-        # Validate the global model
-        val_loss, val_acc, val_auc = validate_deepmodel(device, global_model, val_loader, criterion, epoch,
-                                                        'global_model')
-        print(f"[Global Model] Validation Loss: {val_loss:.4f}, Accuracy: {val_acc:.4f}, AUC: {val_auc:.4f}")
-        
-        # Adopt the global weights only if the AUC improved over the threshold
-        if val_auc > val_auc_threshold:
-            print(f"Global model AUC improved ({val_auc:.4f} > {val_auc_threshold:.4f}). Updating target model.")
-            model_to_update.load_state_dict(global_model.state_dict())
-            val_auc_threshold = val_auc  # raise the AUC threshold
-        else:
-            print(
-                f"Global model AUC did not improve ({val_auc:.4f} <= {val_auc_threshold:.4f}). No update to target model.")
-        
-        return val_acc, val_auc, val_auc_threshold
-    
-    # Not an aggregation epoch: keep the current threshold
-    return None, None, val_auc_threshold