# DAY 37 — Early stopping and saving model weights
#
# Topics:
#   1. Detecting overfitting: print train and test metrics side by side.
#   2. Saving/loading models: (a) weights only, (b) weights + model,
#      (c) full checkpoint that also contains the training state.
#   3. Early stopping (key ideas):
#      - Goal: stop before the model memorizes training noise, keeping the
#        version that generalizes best to unseen test data.
#      - Logic: monitor the test loss during training; while it keeps
#        improving, continue; once it stalls or rises, terminate early.
#      - Key knobs: `patience` (max consecutive non-improving checks, e.g.
#        100) and best-model saving (roll back to the best state, because
#        the model at the stopping point may already have degraded).
#      - Benefits: less overfitting, shorter training, and the best model
#        is kept automatically.
#
# Exercise: train on the credit dataset, save the weights, reload and
# continue training for 50 epochs, and apply early stopping.

# ---------------------------------------------------------------
# Part 1 — Overfitting check on the credit dataset
# ---------------------------------------------------------------
import pandas as pd              # tabular data handling
import numpy as np               # fast numerical arrays
import matplotlib.pyplot as plt  # plotting
import warnings
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.optim as optim
import time
from tqdm import tqdm  # FIX: tqdm is used in the training loop below but was never imported

warnings.filterwarnings('ignore')  # keep the console output clean

# Chinese font so plot labels render correctly (SimHei ships with Windows)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign normally

data = pd.read_csv('data.csv')  # load the credit dataset

# Columns with string dtype (candidates for encoding)
discrete_features = data.select_dtypes(include=['object']).columns.tolist()

# "Home Ownership": ordinal label encoding
home_ownership_mapping = {
    'Own Home': 1,
    'Rent': 2,
    'Have Mortgage': 3,
    'Home Mortgage': 4,
}
data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)

# "Years in current job": ordinal label encoding
years_in_job_mapping = {
    '< 1 year': 1,
    '1 year': 2,
    '2 years': 3,
    '3 years': 4,
    '4 years': 5,
    '5 years': 6,
    '6 years': 7,
    '7 years': 8,
    '8 years': 9,
    '9 years': 10,
    '10+ years': 11,
}
data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)

# "Purpose": one-hot encoding; the new bool columns must be cast to int
data = pd.get_dummies(data, columns=['Purpose'])
data2 = pd.read_csv('data.csv')  # re-read untouched data to diff column names
list_final = []                  # names of the columns added by one-hot encoding
for i in data.columns:
    if i not in data2.columns:
        list_final.append(i)
for i in list_final:
    data[i] = data[i].astype(int)  # bool -> 0/1

# "Term": 0/1 mapping, then rename the column
term_mapping = {'Short Term': 0, 'Long Term': 1}
data['Term'] = data['Term'].map(term_mapping)
data.rename(columns={'Term': 'Long Term'}, inplace=True)

continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()

# Fill missing values in continuous features with the column mode.
# NOTE(review): the original comment said "median" but the code uses the
# mode — the comment here now matches the code.
for feature in continuous_features:
    mode_value = data[feature].mode()[0]
    data[feature].fillna(mode_value, inplace=True)

X = data.drop(['Credit Default'], axis=1)  # features (axis=1 drops a column)
y = data['Credit Default']                 # binary labels

# Train/test split (80/20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Min-max scaling for training stability
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# 3. Convert to PyTorch tensors (GPU if available)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")

# Sanity prints before conversion
print("X_train类型", type(X_train), "| 形状", X_train.shape)
print("y_train类型", type(y_train), "| 形状", y_train.shape)

# Features: MinMaxScaler already returned numpy arrays
X_train = torch.FloatTensor(X_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)

# Labels: accept either a pandas Series or a numpy array
if isinstance(y_train, pd.Series):
    y_train_np = y_train.values  # Series -> numpy array
else:
    y_train_np = y_train
y_train = torch.LongTensor(y_train_np).to(device)

if isinstance(y_test, pd.Series):
    y_test_np = y_test.values
else:
    y_test_np = y_test
y_test = torch.LongTensor(y_test_np).to(device)

# Dimension check (feature count should be 31)
print(f"X_train形状: {X_train.shape}")
print(f"y_train形状: {y_train.shape}")
print(f"特征列数模型输入维度: {X_train.shape[1]}")


# 4. MLP sized for the 31-dimensional credit features
class MLP(nn.Module):
    """Two-layer MLP with ReLU and dropout for binary classification."""

    def __init__(self, input_dim=31, hidden_dim=64, output_dim=2):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)  # 31 -> 64
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)  # regularization against overfitting
        self.fc2 = nn.Linear(hidden_dim, output_dim)  # 64 -> 2 logits

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)  # randomly drops 20% of activations in train mode
        out = self.fc2(out)
        return out


model = MLP(input_dim=X_train.shape[1]).to(device)
print("模型结构")
print(model)

# 5. Training setup
criterion = nn.CrossEntropyLoss()  # binary classification (LogSoftmax + NLLLoss)
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Adam converges faster than SGD here
num_epochs = 1000  # 1000 epochs are plenty for this dataset

# 6. Training loop (full-batch)
train_losses = []
test_losses = []
epochs_list = []
start_time = time.time()

with tqdm(total=num_epochs, desc='训练进度', unit='epoch') as pbar:
    for epoch in range(num_epochs):
        model.train()
        outputs = model(X_train)
        train_loss = criterion(outputs, y_train)
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

        # Every 20 epochs, record the test loss to monitor overfitting
        if (epoch + 1) % 20 == 0:
            model.eval()
            with torch.no_grad():
                test_outputs = model(X_test)
                test_loss = criterion(test_outputs, y_test)
            train_losses.append(train_loss.item())
            test_losses.append(test_loss.item())
            epochs_list.append(epoch + 1)
            pbar.set_postfix({
                'Train Loss': f"{train_loss.item():.4f}",
                'Test Loss': f"{test_loss.item():.4f}",
            })
        pbar.update(1)

train_time = time.time() - start_time
print(f"\n训练耗时: {train_time:.2f} 秒")

# 7. Evaluation on the test set
model.eval()
with torch.no_grad():
    test_outputs = model(X_test)
    _, predicted = torch.max(test_outputs, 1)
    correct = (predicted == y_test).sum().item()
    accuracy = correct / y_test.size(0)
    print(f"测试集准确率: {accuracy * 100:.2f}%")

# 8. Loss curves
plt.figure(figsize=(10, 6))
plt.plot(epochs_list, train_losses, label='训练损失')
plt.plot(epochs_list, test_losses, label='测试损失')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('训练/测试损失变化曲线')
plt.legend()
plt.grid(True)
plt.show()

# Reading the curves, part 1: in the early phase (~0-200 epochs) the train
# and test losses fall together — the model is in a healthy learning phase.
# Reading the part-1 loss curves (continued):
#   1. Mid/late phase (200-1000 epochs): train loss keeps falling slowly and
#      settles near 0.44; test loss bottoms out around 0.46 and stays flat.
#      The gap between the two never widens.
#   2. Overfitting verdict: the classic signature (train loss near zero while
#      test loss turns upward, or a large train/test gap) is absent here.
#   3. Conclusion: the fit is healthy and the model generalizes well; no
#      overfitting is present.

# ---------------------------------------------------------------
# Part 2 — Load saved weights, then continue training for 50 epochs
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import warnings

warnings.filterwarnings('ignore')

# 1. Global config: GPU/CPU device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")


def preprocess_data():
    """Preprocess the credit dataset: encode categoricals, impute missing
    values, split, scale, and convert everything to tensors on `device`.

    Returns:
        (X_train, X_test, y_train, y_test) as torch tensors — features as
        float32, labels as long (required by CrossEntropyLoss).
    """
    # Chinese font for any plots produced later
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    # Raw string path avoids escape issues on Windows
    data = pd.read_csv(r'data.csv')

    # 1. String-feature encoding
    home_ownership_mapping = {'Own Home': 1, 'Rent': 2,
                              'Have Mortgage': 3, 'Home Mortgage': 4}
    data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)

    years_in_job_mapping = {
        '< 1 year': 1, '1 year': 2, '2 years': 3, '3 years': 4,
        '4 years': 5, '5 years': 6, '6 years': 7, '7 years': 8,
        '8 years': 9, '9 years': 10, '10+ years': 11,
    }
    data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)

    # "Purpose": one-hot encode, then cast the new bool columns to int
    data = pd.get_dummies(data, columns=['Purpose'])
    data2 = pd.read_csv(r'data.csv')  # untouched copy to detect new columns
    list_final = [col for col in data.columns if col not in data2.columns]
    for col in list_final:
        data[col] = data[col].astype(int)

    # "Term": 0/1 mapping + rename
    term_mapping = {'Short Term': 0, 'Long Term': 1}
    data['Term'] = data['Term'].map(term_mapping)
    data.rename(columns={'Term': 'Long Term'}, inplace=True)

    # 2. Impute missing continuous values with the column mode
    continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()
    for feature in continuous_features:
        mode_value = data[feature].mode()[0]
        data[feature].fillna(mode_value, inplace=True)

    # 3. Features (31 columns) / binary label
    X = data.drop(['Credit Default'], axis=1)
    y = data['Credit Default']

    # 4. Train/test split (80/20)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    # 5. Min-max scaling for training stability
    scaler = MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # 6. Tensor conversion (handles Series / ndarray / other sequences)
    def to_tensor(data, dtype=torch.float32):
        """Convert to a tensor on `device`, avoiding Series-related errors."""
        if isinstance(data, pd.Series):
            arr = data.values
        elif isinstance(data, np.ndarray):
            arr = data
        else:
            arr = np.array(data)
        return torch.tensor(arr, dtype=dtype).to(device)

    X_train = to_tensor(X_train, torch.float32)
    X_test = to_tensor(X_test, torch.float32)
    y_train = to_tensor(y_train, torch.long)  # classification labels must be long
    y_test = to_tensor(y_test, torch.long)

    # Dimension check
    print(f"数据预处理完成 | X_train形状: {X_train.shape} | y_train形状: {y_train.shape}")
    print(f"模型输入维度: {X_train.shape[1]} | 输出维度: 2二分类")
    return X_train, X_test, y_train, y_test


# Run preprocessing once at module level
X_train, X_test, y_train, y_test = preprocess_data()


# 2. MLP sized for the credit data
class MLP(nn.Module):
    """Two-layer MLP with ReLU and dropout for binary classification."""

    def __init__(self, input_dim=31, hidden_dim=64, output_dim=2):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)  # 31 -> 64
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)  # regularization against overfitting
        self.fc2 = nn.Linear(hidden_dim, output_dim)  # 64 -> 2 logits

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out
# 3. Base training: produces the checkpoint file
def train_base_model():
    """Train the MLP from scratch and save a full checkpoint.

    The checkpoint (``credit_checkpoint.pth``) contains the model weights,
    optimizer state, final epoch number, and the recorded loss history —
    everything needed to resume training later.

    Returns:
        The trained model.
    """
    # Fresh model / loss / optimizer (SGD must match the resume script)
    model = MLP(input_dim=X_train.shape[1]).to(device)
    criterion = nn.CrossEntropyLoss()  # binary classification
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    num_epochs = 20000  # base-training length; adjust as needed

    # Loss history (sampled every 200 epochs)
    train_losses = []
    test_losses = []
    epochs = []
    start_time = time.time()

    with tqdm(total=num_epochs, desc='基础训练进度', unit='epoch') as pbar:
        for epoch in range(num_epochs):
            model.train()
            # Forward pass (full batch)
            outputs = model(X_train)
            train_loss = criterion(outputs, y_train)
            # Backward pass + update
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Record the test loss every 200 epochs
            if (epoch + 1) % 200 == 0:
                model.eval()
                with torch.no_grad():
                    test_outputs = model(X_test)
                    test_loss = criterion(test_outputs, y_test)
                model.train()
                train_losses.append(train_loss.item())
                test_losses.append(test_loss.item())
                epochs.append(epoch + 1)
                pbar.set_postfix({'Train Loss': f"{train_loss.item():.4f}",
                                  'Test Loss': f"{test_loss.item():.4f}"})

            # Advance the progress bar in 1000-epoch steps
            if (epoch + 1) % 1000 == 0:
                pbar.update(1000)

        # Top up the bar in case it fell short of the total
        if pbar.n < num_epochs:
            pbar.update(num_epochs - pbar.n)

    # Full checkpoint: model / optimizer / losses / epoch counter
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': num_epochs,
        'train_losses': train_losses,
        'test_losses': test_losses,
        'epochs': epochs,
        'best_loss': min(test_losses) if test_losses else float('inf'),
    }
    torch.save(checkpoint, 'credit_checkpoint.pth')
    print(f"\n基础训练完成 | 耗时: {time.time() - start_time:.2f} 秒 | 检查点保存至 credit_checkpoint.pth")
    return model


# Run the base training
base_model = train_base_model()
# 4. Load the checkpoint and continue training for 50 more epochs
def continue_train_50_epochs():
    """Resume training from ``credit_checkpoint.pth`` for 50 extra epochs.

    Rebuilds the model/optimizer with the same hyperparameters as base
    training, restores their states, trains 50 more epochs (recording the
    test loss every epoch), saves a new checkpoint, evaluates train/test
    accuracy, and plots the combined loss curve with the resume point marked.

    Returns:
        The resumed-and-trained model.
    """
    # Step 1: same architecture/optimizer settings as base training
    model = MLP(input_dim=X_train.shape[1]).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)  # must match base training

    # Step 2: restore states from the checkpoint
    checkpoint = torch.load('credit_checkpoint.pth', map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch']  # epoch where base training ended (20000)
    end_epoch = start_epoch + 50       # resume for 50 epochs

    # Continue the recorded loss history
    train_losses = checkpoint['train_losses']
    test_losses = checkpoint['test_losses']
    epochs = checkpoint['epochs']

    print(f"\n开始续训50轮 | 从第 {start_epoch + 1} 轮到第 {end_epoch} 轮")
    start_time = time.time()

    # Step 3: 50 resume epochs
    with tqdm(total=50, desc='续训进度', unit='epoch') as pbar:
        for epoch in range(start_epoch, end_epoch):
            model.train()  # force train mode (enables dropout)
            outputs = model(X_train)
            train_loss = criterion(outputs, y_train)
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Record the test loss every epoch during the short resume run
            model.eval()
            with torch.no_grad():
                test_outputs = model(X_test)
                test_loss = criterion(test_outputs, y_test)
            model.train()

            train_losses.append(train_loss.item())
            test_losses.append(test_loss.item())
            epochs.append(epoch + 1)

            pbar.set_postfix({'Train Loss': f"{train_loss.item():.4f}",
                              'Test Loss': f"{test_loss.item():.4f}"})
            pbar.update(1)

    # Step 4: save the post-resume checkpoint
    new_checkpoint = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': end_epoch,
        'train_losses': train_losses,
        'test_losses': test_losses,
        'epochs': epochs,
        'best_loss': min(test_losses),
    }
    torch.save(new_checkpoint, 'credit_checkpoint_continued.pth')
    print(f"续训完成 | 耗时: {time.time() - start_time:.2f} 秒 | 新检查点保存至 credit_checkpoint_continued.pth")

    # Step 5: evaluate the resumed model
    model.eval()
    with torch.no_grad():
        # Test accuracy
        test_outputs = model(X_test)
        _, predicted = torch.max(test_outputs, 1)
        correct = (predicted == y_test).sum().item()
        test_accuracy = correct / y_test.size(0)
        # Train accuracy (compare against test to spot overfitting)
        train_outputs = model(X_train)
        _, train_pred = torch.max(train_outputs, 1)
        train_correct = (train_pred == y_train).sum().item()
        train_accuracy = train_correct / y_train.size(0)
    print(f"\n续训后评估 | 训练集准确率: {train_accuracy * 100:.2f}% | 测试集准确率: {test_accuracy * 100:.2f}%")

    # Step 6: full loss curve with the resume point marked
    plt.figure(figsize=(12, 6))
    plt.plot(epochs, train_losses, label='训练损失')
    plt.plot(epochs, test_losses, label='测试损失')
    plt.axvline(x=start_epoch, color='red', linestyle='--',
                label=f'续训起始点第{start_epoch}轮')
    plt.xlabel('训练轮数Epoch')
    plt.ylabel('损失Loss')
    plt.title('信贷违约预测模型 - 基础训练续训损失曲线')
    plt.legend()
    plt.grid(True)
    plt.show()
    return model


# Run the 50-epoch resume
continued_model = continue_train_50_epochs()

# ---------------------------------------------------------------
# Part 3 — Early stopping
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import warnings

warnings.filterwarnings('ignore')

# 1. Basic setup: device and Chinese plot fonts
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")

plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


# 2. Preprocessing for the credit dataset
def preprocess_credit_data():
    """Encode, impute, split, and scale the credit data; return tensors.

    Returns:
        (X_train, X_test, y_train, y_test) — features as float32 tensors,
        labels as long tensors (required by CrossEntropyLoss).
    """
    data = pd.read_csv(r'data.csv')  # raw string avoids path escaping

    # 1. String-feature encoding
    home_mapping = {'Own Home': 1, 'Rent': 2,
                    'Have Mortgage': 3, 'Home Mortgage': 4}
    data['Home Ownership'] = data['Home Ownership'].map(home_mapping)

    job_years_mapping = {
        '< 1 year': 1, '1 year': 2, '2 years': 3, '3 years': 4,
        '4 years': 5, '5 years': 6, '6 years': 7, '7 years': 8,
        '8 years': 9, '9 years': 10, '10+ years': 11,
    }
    data['Years in current job'] = data['Years in current job'].map(job_years_mapping)

    # "Purpose": one-hot encode, cast new bool columns to numeric
    data = pd.get_dummies(data, columns=['Purpose'])
    data2 = pd.read_csv(r'data.csv')
    new_cols = [col for col in data.columns if col not in data2.columns]
    for col in new_cols:
        data[col] = data[col].astype(int)

    # "Term": 0/1 mapping + rename
    term_mapping = {'Short Term': 0, 'Long Term': 1}
    data['Term'] = data['Term'].map(term_mapping)
    data.rename(columns={'Term': 'Long Term'}, inplace=True)

    # 2. Impute missing continuous values with the column mode
    continuous_cols = data.select_dtypes(include=['int64', 'float64']).columns.tolist()
    for col in continuous_cols:
        mode_val = data[col].mode()[0]
        data[col].fillna(mode_val, inplace=True)

    # 3. Features (31 columns) / binary label
    X = data.drop(['Credit Default'], axis=1)
    y = data['Credit Default']

    # 4. Train/test split (80/20)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    # 5. Min-max scaling for network training stability
    scaler = MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # 6. Tensor conversion (handles Series / ndarray / other sequences)
    def to_tensor(data, dtype=torch.float32):
        """Convert to a tensor on `device`, avoiding Series-related errors."""
        if isinstance(data, pd.Series):
            arr = data.values
        elif isinstance(data, np.ndarray):
            arr = data
        else:
            arr = np.array(data)
        return torch.tensor(arr, dtype=dtype).to(device)

    X_train = to_tensor(X_train, torch.float32)
    X_test = to_tensor(X_test, torch.float32)
    y_train = to_tensor(y_train, torch.long)
    y_test = to_tensor(y_test, torch.long)

    print(f"数据预处理完成 | X_train形状: {X_train.shape} | y_train形状: {y_train.shape}")
    print(f"模型输入维度: {X_train.shape[1]} | 输出维度: 2二分类")
    return X_train, X_test, y_train, y_test


# Run preprocessing
X_train, X_test, y_train, y_test = preprocess_credit_data()


# 3. MLP sized for the credit data
class MLP(nn.Module):
    """Two-layer MLP with ReLU and dropout for binary classification."""

    def __init__(self, input_dim=31, hidden_dim=64, output_dim=2):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)  # 31 -> 64
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)  # guard against overfitting
        self.fc2 = nn.Linear(hidden_dim, output_dim)  # 64 -> 2 logits

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out


# Instantiate and move to device
model = MLP(input_dim=X_train.shape[1]).to(device)
# 4. Training configuration (with early stopping)
criterion = nn.CrossEntropyLoss()  # binary classification
# SGD here; could be swapped for optim.Adam(model.parameters(), lr=0.001)
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 20000     # maximum number of epochs
train_losses = []      # recorded train losses
test_losses = []       # recorded test losses
epochs_list = []       # epochs at which losses were recorded

# Early-stopping state
best_test_loss = float('inf')  # best test loss seen so far
best_epoch = 0                 # epoch of the best test loss
patience = 100                 # checks without improvement before stopping
counter = 0                    # consecutive non-improving checks
early_stopped = False          # whether early stopping fired

# 5. Training loop with progress bar and early stopping
start_time = time.time()

with tqdm(total=num_epochs, desc='训练进度', unit='epoch') as pbar:
    for epoch in range(num_epochs):
        model.train()  # train mode (dropout active)
        outputs = model(X_train)
        train_loss = criterion(outputs, y_train)
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

        # Every 200 epochs: record losses and check the early-stop criterion
        if (epoch + 1) % 200 == 0:
            model.eval()
            with torch.no_grad():
                test_outputs = model(X_test)
                test_loss = criterion(test_outputs, y_test)

            train_losses.append(train_loss.item())
            test_losses.append(test_loss.item())
            epochs_list.append(epoch + 1)

            pbar.set_postfix({'Train Loss': f"{train_loss.item():.4f}",
                              'Test Loss': f"{test_loss.item():.4f}"})

            # Early-stopping logic: track best loss, count stagnant checks
            if test_loss.item() < best_test_loss:
                best_test_loss = test_loss.item()
                best_epoch = epoch + 1
                counter = 0
                # Snapshot the best-performing weights
                torch.save(model.state_dict(), 'best_credit_model.pth')
            else:
                counter += 1
                if counter >= patience:
                    print(f"\n早停触发第{epoch + 1}轮测试损失已连续{patience}轮未改善")
                    print(f"最佳测试损失{best_test_loss:.4f}第{best_epoch}轮")
                    early_stopped = True
                    break  # stop training

            model.train()  # back to train mode

        # Advance the bar in 1000-epoch steps
        if (epoch + 1) % 1000 == 0:
            pbar.update(1000)

    # Top up the bar if training stopped early or short
    if pbar.n < num_epochs:
        pbar.update(num_epochs - pbar.n)

train_time = time.time() - start_time
print(f"\n训练总耗时: {train_time:.2f} seconds")

# 6. Roll back to the best saved model if early stopping fired
if early_stopped:
    print(f"\n加载第{best_epoch}轮的最佳模型进行评估...")
    model.load_state_dict(torch.load('best_credit_model.pth', map_location=device))

# Loss curves with the best epoch marked
plt.figure(figsize=(10, 6))
plt.plot(epochs_list, train_losses, label='训练损失')
plt.plot(epochs_list, test_losses, label='测试损失')
plt.axvline(x=best_epoch, color='red', linestyle='--',
            label=f'最佳模型第{best_epoch}轮')
plt.xlabel('训练轮数Epoch')
plt.ylabel('损失Loss')
plt.title('信贷违约预测模型 - 训练/测试损失曲线')
plt.legend()
plt.grid(True)
plt.show()

# Test-set accuracy of the (possibly rolled-back) model
model.eval()
with torch.no_grad():
    outputs = model(X_test)
    _, predicted = torch.max(outputs, 1)
    correct = (predicted == y_test).sum().item()
    accuracy = correct / y_test.size(0)
    print(f"测试集准确率: {accuracy * 100:.2f}%")

# Takeaway: the curves show a stable training run — neither underfitting
# (losses do not stay high) nor overfitting (the train/test gap stays
# small) — reaching a good fit by the 20000-epoch mark.