Time-Series: A Walkthrough of the exp_classification.py Script

from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, cal_accuracy
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np
import pdb

warnings.filterwarnings('ignore')

class Exp_Classification(Exp_Basic):
    def __init__(self, args):
        super(Exp_Classification, self).__init__(args)
    # Build the model
    def _build_model(self):
        # model input depends on data
       
        train_data, train_loader = self._get_data(flag='TRAIN')
        test_data, test_loader = self._get_data(flag='TEST')
        self.args.seq_len = max(train_data.max_seq_len, test_data.max_seq_len)
        self.args.pred_len = 0
        self.args.enc_in = train_data.feature_df.shape[1]
        self.args.num_class = len(train_data.class_names)
        # model init
        model = self.model_dict[self.args.model].Model(self.args).float()
        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model
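    # The sequence length, channel count (enc_in) and number of classes are
    # inferred from the data before the model is constructed, because the
    # classification head must know these sizes. A hedged sketch of how a
    # TSLib-style model typically turns them into an output layer (illustrative
    # only; individual models differ):
    #   self.projection = nn.Linear(configs.d_model * configs.seq_len, configs.num_class)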
    # Get the dataset and dataloader
    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader
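    # For the classification task, data_provider is expected to return a dataset
    # of labelled series together with a DataLoader whose collate function pads
    # each batch to a common length, so that every batch yields
    #   batch_x:      (batch_size, seq_len, enc_in)  float series values
    #   label:        (batch_size, 1)                integer class index
    #   padding_mask: (batch_size, seq_len)          1 for real steps, 0 for padding
    # (shape description inferred from how the tensors are used below; the exact
    # collate logic lives in data_provider and is not shown here).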
    # Select the optimizer (Adam)
    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        return model_optim
    # Select the loss criterion
    def _select_criterion(self):
        # Cross-entropy loss
        criterion = nn.CrossEntropyLoss()
        return criterion
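    # nn.CrossEntropyLoss takes raw (unnormalized) logits of shape
    # (batch_size, num_class) and integer class indices of shape (batch_size,);
    # it applies log-softmax internally, so the model itself does not apply
    # softmax. A tiny standalone illustration (not part of this script):
    #   logits = torch.tensor([[2.0, 0.5], [0.1, 1.5]])
    #   labels = torch.tensor([0, 1])
    #   nn.CrossEntropyLoss()(logits, labels)  # ≈ 0.21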
    # Validation: evaluate the model by computing its loss and accuracy on held-out data
    def vali(self, vali_data, vali_loader, criterion):
        total_loss = []
        preds = []
        trues = []
        # Switch to evaluation mode
        self.model.eval()
        # Disable gradient computation to save memory and compute
        with torch.no_grad():
            for i, (batch_x, label, padding_mask) in enumerate(vali_loader):
                # Cast to float and move the tensors to the CPU/GPU device
                batch_x = batch_x.float().to(self.device)
                padding_mask = padding_mask.float().to(self.device)
                label = label.to(self.device)
                # Forward pass: feed the batch to the model and collect the outputs
                outputs = self.model(batch_x, padding_mask, None, None)

                pred = outputs.detach().cpu()

                loss = criterion(pred, label.long().squeeze().cpu())
                # Append the batch loss to the total_loss list
                total_loss.append(loss)

                preds.append(outputs.detach())
                trues.append(label)
        # Average the losses collected over the validation set
        total_loss = np.average(total_loss)

        preds = torch.cat(preds, 0)
        trues = torch.cat(trues, 0)
        probs = torch.nn.functional.softmax(preds, dim=1)  # (total_samples, num_classes) est. prob. for each class and sample
        predictions = torch.argmax(probs, dim=1).cpu().numpy()  # (total_samples,) int class index for each sample
        trues = trues.flatten().cpu().numpy()
        accuracy = cal_accuracy(predictions, trues)
        # Switch the model back to training mode
        self.model.train()
        return total_loss, accuracy
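    # cal_accuracy (utils.tools) compares the predicted class indices with the
    # ground-truth labels; a minimal sketch of what it presumably computes
    # (an assumption, the utility may differ in details):
    #   def cal_accuracy(y_pred, y_true):
    #       return np.mean(y_pred == y_true)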

    def train(self, setting):
        train_data, train_loader = self._get_data(flag='TRAIN')
        vali_data, vali_loader = self._get_data(flag='TEST')
        test_data, test_loader = self._get_data(flag='TEST')
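        # Note: the validation set here is simply the TEST split again; the UEA
        # classification archives provide only TRAIN/TEST splits, so validation
        # accuracy and test accuracy are computed on the same data.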

        path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
            os.makedirs(path)
        # Record the start time
        time_now = time.time()
        # Number of training steps (batches) per epoch
        train_steps = len(train_loader)
        # Early stopping to avoid overfitting; patience is the number of epochs without improvement to tolerate
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
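        # EarlyStopping (utils.tools) saves a checkpoint whenever the monitored
        # value improves and sets its early_stop flag after `patience`
        # consecutive epochs without improvement.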
        # Select the optimizer
        model_optim = self._select_optimizer()
        # Select the loss function (cross-entropy here)
        criterion = self._select_criterion()

        for epoch in range(self.args.train_epochs):
            iter_count = 0
            train_loss = []
            # Switch to training mode
            self.model.train()
            epoch_time = time.time()
            # Iterate over the training batches
            for i, (batch_x, label, padding_mask) in enumerate(train_loader):
                iter_count += 1
                # Zero the gradients accumulated from the previous step
                model_optim.zero_grad()
                # Cast to float and move the tensors to the CPU/GPU device
                batch_x = batch_x.float().to(self.device)
                padding_mask = padding_mask.float().to(self.device)
                label = label.to(self.device)

                outputs = self.model(batch_x, padding_mask, None, None)
                loss = criterion(outputs, label.long().squeeze(-1))
                train_loss.append(loss.item())

                if (i + 1) % 100 == 0:
                    print(" iters: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                    speed = (time.time() - time_now) / iter_count
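                    # Remaining time estimate: seconds per iteration times the
                    # number of iterations left across all remaining epochs.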
                    left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)
                    print(' speed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()
                # Backpropagate to compute the gradients
                loss.backward()
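                # Clip the global L2 norm of all gradients to 4.0 to keep the
                # update stable if the loss spikes.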
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=4.0)
                # Update the network parameters from the gradients
                model_optim.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            # Average the training losses for this epoch
            train_loss = np.average(train_loss)
            # Evaluate loss and accuracy on the validation and test sets
            vali_loss, val_accuracy = self.vali(vali_data, vali_loader, criterion)
            test_loss, test_accuracy = self.vali(test_data, test_loader, criterion)

            print(
                "Epoch: {0}, Steps: {1} | Train Loss: {2:.3f} Vali Loss: {3:.3f} Vali Acc: {4:.3f} Test Loss: {5:.3f} Test Acc: {6:.3f}"
                .format(epoch + 1, train_steps, train_loss, vali_loss, val_accuracy, test_loss, test_accuracy))
            # Early stopping monitors the negated validation accuracy, since EarlyStopping expects a quantity to minimize
            early_stopping(-val_accuracy, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break
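            # Decay the learning rate every 5 epochs according to the schedule
            # selected by args.lradj (see utils.tools.adjust_learning_rate).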
            if (epoch + 1) % 5 == 0:
                adjust_learning_rate(model_optim, epoch + 1, self.args)
        # Load the best checkpoint saved by early stopping
        best_model_path = path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))

        return self.model

    def test(self, setting, test=0):
        test_data, test_loader = self._get_data(flag='TEST')
        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        preds = []
        trues = []
        # Create the test results folder if it does not already exist
        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        # Switch to evaluation mode
        self.model.eval()
        # Disable gradient computation to save memory and compute
        with torch.no_grad():
            # Iterate over the test dataloader, collecting outputs and labels
            for i, (batch_x, label, padding_mask) in enumerate(test_loader):
                # Cast to float and move the tensors to the CPU/GPU device
                batch_x = batch_x.float().to(self.device)
                padding_mask = padding_mask.float().to(self.device)
                label = label.to(self.device)
                # Forward pass through the model
                outputs = self.model(batch_x, padding_mask, None, None)

                preds.append(outputs.detach())
                trues.append(label)
        
        preds = torch.cat(preds, 0)
        trues = torch.cat(trues, 0)
        print('test shape:', preds.shape, trues.shape)

        probs = torch.nn.functional.softmax(preds, dim=1)  # (total_samples, num_classes) est. prob. for each class and sample
        predictions = torch.argmax(probs, dim=1).cpu().numpy()  # (total_samples,) int class index for each sample
        trues = trues.flatten().cpu().numpy()
        # Compute accuracy
        accuracy = cal_accuracy(predictions, trues)

        # result save
        folder_path = './results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        # Print the accuracy and append it to the results file
        print('accuracy:{}'.format(accuracy))
        file_name='result_classification.txt'
        f = open(os.path.join(folder_path,file_name), 'a')
        f.write(setting + "  \n")
        f.write('accuracy:{}'.format(accuracy))
        f.write('\n')
        f.write('\n')
        f.close()
        return
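
# A hedged usage sketch: in the Time-Series-Library this class is normally
# driven from run.py, roughly as below (the args namespace and the `setting`
# string are assumptions here; see run.py for the real argument handling):
#   exp = Exp_Classification(args)
#   exp.train(setting)
#   exp.test(setting)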