Implementing AGCRN with PyG for Crime Distribution Prediction

As before, the New York crime dataset is used for this analysis; the historical crime distribution is shown below:
(figure: historical crime distribution)

Preprocessing module

The preprocessing here works on a single graph, i.e. the batch size is 1. I have not read this part of the paper carefully yet, but the interface does not seem to support batched processing. The code is as follows:

import numpy as np
import torch
from tqdm import tqdm
from torch_geometric.data import Data
from torch_geometric.nn import knn
from torch_geometric_temporal.signal import StaticGraphTemporalSignal


def nn_seq_gat(seq_len, B, pred_step_size, train, val, test, TempArea):
    TempTrain = np.reshape(train,[TempArea,-1])
    edge_index = knn(torch.from_numpy(train.reshape(TempArea,-1)),torch.from_numpy(train.reshape(TempArea,-1)),3)
    graph = Data(x=TempTrain,edge_index=edge_index)
    # graph = create_graph(num_nodes, train)

    def process(dataset, batch_size, step_size, shuffle):
        nodes, timeLength = dataset.shape[0], dataset.shape[1]
        # print(nodes,timeLength)
        dataset = dataset.tolist()
        feature = []
        target = []
        graphs = []
        for i in tqdm(range(0, timeLength - seq_len - pred_step_size, step_size)):

            train_seq = []
            for j in range(i, i + seq_len):
                x = []
                for c in range(nodes):  # collect each area's value at this past time step
                    x.append(dataset[c][j][0])

                # temp = functools.reduce(operator.concat, x)
                # print(temp)
                train_seq.append(x)
            # all node values for the next few time steps
            train_labels = []
            for k in range(i + seq_len, i+seq_len+step_size):
                train_label = []
                for j in range(nodes):
                    train_label.append(dataset[j][k][0])
                # print(train_label)
                # temp1 = functools.reduce(operator.concat, train_label)
                # print(temp1)
                train_labels.append(train_label)
            feature.append(np.array(train_seq).T)
            target.append(np.array(train_labels).reshape(-1).T)

            # temp = Data(x=train_seq, edge_index=graph.edge_index, y=train_labels)
            # # print(temp)
            # graphs.append(temp)
        # #
        # loader = torch_geometric.loader.DataLoader(graphs, batch_size=batch_size,
        #                                            shuffle=shuffle, drop_last=True)
        loader = StaticGraphTemporalSignal(edge_index=graph.edge_index,edge_weight=np.ones(graph.edge_index.shape[1]),
                                                features=feature,targets=target)


        return loader,graphs

    Dtr,Dtrgraphs = process(train, B, step_size=1, shuffle=True)
    Val,Valgraphs = process(val, val.shape[1] - seq_len - pred_step_size, step_size=1, shuffle=True)
    Dte,Dtegraphs = process(test,test.shape[1] - seq_len - pred_step_size, step_size=1, shuffle=False)
    b = test.shape[1] - seq_len - pred_step_size
    c = val.shape[1] - seq_len - pred_step_size
    return graph, Dtr, Val, Dte, b, c
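
For orientation, here is a minimal call sketch. The array names and hyperparameter values below are my own assumptions for illustration (the post does not state them), assuming the raw arrays have shape (TempArea, time_steps, 1):

# Hypothetical arrays of per-area crime counts over time, shape (256, T, 1).
graph, Dtr, Val, Dte, b, c = nn_seq_gat(
    seq_len=6,            # history window length (matches in_channels=6 in the model below)
    B=1,                  # batch size; the temporal signal iterates one snapshot at a time
    pred_step_size=1,     # number of future steps per target
    train=train_arr, val=val_arr, test=test_arr,
    TempArea=256,         # number of areas / graph nodes
)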

The network module is as follows:

import torch
import torch.nn.functional as F
from torch_geometric_temporal.nn.recurrent import AGCRN


class AST(torch.nn.Module):
    def __init__(self):
        super(AST, self).__init__()
        self.recurrent = AGCRN(number_of_nodes=256,
                               in_channels=6,
                               out_channels=2,
                               K=2,
                               embedding_dimensions=10)
        self.linear = torch.nn.Linear(2, 1)

    def forward(self, data, e, h):
        x = data.x.view(1,256,6)

        h_0 = self.recurrent(x, e, h)
        y = F.relu(h_0)
        y = self.linear(y)
        return y, h_0
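
The training loop below references a node-embedding tensor e, an optimizer and a loss function that are not shown in the post. A minimal setup sketch, continuing from the snippet above and assuming 256 nodes with embedding_dimensions=10 to match the AGCRN constructor (variable names are my own):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = AST().to(device)
# Learnable node embeddings that drive AGCRN's adaptive adjacency,
# shape (number_of_nodes, embedding_dimensions).
e = torch.nn.Parameter(torch.empty(256, 10, device=device))
torch.nn.init.xavier_uniform_(e)
optimizer = torch.optim.Adam(list(model.parameters()) + [e], lr=1e-3)
loss_function = torch.nn.MSELoss()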

The training process is as follows:

Format of a processed sample:

Data(x=[256, 6], edge_index=[2, 768], edge_attr=[768], y=[256])
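
Each snapshot from the StaticGraphTemporalSignal iterator carries the 6-step history of all 256 areas as node features and the next-step counts as the target. A quick inspection sketch (assuming Dtr is the training signal returned by nn_seq_gat above):

snapshot = next(iter(Dtr))
print(snapshot)                     # Data(x=[256, 6], edge_index=[2, 768], edge_attr=[768], y=[256])
x = snapshot.x.view(1, 256, 6)      # (batch, nodes, in_channels) layout expected by AGCRN
print(x.shape, snapshot.y.shape)    # torch.Size([1, 256, 6]) torch.Size([256])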

The training code block:

    for epoch in tqdm(range(300)):
        train_losses = []
        # tar_val = list(enumerate(val_loader))  # transfer-learning part
        # print(tar_val[0])
        h = None
        for tr in train_loader:
            # _, x_tar = tar_val[batch_j]
            # x_tar = x_tar.to(device)
            # batch_j += 1#
            # if batch_j >= len(tar_val):#
            #     batch_j = 0#
            # print(tr)
            model.train()
            label = tr.to(device).y.float()
            y_src,_ = model(tr.to(device),e,h)  #,batch,timeStep,Areas, ##cpu().detach().numpy()
            y_src = y_src.float()

            loss = loss_function(y_src, label)
            print(loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())

        # validation
        val_loss, test_loss,result = test(model, val_loader,test_loader,b,c,h)
        print('Epoch {:03d} train_loss {:.4f} val_loss {:.4f} test mape {:.4f}'.format(epoch
                                                                                       , np.mean(train_losses),
                                                                                       val_loss, test_loss))
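
The test helper called above is not shown in the post. The following is a minimal sketch of what such an evaluation routine might look like; it is my own illustration, assuming the same globals (device, e, loss_function) as in the training loop and that the reported test metric is a MAPE-style value:

def test(model, val_loader, test_loader, b, c, h):
    # Sketch only: b and c (window counts) are accepted for signature compatibility but unused here.
    model.eval()
    val_losses, mapes, results = [], [], []
    with torch.no_grad():
        for snap in val_loader:
            snap = snap.to(device)
            pred, _ = model(snap, e, h)
            val_losses.append(loss_function(pred.view(-1), snap.y.float()).item())
        for snap in test_loader:
            snap = snap.to(device)
            pred, _ = model(snap, e, h)
            y_true = snap.y.float()
            mapes.append(torch.mean(torch.abs(pred.view(-1) - y_true) / (y_true + 1e-5)).item())
            results.append(pred.view(-1).cpu().numpy())
    return np.mean(val_losses), np.mean(mapes), results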

Training loss
(figure: training loss curve)
Overall, the loss still shows a downward trend; the subsequent validation is not described further here.