第六周.03.GIN代码实操
Posted oldmao_2001
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了第六周.03.GIN代码实操相关的知识,希望对你有一定的参考价值。
本文内容整理自深度之眼《GNN核心能力培养计划》
公式输入请参考: 在线Latex公式
接上次GIN论文带读,这次对GIN进行实现,代码参考:
https://github.com/dmlc/dgl/blob/master/examples/pytorch/gin/main.py
这里要重点关注原文的公式4.1:
$$h_v^{(k)}=\mathrm{MLP}^{(k)}\left(\left(1+\epsilon^{(k)}\right)\cdot h_v^{(k-1)}+\sum_{u\in\mathcal{N}(v)}h_u^{(k-1)}\right)\tag{4.1}$$
注意特征 $h$ 的下标与上标:左边的 $h_v$ 代表当前节点,上标 $k$ 是GIN的层数,$k-1$ 表示上一层的结果;右边的 $h_u,\ u\in\mathcal{N}(v)$ 代表邻居节点。整个公式的意思是:把上一层得到的当前节点特征与邻居节点汇聚来的特征相加,再经过多层MLP,得到当前节点在第 $k$ 层的特征。
GINConv
DGL的GINConv看这里:
https://docs.dgl.ai/_modules/dgl/nn/pytorch/conv/ginconv.html#GINConv
代码里面最核心的部分,就是和上面4.1对应的那句:
rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']#对应公式的最大括号里面的东西
#这里对应公式中的多层MLP
if self.apply_func is not None:
rst = self.apply_func(rst)
GIN 模型
"""
How Powerful are Graph Neural Networks
https://arxiv.org/abs/1810.00826
https://openreview.net/forum?id=ryGs6iA5Km
Author's implementation: https://github.com/weihua916/powerful-gnns
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch.conv import GINConv
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
# Custom node-feature update used inside GINConv: MLP -> BatchNorm -> ReLU.
# This is the "MLP^{(k)}" term of Eq. 4.1 applied after neighbor aggregation.
class ApplyNodeFunc(nn.Module):
    """Update the node feature h_v with an MLP, BatchNorm1d and ReLU.

    The wrapped ``mlp`` must expose an ``output_dim`` attribute so the
    BatchNorm layer can be sized to match the MLP's output width.
    """

    def __init__(self, mlp):
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        # Normalize over the MLP's output features.
        self.bn = nn.BatchNorm1d(self.mlp.output_dim)

    def forward(self, h):
        # Fused pipeline: project, normalize, then apply non-linearity.
        return F.relu(self.bn(self.mlp(h)))
class MLP(nn.Module):
    """Multi-layer perceptron with a linear (un-activated) output layer.

    Parameters
    ----------
    num_layers: int
        The number of linear layers. 1 means a plain linear model.
    input_dim: int
        The dimensionality of input features.
    hidden_dim: int
        The dimensionality of hidden units at ALL hidden layers.
    output_dim: int
        The number of classes for prediction.

    Raises
    ------
    ValueError
        If ``num_layers`` is less than 1.
    """

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
        super(MLP, self).__init__()
        self.linear_or_not = True  # default is linear model (single layer)
        self.num_layers = num_layers
        self.output_dim = output_dim
        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        if num_layers == 1:
            # Degenerate case: a single linear map, input straight to output.
            self.linear = nn.Linear(input_dim, output_dim)
        else:
            self.linear_or_not = False
            # Layer widths: input -> hidden -> ... -> hidden -> output.
            dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
            self.linears = torch.nn.ModuleList(
                nn.Linear(dims[i], dims[i + 1]) for i in range(num_layers))
            # One BatchNorm per layer except the final (linear-output) one.
            self.batch_norms = torch.nn.ModuleList(
                nn.BatchNorm1d(hidden_dim) for _ in range(num_layers - 1))

    def forward(self, x):
        """Run the MLP; the last layer has no BatchNorm/ReLU."""
        if self.linear_or_not:
            return self.linear(x)
        h = x
        # zip stops at batch_norms (num_layers - 1 entries), so this applies
        # linear+BN+ReLU to every layer except the last.
        for bn, lin in zip(self.batch_norms, self.linears):
            h = F.relu(bn(lin(h)))
        # Final layer: pure linear projection to output_dim.
        return self.linears[-1](h)
class GIN(nn.Module):
    """GIN model ("How Powerful are Graph Neural Networks", ICLR 2019).

    Stacks ``num_layers - 1`` GINConv layers (each combining a node with its
    neighbors per Eq. 4.1, followed by BatchNorm + ReLU), then pools every
    layer's node representations into a graph representation and sums the
    per-layer prediction scores.

    Parameters
    ----------
    num_layers: int
        Number of GIN layers in the network.
    num_mlp_layers: int
        Number of linear layers in each per-layer MLP.
    input_dim: int
        The dimensionality of input features.
    hidden_dim: int
        The dimensionality of hidden units at ALL layers.
    output_dim: int
        The number of classes for prediction.
    final_dropout: float
        Dropout ratio on the final linear layer.
    learn_eps: boolean
        If True, learn epsilon to distinguish center nodes from neighbors;
        if False, aggregate neighbors and center nodes altogether.
    graph_pooling_type: str
        How to aggregate entire nodes in a graph (sum, mean or max);
        may differ from the neighbor aggregation below.
    neighbor_pooling_type: str
        How to aggregate neighbors (sum, mean, or max) — the summation
        term of Eq. 4.1.

    Raises
    ------
    NotImplementedError
        If ``graph_pooling_type`` is not one of sum/mean/max.
    """

    def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
                 output_dim, final_dropout, learn_eps, graph_pooling_type,
                 neighbor_pooling_type):
        super(GIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps

        # One GINConv (wrapping an MLP via ApplyNodeFunc) per layer except
        # the last; only the first layer consumes input_dim features.
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        for layer_idx in range(self.num_layers - 1):
            in_dim = input_dim if layer_idx == 0 else hidden_dim
            mlp = MLP(num_mlp_layers, in_dim, hidden_dim, hidden_dim)
            # See https://docs.dgl.ai/api/python/nn.pytorch.html#ginconv
            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # One readout head per layer (including the raw-input "layer 0"),
        # mapping that layer's pooled representation to a prediction score.
        self.linears_prediction = torch.nn.ModuleList()
        for layer_idx in range(num_layers):
            in_dim = input_dim if layer_idx == 0 else hidden_dim
            self.linears_prediction.append(nn.Linear(in_dim, output_dim))

        self.drop = nn.Dropout(final_dropout)

        # Whole-graph readout; dispatch on the requested pooling type.
        pool_classes = {'sum': SumPooling, 'mean': AvgPooling, 'max': MaxPooling}
        if graph_pooling_type not in pool_classes:
            raise NotImplementedError
        self.pool = pool_classes[graph_pooling_type]()

    def forward(self, g, h):
        # Keep every layer's node representations (including the input).
        hidden_rep = [h]
        for conv, bn in zip(self.ginlayers, self.batch_norms):
            # Eq. 4.1 aggregation, then BatchNorm and ReLU.
            h = F.relu(bn(conv(g, h)))
            hidden_rep.append(h)

        # Pool each layer's node features over the graph, project to class
        # scores, and sum the (dropout-regularized) per-layer scores.
        score_over_layer = 0
        for head, rep in zip(self.linears_prediction, hidden_rep):
            pooled = self.pool(g, rep)
            score_over_layer += self.drop(head(pooled))
        return score_over_layer
parse
这段解析器代码如果直接在 Jupyter 里运行需要稍作修改(见文末 parse_args 一行),否则会报:ipykernel_launcher.py: error: unrecognized arguments
"""Parser for arguments
Put all arguments in one file and group similar arguments
"""
import argparse
class Parser():
    """Command-line argument container for the GIN experiment.

    Groups all arguments (dataset, device, network, graph pooling, learning)
    in one place; parsing happens immediately on construction and the result
    is exposed as ``self.args``.
    """

    def __init__(self, description):
        """Build the argparse parser, register all arguments, and parse."""
        self.parser = argparse.ArgumentParser(description=description)
        self.args = None
        self._parse()

    def _parse(self):
        add = self.parser.add_argument
        # dataset
        add('--dataset', type=str, default="MUTAG",
            choices=['MUTAG', 'COLLAB', 'IMDBBINARY', 'IMDBMULTI'],
            help='name of dataset (default: MUTAG)')
        add('--batch_size', type=int, default=32,
            help='batch size for training and validation (default: 32)')
        add('--fold_idx', type=int, default=0,
            help='the index(<10) of fold in 10-fold validation.')
        add('--filename', type=str, default="",
            help='output file')
        # device
        add('--disable-cuda', action='store_true',
            help='Disable CUDA')
        add('--device', type=int, default=0,
            help='which gpu device to use (default: 0)')
        # net
        add('--num_layers', type=int, default=5,
            help='number of layers (default: 5)')
        add('--num_mlp_layers', type=int, default=2,
            help='number of MLP layers(default: 2). 1 means linear model.')
        add('--hidden_dim', type=int, default=64,
            help='number of hidden units (default: 64)')
        # graph: whole-graph readout and neighbor aggregation types
        add('--graph_pooling_type', type=str,
            default="sum", choices=["sum", "mean", "max"],
            help='type of graph pooling: sum, mean or max')
        add('--neighbor_pooling_type', type=str,
            default="sum", choices=["sum", "mean", "max"],
            help='type of neighboring pooling: sum, mean or max')
        add('--learn_eps', action="store_true",
            help='learn the epsilon weighting')
        # learning
        add('--seed', type=int, default=0,
            help='random seed (default: 0)')
        add('--epochs', type=int, default=350,
            help='number of epochs to train (default: 350)')
        add('--lr', type=float, default=0.01,
            help='learning rate (default: 0.01)')
        add('--final_dropout', type=float, default=0.5,
            help='final layer dropout (default: 0.5)')
        # Parse an empty argv so this also works inside Jupyter, where
        # sys.argv carries kernel arguments argparse would reject.
        self.args = self.parser.parse_args(args=[])
问题记录
1.在jupyter里面调用py文件,最好在ipynb文件最前面加:
%load_ext autoreload
%autoreload 2
这样修改py文件会自动重新加载,否则改了py文件还是和没改效果一样。
2.代码可以运行,但是下载数据集报错,不知道是不是要挂梯子。
ConnectionError: HTTPSConnectionPool(host=‘raw.githubusercontent.com’, port=443): Max retries exceeded with url: /weihua916/powerful-gnns/master/dataset.zip (Caused by NewConnectionError(’<urllib3.connection.HTTPSConnection object at 0x000001AF7E84D280>: Failed to establish a new connection: [Errno 11004] getaddrinfo failed’))
以上是关于第六周.03.GIN代码实操的主要内容,如果未能解决你的问题,请参考以下文章