def forward(self, graph, nfeats, efeats, edge_weight=None, get_attention=False):
    r"""
    Compute new node and edge features.

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    nfeats : torch.Tensor or pair of torch.Tensor
        If a torch.Tensor is given, the input node feature of shape :math:`(N, D_{in})`
        where:
        :math:`D_{in}` is the size of the input node feature,
        :math:`N` is the number of nodes.
        If a pair of torch.Tensor is given, the pair must contain two tensors of shape
        :math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.
    efeats : torch.Tensor
        The input edge feature of shape :math:`(E, F_{in})`
        where:
        :math:`F_{in}` is the size of the input edge feature,
        :math:`E` is the number of edges.
    edge_weight : torch.Tensor, optional
        A 1D tensor of edge weight values. Shape: :math:`(|E|,)`.
    get_attention : bool, optional
        Whether to return the attention values. Default to False.

    Returns
    -------
    pair of torch.Tensor
        Node output features followed by edge output features.
        The node output feature is of shape :math:`(N, H, D_{out})`.
        The edge output feature is of shape :math:`(E, H, F_{out})`
        where:
        :math:`H` is the number of heads,
        :math:`D_{out}` is the size of the output node feature,
        :math:`F_{out}` is the size of the output edge feature.
    torch.Tensor, optional
        The attention values of shape :math:`(E, H, 1)`.
        This is returned only when :attr:`get_attention` is ``True``.
    """
    with graph.local_scope():
        if (graph.in_degrees() == 0).any():
            raise DGLError(
                "There are 0-in-degree nodes in the graph, "
                "output for those nodes will be invalid. "
                "This is harmful for some applications, "
                "causing silent performance regression. "
                "Adding self-loop on the input graph by "
                "calling `g = dgl.add_self_loop(g)` will resolve "
                "the issue."
            )
        # Compute edge attention: same trick as in dgl.nn.pytorch.GATConv,
        # but the scores also include edge features.
        # https://github.com/dmlc/dgl/blob/master/python/dgl/nn/pytorch/conv/gatconv.py
        if isinstance(nfeats, tuple):
            nfeats_src, nfeats_dst = nfeats
        else:
            nfeats_src = nfeats_dst = nfeats

        f_ni = self.fc_ni(nfeats_src)
        f_nj = self.fc_nj(nfeats_dst)
        f_fij = self.fc_fij(efeats)
        graph.srcdata.update({"f_ni": f_ni})
        graph.dstdata.update({"f_nj": f_nj})
        # Add the n_i, n_j factors along each edge.
        graph.apply_edges(fn.u_add_v("f_ni", "f_nj", "f_tmp"))
        # Add f_ij to the node factors.
        f_out = graph.edata.pop("f_tmp") + f_fij
        if self.bias is not None:
            f_out = f_out + self.bias
        f_out = nn.functional.leaky_relu(f_out)
        f_out = f_out.view(-1, self._num_heads, self._out_edge_feats)
        # Compute the attention factor.
        e = (f_out * self.attn).sum(dim=-1).unsqueeze(-1)
        graph.edata["a"] = edge_softmax(graph, e)
        if edge_weight is not None:
            graph.edata["a"] = graph.edata["a"] * edge_weight.tile(
                1, self._num_heads, 1
            ).transpose(0, 2)
        graph.srcdata["h_out"] = self.fc_node_src(nfeats_src).view(
            -1, self._num_heads, self._out_node_feats
        )
        # Aggregate: weighted sum of neighbor messages.
        graph.update_all(fn.u_mul_e("h_out", "a", "m"), fn.sum("m", "h_out"))

        h_out = graph.dstdata["h_out"].view(-1, self._num_heads, self._out_node_feats)
        if get_attention:
            return h_out, f_out, graph.edata.pop("a")
        else:
            return h_out, f_out
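
# Usage sketch: a minimal example of calling this forward method through the
# public EGATConv layer. Assumes DGL's PyTorch backend; the toy graph and the
# feature/head sizes below are illustrative choices, not values from the source.
import dgl
import torch
from dgl.nn import EGATConv

# Build a tiny directed graph and add self-loops so every node has in-degree > 0
# (otherwise forward() raises the DGLError above).
g = dgl.graph(([0, 1, 2], [1, 2, 0]))
g = dgl.add_self_loop(g)

nfeats = torch.randn(g.num_nodes(), 10)  # (N, D_in)
efeats = torch.randn(g.num_edges(), 5)   # (E, F_in), one row per edge incl. self-loops

conv = EGATConv(in_node_feats=10, in_edge_feats=5,
                out_node_feats=8, out_edge_feats=8, num_heads=3)

h_out, f_out = conv(g, nfeats, efeats)
print(h_out.shape)  # torch.Size([3, 3, 8])  -> (N, H, D_out)
print(f_out.shape)  # torch.Size([6, 3, 8])  -> (E, H, F_out)

# With get_attention=True a third tensor of shape (E, H, 1) is returned.
h_out, f_out, attn = conv(g, nfeats, efeats, get_attention=True)
print(attn.shape)   # torch.Size([6, 3, 1])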