
%matplotlib inline

import dgl
from dgl import DGLGraph
import numpy as np

def build_karate_club_graph():
    # All 78 edges are stored in two numpy arrays. One for source endpoints
    # while the other for destination endpoints.
    src = np.array([1, 2, 2, 3, 3, 3, 4, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 10, 10,
        10, 11, 12, 12, 13, 13, 13, 13, 16, 16, 17, 17, 19, 19, 21, 21,
        25, 25, 27, 27, 27, 28, 29, 29, 30, 30, 31, 31, 31, 31, 32, 32,
        32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33,
        33, 33, 33, 33, 33, 33, 33, 33, 33, 33])
    dst = np.array([0, 0, 1, 0, 1, 2, 0, 0, 0, 4, 5, 0, 1, 2, 3, 0, 2, 2, 0, 4,
        5, 0, 0, 3, 0, 1, 2, 3, 5, 6, 0, 1, 0, 1, 0, 1, 23, 24, 2, 23,
        24, 2, 23, 26, 1, 8, 0, 24, 25, 28, 2, 8, 14, 15, 18, 20, 22, 23,
        29, 30, 31, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30,
        31, 32])
    # Edges are directional in DGL; make them bi-directional.
    u = np.concatenate([src, dst])
    v = np.concatenate([dst, src])
    # Construct a DGLGraph from the edge lists.
    return dgl.DGLGraph((u, v))

Using backend: pytorch

G = build_karate_club_graph()
print('We have %d nodes.' % G.number_of_nodes())
print('We have %d edges.' % G.number_of_edges())

We have 34 nodes.
We have 156 edges.
/home/dj/anaconda3/envs/mycondaenv/lib/python3.7/site-packages/dgl/base.py:45: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  return warnings.warn(message, category=category, stacklevel=1)
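The warning above points at the newer constructor. A minimal sketch of the change, assuming DGL 0.5 or later where `dgl.graph` accepts a pair of source/destination lists; the helper below is hypothetical and mirrors the last lines of build_karate_club_graph():

def build_karate_club_graph_v2(src, dst):
    # Hypothetical variant: same bi-directional edges, but built with the
    # recommended dgl.graph constructor instead of dgl.DGLGraph.
    u = np.concatenate([src, dst])
    v = np.concatenate([dst, src])
    return dgl.graph((u.tolist(), v.tolist()))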

import networkx as nx
# Since the actual graph is undirected, we convert it for visualization
# purposes.
nx_G = G.to_networkx().to_undirected()
# Kamada-Kawai layout usually looks pretty for arbitrary graphs
pos = nx.kamada_kawai_layout(nx_G)
nx.draw(nx_G, pos, with_labels=True, node_color=[[.7, .7, .7]])


# In DGL, you can add features for all nodes at once, using a feature tensor that
# batches node features along the first dimension. The code below adds the learnable
# embeddings for all nodes:

import torch
import torch.nn as nn
import torch.nn.functional as F

embed = nn.Embedding(34, 5) # 34 nodes with embedding dim equal to 5


G.ndata['feat'] = embed.weight

# print out node 2's input feature
print(G.ndata['feat'][2])

# print out node 10 and 11's input features
print(G.ndata['feat'][[10, 11]])

tensor([-1.5003,  1.4897,  1.0536,  0.0548, -0.6533], grad_fn=<SelectBackward>)
tensor([[-0.4901,  0.2789, -1.5515, -0.8049, -1.2336],
        [ 0.6916, -1.6434,  0.0686, -0.5103, -0.2574]], grad_fn=<IndexBackward>)
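Because the feature tensor batches one embedding per node along the first dimension, a quick shape check (a small sketch reusing the `G` and `embed` defined above) makes the layout explicit:

# 34 nodes x 5-dimensional embeddings, stacked along dim 0
print(G.ndata['feat'].shape)   # torch.Size([34, 5])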

from dgl.nn.pytorch import GraphConv


class GCN(nn.Module):
    def __init__(self, in_feats, hidden_size, num_classes):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)

    def forward(self, g, inputs):
        h = self.conv1(g, inputs)
        h = torch.relu(h)
        h = self.conv2(g, h)
        return h

# The first layer transforms input features of size 5 to a hidden size of 5.
# The second layer transforms the hidden layer and produces output features of
# size 2, corresponding to the two groups of the karate club.
net = GCN(5, 5, 2)

inputs = embed.weight
labeled_nodes = torch.tensor([0, 33])  # only the instructor and the president nodes are labeled
labels = torch.tensor([0, 1]) # their labels are different
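As a sanity check of the layer sizes described above, one untrained forward pass can be run; a sketch using the `net`, `G`, and `inputs` just defined:

# Each of the 34 nodes receives a 2-dimensional logit vector (one per class).
with torch.no_grad():
    print(net(G, inputs).shape)   # torch.Size([34, 2])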

import itertools

optimizer = torch.optim.Adam(itertools.chain(net.parameters(), embed.parameters()), lr=0.01)


all_logits = []
for epoch in range(50):
    logits = net(G, inputs)
    # we save the logits for visualization later
    all_logits.append(logits.detach())
    logp = F.log_softmax(logits, 1)
    # we only compute loss for labeled nodes
    loss = F.nll_loss(logp[labeled_nodes], labels)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    print('Epoch %d | Loss: %.4f' % (epoch, loss.item()))

Epoch 0 | Loss: 0.7169
Epoch 1 | Loss: 0.6751
Epoch 2 | Loss: 0.6408
Epoch 3 | Loss: 0.6130
Epoch 4 | Loss: 0.5890
Epoch 5 | Loss: 0.5685
Epoch 6 | Loss: 0.5495
Epoch 7 | Loss: 0.5312
Epoch 8 | Loss: 0.5130
Epoch 9 | Loss: 0.4945
Epoch 10 | Loss: 0.4750
Epoch 11 | Loss: 0.4547
Epoch 12 | Loss: 0.4339
Epoch 13 | Loss: 0.4124
Epoch 14 | Loss: 0.3906
Epoch 15 | Loss: 0.3684
Epoch 16 | Loss: 0.3460
Epoch 17 | Loss: 0.3233
Epoch 18 | Loss: 0.3010
Epoch 19 | Loss: 0.2790
Epoch 20 | Loss: 0.2573
Epoch 21 | Loss: 0.2363
Epoch 22 | Loss: 0.2161
Epoch 23 | Loss: 0.1967
Epoch 24 | Loss: 0.1780
Epoch 25 | Loss: 0.1604
Epoch 26 | Loss: 0.1437
Epoch 27 | Loss: 0.1281
Epoch 28 | Loss: 0.1137
Epoch 29 | Loss: 0.1004
Epoch 30 | Loss: 0.0884
Epoch 31 | Loss: 0.0776
Epoch 32 | Loss: 0.0681
Epoch 33 | Loss: 0.0596
Epoch 34 | Loss: 0.0522
Epoch 35 | Loss: 0.0458
Epoch 36 | Loss: 0.0402
Epoch 37 | Loss: 0.0353
Epoch 38 | Loss: 0.0310
Epoch 39 | Loss: 0.0273
Epoch 40 | Loss: 0.0241
Epoch 41 | Loss: 0.0214
Epoch 42 | Loss: 0.0190
Epoch 43 | Loss: 0.0169
Epoch 44 | Loss: 0.0151
Epoch 45 | Loss: 0.0136
Epoch 46 | Loss: 0.0122
Epoch 47 | Loss: 0.0111
Epoch 48 | Loss: 0.0100
Epoch 49 | Loss: 0.0092
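After training, the community assignment for every node can be read off the last saved logits; a minimal sketch using the `all_logits` list filled in the loop above:

# argmax over the 2 logits gives the predicted group (0 or 1) for each node
pred = all_logits[-1].argmax(dim=1)
print(pred)                               # predictions for all 34 nodes
print(pred[0].item(), pred[33].item())    # instructor (node 0) vs. president (node 33)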

import matplotlib.animation as animation
import matplotlib.pyplot as plt

def draw(i):
    cls1color = '#00FFFF'
    cls2color = '#FF00FF'
    pos = {}
    colors = []
    for v in range(34):
        pos[v] = all_logits[i][v].numpy()
        cls = pos[v].argmax()
        colors.append(cls1color if cls else cls2color)
    ax.cla()
    ax.axis('off')
    ax.set_title('Epoch: %d' % i)
    nx.draw_networkx(nx_G.to_undirected(), pos, node_color=colors,
                     with_labels=True, node_size=300, ax=ax)

fig = plt.figure(dpi=150)
fig.clf()
ax = fig.subplots()
draw(0) # draw the prediction of the first epoch
plt.close()

ani = animation.FuncAnimation(fig, draw, frames=len(all_logits), interval=200)
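To actually view or keep the animation, it can be rendered inline or written to a file; a sketch that assumes IPython is available (as it is inside Jupyter) and Pillow is installed for the GIF writer:

from IPython.display import HTML
HTML(ani.to_jshtml())                            # render the animation inline
# ani.save('karate_club.gif', writer='pillow')   # or save it as a GIF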
