
In [18]: import torch
from torch import nn

from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
%matplotlib inline

hyperparameters
In [22]: epochs = 500
info_step = 10

batch_size = 128
lr = 0.00001
z_dim = 64

loss_func = nn.BCEWithLogitsLoss()

# pick the GPU if one is available, otherwise fall back to the CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# MNIST digits as [0, 1] tensors served in mini-batches (shuffle=True is assumed; the original line is truncated)
dataLoader = DataLoader(MNIST('.', download=True, transform=transforms.ToTensor()),
                        batch_size=batch_size, shuffle=True)
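Since the targets used later are plain 0/1 tensors, BCEWithLogitsLoss lets the discriminator return raw logits and applies the sigmoid internally. A minimal check of that equivalence (a sketch, not part of the original notebook):

logits = torch.randn(4, 1)
targets = torch.ones(4, 1)
with_logits = nn.BCEWithLogitsLoss()(logits, targets)
manual = nn.BCELoss()(torch.sigmoid(logits), targets)
print(torch.allclose(with_logits, manual))   # True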

dataset
In [3]: x , y = next(iter(dataLoader))
print(x.shape)
print(y.shape)
print(y[:10])

torch.Size([128, 1, 28, 28])
torch.Size([128])
tensor([0, 8, 0, 2, 1, 0, 0, 5, 2, 8])

visualization
In [4]: def show(tensor, ch=1, shape=(28, 28), nrows=4, image_number=16):
    # move to CPU, drop the graph and reshape to (N, C, H, W)
    data = tensor.detach().to('cpu').view(-1, ch, shape[0], shape[1])
    # tile the first image_number images into one grid and put channels last for imshow
    data_ = make_grid(data[:image_number], nrow=nrows).permute(1, 2, 0)
    plt.imshow(data_)
    plt.show()
    return data_.shape
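A quick way to sanity-check the helper is to call it on the batch of real digits loaded above (a usage sketch):

show(x)   # plots a 4x4 grid of real MNIST digits and returns the grid shape, e.g. torch.Size([122, 122, 3])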

Generator
In [10]: # model
def gen_block(in_dim, out_dim):
    # Linear -> BatchNorm -> ReLU building block for the generator
    return nn.Sequential(nn.Linear(in_dim, out_dim),
                         nn.BatchNorm1d(out_dim),
                         nn.ReLU())

class generator(nn.Module):
    def __init__(self, out_dim=(28*28), hidden_dim=128, z_dim=z_dim):
        super().__init__()
        self.gen = nn.Sequential(
            gen_block(z_dim, hidden_dim),
            gen_block(hidden_dim, hidden_dim*2),
            gen_block(hidden_dim*2, hidden_dim*4),
            gen_block(hidden_dim*4, hidden_dim*8),
            nn.Linear(hidden_dim*8, out_dim),
            nn.Sigmoid()   # pixel values in [0, 1], matching ToTensor()
        )

    def forward(self, noise):
        return self.gen(noise)

def noise_generator(n_samples, z_dim=64):
    # draw n_samples latent vectors from a standard normal distribution
    return torch.randn(n_samples, z_dim).to(device)
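A minimal shape check for the generator, using only the definitions above (the notebook instantiates the model it actually trains later, in the optimizer section):

g = generator().to(device)
out = g(noise_generator(8, z_dim))
print(out.shape)                                      # torch.Size([8, 784])
print(out.min().item() >= 0, out.max().item() <= 1)   # True True -- Sigmoid keeps pixels in [0, 1]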

In [11]: noise_generator(2,3)

Out[11]: tensor([[-0.5539,  0.7412, -2.0922],
                 [-0.7630, -1.0138,  1.1018]], device='cuda:0')

discriminator
In [12]: def dis_block(i_dim, out_dim):
    # Linear -> LeakyReLU building block for the discriminator
    return nn.Sequential(
        nn.Linear(i_dim, out_dim),
        nn.LeakyReLU(0.2)
    )

class discriminator(nn.Module):
    def __init__(self, in_dim=(28*28), hidden_dim=128):
        super().__init__()
        self.dis = nn.Sequential(
            dis_block(in_dim, hidden_dim*4),
            dis_block(hidden_dim*4, hidden_dim*2),
            dis_block(hidden_dim*2, 1)   # one score per image, treated as a raw logit by BCEWithLogitsLoss
        )

    def forward(self, image):
        return self.dis(image)
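The discriminator ends in a 1-unit block, so it emits one score per image and no sigmoid is applied here. A small shape check (a sketch, not from the notebook):

d = discriminator().to(device)
scores = d(torch.randn(8, 28*28, device=device))
print(scores.shape)   # torch.Size([8, 1])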

optimizer
In [13]: generator_ = generator().to(device)
gen_optim = torch.optim.Adam(generator_.parameters(),lr=lr)

discriminator_ = discriminator().to(device)
dis_optim = torch.optim.Adam(discriminator_.parameters(),lr=lr)

testing
In [14]: noise = noise_generator(batch_size,z_dim)
data = generator_(noise)

In [15]: show(data)
[image: 4x4 grid of samples from the untrained generator]
Out[15]: torch.Size([122, 122, 3])
In [ ]: discriminator_(data)

loss functions
In [20]: # generator loss: push D(G(z)) towards the "real" label
def gen_loss(batch_size, z_dim=64):
    noise = noise_generator(batch_size, z_dim)
    fake_img = generator_(noise)
    fake_pred = discriminator_(fake_img)
    target = torch.ones_like(fake_pred)
    generator_loss = loss_func(fake_pred, target)
    return generator_loss

# discriminator loss: push real images towards 1 and generated images towards 0
def dis_loss(batch_size, real_image):
    noise = noise_generator(batch_size, z_dim)
    fake_img = generator_(noise)
    fake_pred = discriminator_(fake_img.detach())   # detach so the generator receives no gradient here
    fake_target = torch.zeros_like(fake_pred)

    real_pred = discriminator_(real_image)
    real_target = torch.ones_like(real_pred)

    fake_loss = loss_func(fake_pred, fake_target)
    real_loss = loss_func(real_pred, real_target)   # (prediction, target) order
    dis_loss_ = (fake_loss + real_loss) / 2
    return dis_loss_
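With an all-ones target, BCEWithLogitsLoss reduces to -log σ(D(G(z))), i.e. gen_loss is the standard non-saturating generator loss, and dis_loss averages the real and fake terms of the usual discriminator objective. A numerical check of the first identity (illustrative sketch only):

logits = torch.tensor([0.5, -1.0, 2.0])
manual = -torch.log(torch.sigmoid(logits)).mean()
builtin = nn.BCEWithLogitsLoss()(logits, torch.ones_like(logits))
print(torch.allclose(manual, builtin))   # True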

training and visualization

In [30]: for epoch in tqdm(range(epochs)):
    for img, _ in dataLoader:
        # generator step
        gen_optim.zero_grad()
        gen_loss_ = gen_loss(batch_size, z_dim)
        gen_loss_.backward()
        gen_optim.step()

        # discriminator step
        dis_optim.zero_grad()
        batch_len = len(img)
        img_ = img.view(batch_len, -1).to(device)   # flatten 28x28 images to 784-dim vectors
        dis_loss_ = dis_loss(batch_len, img_)       # use the actual batch size (the last batch may be smaller)
        dis_loss_.backward()
        dis_optim.step()

    # every info_step epochs, plot a grid of generated samples
    if epoch == info_step:
        noise = noise_generator(batch_size, z_dim)
        fake_img = generator_(noise)
        show(fake_img)
        info_step += 10
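After training, the weights can be persisted so the generator can be reused without retraining; one possible way to do this (not in the original notebook, file names are illustrative):

torch.save(generator_.state_dict(), 'generator_mnist.pt')
torch.save(discriminator_.state_dict(), 'discriminator_mnist.pt')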
In [67]: noise = noise_generator(batch_size, z_dim)
fake_img = generator_(noise)
show(fake_img)

[image: 4x4 grid of generated digits after training]
Out[67]: torch.Size([122, 122, 3])
