Professional Documents
Culture Documents
Big Data Assignment - 7
[2]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris,load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
import tqdm
In [33]:
# IRIS Dataset: 150 samples, 4 numeric features, 3 flower classes.
data_iris = load_iris()
X, y = data_iris.data, data_iris.target
# Human-readable metadata: class names and feature (column) names.
species, features = data_iris.target_names, data_iris.feature_names
In [34]:
Out[34]:
Out[35]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
In [36]:
class Model(nn.Module):
    """3-class MLP classifier for the Iris features.

    Architecture: input_dim -> 100 -> 100 -> 3 (raw logits;
    CrossEntropyLoss applies log-softmax internally).
    """

    def __init__(self, input_dim):
        super(Model, self).__init__()
        self.layer1 = nn.Linear(input_dim, 100)
        self.layer2 = nn.Linear(100, 100)
        self.layer3 = nn.Linear(100, 3)

    def forward(self, x):
        # BUG FIX: the original class defined its layers but no forward(),
        # so calling model(X) would raise NotImplementedError.
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        return self.layer3(x)  # logits, no activation
In [38]:
# Instantiate the MLP with one input unit per feature column.
# NOTE(review): X_train_K is expected to come from a train_test_split cell
# that is not visible in this extract — confirm it runs before this cell.
model = Model(X_train_K.shape[1])
optimizer = torch.optim.Adam(model.parameters(), lr=0.002)  # Adam, lr=2e-3
loss_fn = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
In [39]:
EPOCHS = 85

# Convert the numpy splits to torch tensors. torch.autograd.Variable is
# deprecated (a no-op since PyTorch 0.4), so plain tensors are used.
X_train_K = torch.from_numpy(X_train_K).float()
y_train_K = torch.from_numpy(y_train_K).long()
X_test_K = torch.from_numpy(X_test_K).float()
y_test_K = torch.from_numpy(y_test_K).long()

loss_lst = np.zeros((EPOCHS,))       # train loss per epoch
accuracy_lst = np.zeros((EPOCHS,))   # test accuracy per epoch

# BUG FIX: the original cell had no training loop — `epoch` was undefined
# (NameError) and loss_lst was never filled. Train with full-batch updates,
# recording train loss and held-out accuracy each epoch.
for epoch in tqdm.trange(EPOCHS):
    y_pred = model(X_train_K)
    loss = loss_fn(y_pred, y_train_K)
    loss_lst[epoch] = loss.item()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Evaluate on the test split without tracking gradients.
    with torch.no_grad():
        y_pred = model(X_test_K)
        correct = (torch.argmax(y_pred, dim=1) == y_test_K).type(torch.FloatTensor)
        accuracy_lst[epoch] = correct.mean()
In [40]:
# BUG FIX: axs_1/axs_2 were used without ever being created (missing
# plt.subplots call). Two stacked panels sharing the epoch axis:
# accuracy on top, loss below.
fig, (axs_1, axs_2) = plt.subplots(2, sharex=True)
axs_1.plot(accuracy_lst)
axs_1.set_ylabel("Accuracy")
axs_2.plot(loss_lst)
axs_2.set_ylabel("Loss")
axs_2.set_xlabel("Epochs");
In [41]:
# DIGITS Dataset: 8x8 grayscale digit images flattened to 64 features,
# 10 classes (digits 0-9).
data_digits = load_digits()
X, y = data_digits.data, data_digits.target
species, features = data_digits.target_names, data_digits.feature_names

# Standardize each pixel feature to zero mean / unit variance so the
# network trains on comparable scales.
scaler = StandardScaler()
X_scaled_value = scaler.fit_transform(X)
Out[42]:
...,
In [43]:
Out[43]:
In [44]:
class Model(nn.Module):
    """10-class MLP classifier for the 8x8 digit images.

    Architecture: input_dim -> 100 -> 100 -> 10 (raw logits;
    CrossEntropyLoss applies log-softmax internally).
    """

    def __init__(self, input_dim):
        super(Model, self).__init__()
        self.layer1 = nn.Linear(input_dim, 100)
        self.layer2 = nn.Linear(100, 100)
        self.layer3 = nn.Linear(100, 10)

    def forward(self, x):
        # BUG FIX: the original class defined its layers but no forward(),
        # so calling model(X) would raise NotImplementedError.
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        return self.layer3(x)  # logits, no activation
In [46]:
# Instantiate the digits MLP with one input unit per pixel feature (64).
# NOTE(review): X_train_K should come from a train_test_split on
# X_scaled_value in a cell not visible in this extract — confirm ordering.
model = Model(X_train_K.shape[1])
optimizer = torch.optim.Adam(model.parameters(), lr=0.002)  # Adam, lr=2e-3
loss_fn = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
In [47]:
EPOCHS = 85

# Convert the numpy splits to torch tensors. torch.autograd.Variable is
# deprecated (a no-op since PyTorch 0.4), so plain tensors are used.
X_train_K = torch.from_numpy(X_train_K).float()
y_train_K = torch.from_numpy(y_train_K).long()
X_test_K = torch.from_numpy(X_test_K).float()
y_test_K = torch.from_numpy(y_test_K).long()

loss_lst = np.zeros((EPOCHS,))       # train loss per epoch
accuracy_lst = np.zeros((EPOCHS,))   # test accuracy per epoch

# BUG FIX: the original cell had no training loop — `epoch` was undefined
# (NameError) and loss_lst was never filled. Train with full-batch updates,
# recording train loss and held-out accuracy each epoch.
for epoch in tqdm.trange(EPOCHS):
    y_pred = model(X_train_K)
    loss = loss_fn(y_pred, y_train_K)
    loss_lst[epoch] = loss.item()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Evaluate on the test split without tracking gradients.
    with torch.no_grad():
        y_pred = model(X_test_K)
        correct = (torch.argmax(y_pred, dim=1) == y_test_K).type(torch.FloatTensor)
        accuracy_lst[epoch] = correct.mean()
In [48]:
# BUG FIX: axs_1/axs_2 were used without ever being created (missing
# plt.subplots call). Two stacked panels sharing the epoch axis:
# accuracy on top, loss below.
fig, (axs_1, axs_2) = plt.subplots(2, sharex=True)
axs_1.plot(accuracy_lst)
axs_1.set_ylabel("Accuracy")
axs_2.plot(loss_lst)
axs_2.set_ylabel("Loss")
axs_2.set_xlabel("Epochs");
In [ ]: