Professional Documents
Culture Documents
DLexp 4
# Scatter the raw (X, y) training points before any fitting is done.
plt.plot(X, y, 'o')
plt.title('Input Data')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
class Neuron:
    """A single linear neuron (y_pred = w*x + b) trained by batch gradient descent.

    Reconstructed from a whitespace-mangled paste: the original had `_init_`
    (missing dunder underscores, so it never ran as a constructor), referenced
    `linear_activation`/`grad_w_mse`/`grad_b_mse` whose `def` lines were lost,
    broke out of the epoch loop unconditionally on the first iteration, read
    `curr_loss` before it was ever assigned, and wrote into an unallocated
    `error` array.
    """

    def __init__(self):
        # Parameters stay unset until fit() initialises them.
        self.w = None
        self.b = None

    def linear_activation(self, x):
        """Identity-activation output of the neuron for input x."""
        return self.w * x + self.b

    def mse(self, ew, eb, X, Y):
        """Mean squared error of the line X*ew + eb against targets Y.

        The original body computed a gradient-like expression over undefined
        names with a hard-coded /20; the method's name and its use as a loss
        surface (error[i][j]) require the plain MSE.
        """
        y_pred = X * ew + eb
        return np.mean((Y - y_pred) ** 2)

    def grad_w_mse(self, x, y):
        """d(MSE)/dw at the current parameters, averaged over the batch."""
        y_pred = self.linear_activation(x)
        return np.mean((y_pred - y) * x)

    def grad_b_mse(self, x, y):
        """d(MSE)/db at the current parameters, averaged over the batch."""
        y_pred = self.linear_activation(x)
        return np.mean(y_pred - y)

    def fit(self, X, Y, epochs=100, learning_rate=0.01, tol=0.0):
        """Train w and b by batch gradient descent.

        Returns (weight, bias, loss): per-epoch parameter and loss histories.
        Also stores self.error, a len(weight) x len(bias) MSE grid over every
        visited (w, b) pair, for contour plotting. Stops early when the loss
        changes by no more than `tol` between consecutive epochs (the
        original's `break` fired unconditionally on epoch 0).
        """
        self.w = 0.1
        self.b = 0.1
        prev_loss = None
        loss, weight, bias = [], [], []
        for _ in range(epochs):
            dw = self.grad_w_mse(X, Y)
            db = self.grad_b_mse(X, Y)
            self.w -= learning_rate * dw
            self.b -= learning_rate * db
            weight.append(self.w)
            bias.append(self.b)
            curr_loss = self.mse(self.w, self.b, X, Y)
            loss.append(curr_loss)
            # Early stopping against the PREVIOUS epoch's loss; the original
            # read curr_loss before any assignment.
            if prev_loss is not None and abs(prev_loss - curr_loss) <= tol:
                break
            prev_loss = curr_loss
        # Loss surface over every (weight, bias) pair visited during training
        # (the original wrote error[i][j] without allocating `error`).
        self.error = np.zeros((len(weight), len(bias)))
        for i in range(len(weight)):
            for j in range(len(bias)):
                self.error[i][j] = self.mse(weight[i], bias[j], X, Y)
        return weight, bias, loss
# Visualise training. NOTE(review): assumes `weight`, `bias`, `loss` and the
# `error` MSE grid are in scope from the training code above — confirm.
levels = np.sort(np.array(loss))      # contour levels at the observed losses
weight = np.array(weight)
bias = np.array(bias)

# Contour of the loss surface over the visited (weight, bias) grid.
# error[i][j] is the loss at (weight[i], bias[j]); contourf expects Z indexed
# [y][x], hence the transpose. The original called plt.show() three times
# before drawing anything and set the axis labels after show() (no effect),
# and never used `levels` or `error` at all.
fig, ax = plt.subplots(1, 1)
ax.contourf(weight, bias, np.array(error).T, levels=np.unique(levels))
ax.set_xlabel("Weight")
ax.set_ylabel("Bias")
plt.show()

# Loss as a function of the weight trajectory.
plt.plot(weight, loss)
plt.xlabel('Weight')
plt.ylabel('Loss')
plt.show()
sn = Neuron()  # instantiate the neuron; NOTE(review): `sn` is never used in this excerpt
OUTPUT