Professional Documents
Culture Documents
Machine Learning Hands-On Programs Program 1: Linear Regression - Single Variable Linear Regression
Machine Learning Hands-On Programs Program 1: Linear Regression - Single Variable Linear Regression
Performing linear regression for a single variable using Salary_Data.csv, which
consists of two features: Salary and YearsExperience. Each column contains 30 rows of
information. The feature "Salary" describes each person's salary according to his/her
years of experience.
# Standard scientific-Python stack for the regression demo.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# NOTE(review): X and y are defined in notebook cells missing from this
# extract (presumably loaded from Salary_Data.csv as described above) —
# confirm against the original notebook.
print(y)
print(X)
"""**Splitting the dataset into the Training set and Test set**"""
# NOTE(review): `regressor` and `X_test` are created in cells missing from
# this extract; y_pred holds the fitted model's predictions for the
# held-out test set.
y_pred = regressor.predict(X_test)
print(y_pred)
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
# Standard scientific-Python stack used throughout the notebook.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Mount Google Drive so the dataset CSV can be read from the user's Drive.
from google.colab import drive
drive.mount('/content/drive')

# Importing the dataset.
# NOTE(review): the PDF extraction broke this path string across two lines,
# which is a SyntaxError; the literal is rejoined here onto one line.
dataset = pd.read_csv('/content/drive/My Drive/Machine Learning/Chapter1/MachineLearning-master/Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values  # every column except the last -> features
y = dataset.iloc[:, -1].values   # last column -> target label

# Splitting the dataset into the Training set and Test set
# (75% train / 25% test; fixed random_state makes the split reproducible).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
print(X_train)
print(y_train)
print(X_test)
print(y_test)

# Feature Scaling: standardise each feature to zero mean / unit variance.
# The scaler is fitted on the training data only and merely applied to the
# test data, so no test-set statistics leak into training.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
print(X_train)
print(X_test)

# Training the Logistic Regression model on the (scaled) Training set.
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
# Clamp the axes to the meshgrid ranges.
# NOTE(review): X1, X2, X_set, y_set and ListedColormap are created in
# cells missing from this extract (presumably an np.meshgrid over the two
# scaled features) — confirm against the original notebook.
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
# Scatter each class in its own colour (index 0 -> red, index 1 -> green).
# NOTE(review): the loop body lost its indentation during PDF extraction;
# restored here.
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
Dept. of MCA
KLE Technological University
# Splitting the dataset into the Training set and Test set
# (75% train / 25% test; fixed random_state makes the split reproducible).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
print(X_train)
print(y_train)
print(X_test)
print(y_test)
# Feature Scaling: standardise each feature to zero mean / unit variance.
# The scaler `sc` is fitted on the training data only and merely applied to
# the test data, which avoids leaking test-set statistics into training.
# `sc` is reused later to scale new observations before prediction.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
print(X_train)
print(X_test)
Dept. of MCA
KLE Technological University
# Predict a single new observation: age 30, estimated salary 87,000.
# The raw values must be transformed with the SAME StandardScaler (`sc`)
# fitted on the training data before being fed to the classifier.
print(classifier.predict(sc.transform([[30,87000]])))
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
# Adding the 2nd hidden layer with 12 ReLU units.
# NOTE(review): the original comment claimed "8 activation nodes", but the
# code adds Dense(12); the comment is corrected here to match the code.
model.add(Dense(12, activation='relu'))
# Adding the output layer: a single sigmoid unit for binary classification.
model.add(Dense(1, activation='sigmoid'))
# Compile the keras model:
#  - loss: binary_crossentropy (error/cost function for binary targets)
#  - optimizer: 'adam' (a stochastic-gradient-descent variant)
#  - metric: 'accuracy' to evaluate the performance of the model
# NOTE(review): the model.compile(...) call itself is missing from this
# extract — only its explanatory comments survived.
Dept. of MCA
KLE Technological University
Epoch 1/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5050 - accuracy: 0.7591
Epoch 2/200
77/77 [==============================] - 0s 963us/step - loss: 0.5148 - accuracy:
0.7435
Epoch 3/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5155 - accuracy: 0.7396
Epoch 4/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5197 - accuracy: 0.7565
Epoch 5/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5281 - accuracy: 0.7578
Epoch 6/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5223 - accuracy: 0.7526
Epoch 7/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5092 - accuracy: 0.7513
Epoch 8/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5072 - accuracy: 0.7617
Epoch 9/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5241 - accuracy: 0.7552
Epoch 10/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5311 - accuracy: 0.7292
Epoch 11/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5194 - accuracy: 0.7539
Epoch 12/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5436 - accuracy: 0.7292
Epoch 13/200
...
Epoch 199/200
77/77 [==============================] - 0s 1ms/step - loss: 0.4924 - accuracy: 0.7383
Epoch 200/200
77/77 [==============================] - 0s 1ms/step - loss: 0.4948 - accuracy: 0.7617
Dept. of MCA
KLE Technological University
# Make class predictions with the model.
# NOTE(review): Sequential.predict_classes() was removed in TensorFlow 2.6;
# the documented replacement for a sigmoid output is to threshold
# model.predict() at 0.5, which yields the same (n, 1) int array.
predictions = (model.predict(X) > 0.5).astype("int32")
# Summarize the first 5 cases: input row => predicted class (expected label).
# The loop body lost its indentation during PDF extraction; restored here.
for i in range(5):
    print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i], y[i]))
Drive already mounted at /content/drive; to attempt to forcibly remount, call
drive.mount("/content/drive", force_remount=True).
(768, 9)
(768, 8)
(768,)
Output exceeds the size limit. Open the full output data in a text editor
Epoch 1/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5050 - accuracy: 0.7591
Epoch 2/200
77/77 [==============================] - 0s 963us/step - loss: 0.5148 - accuracy:
0.7435
Epoch 3/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5155 - accuracy: 0.7396
Epoch 4/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5197 - accuracy: 0.7565
Epoch 5/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5281 - accuracy: 0.7578
Epoch 6/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5223 - accuracy: 0.7526
Epoch 7/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5092 - accuracy: 0.7513
Epoch 8/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5072 - accuracy: 0.7617
Epoch 9/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5241 - accuracy: 0.7552
Epoch 10/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5311 - accuracy: 0.7292
Epoch 11/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5194 - accuracy: 0.7539
Epoch 12/200
77/77 [==============================] - 0s 1ms/step - loss: 0.5436 - accuracy: 0.7292
Epoch 13/200
...
Epoch 199/200
77/77 [==============================] - 0s 1ms/step - loss: 0.4924 - accuracy: 0.7383
Epoch 200/200
77/77 [==============================] - 0s 1ms/step - loss: 0.4948 - accuracy: 0.7617
<tensorflow.python.keras.callbacks.History at 0x7f04e7d2ffd0>
24/24 [==============================] - 0s 986us/step - loss: 0.4918 - accuracy:
0.7656
Accuracy: 76.56
# Make probability predictions with the model (sigmoid output in [0, 1],
# one probability per input row).
predictions = model.predict(X)
Dept. of MCA
KLE Technological University
# Round the predicted probabilities down to hard 0/1 class labels.
rounded = [round(x[0]) for x in predictions]
# Make class predictions with the model.
# NOTE(review): the original repeated this statement twice back-to-back;
# one copy is kept. predict_classes() was removed in TensorFlow 2.6, so the
# documented replacement (thresholding predict() at 0.5) is used instead.
predictions = (model.predict(X) > 0.5).astype("int32")
# Summarize the first 5 cases: input row => predicted class (expected label).
# The loop body lost its indentation during PDF extraction; restored here.
for i in range(5):
    print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i], y[i]))
[6.0, 148.0, 72.0, 35.0, 0.0, 33.6, 0.627, 50.0] => 0 (expected 1)
[1.0, 85.0, 66.0, 29.0, 0.0, 26.6, 0.351, 31.0] => 0 (expected 0)
[8.0, 183.0, 64.0, 0.0, 0.0, 23.3, 0.672, 32.0] => 1 (expected 1)
[1.0, 89.0, 66.0, 23.0, 94.0, 28.1, 0.167, 21.0] => 0 (expected 0)
[0.0, 137.0, 40.0, 35.0, 168.0, 43.1, 2.288, 33.0] => 1 (expected 1)
Dept. of MCA
KLE Technological University
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
Dept. of MCA
KLE Technological University
Dept. of MCA
KLE Technological University
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
Dept. of MCA
KLE Technological University
# Build the CNN as a linear stack of layers.
cnn = tf.keras.models.Sequential()
# NOTE(review): a MaxPool2D layer appears with no preceding Conv2D — the
# convolution layer(s) were most likely lost in the PDF extraction; as
# written the model has no convolutional stage. Confirm against the
# original notebook.
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Flatten the pooled feature maps into a 1-D vector for the dense layers.
cnn.add(tf.keras.layers.Flatten())
# Fully-connected hidden layer with 128 ReLU units.
cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
# Single sigmoid output unit for binary classification.
cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
### Training the CNN on the Training set and evaluating it on the Test set
Epoch 1/10
32/32 [==============================] - 121s 4s/step - loss: 0.0192 - accuracy: 0.9950
- val_loss: 0.0086 - val_accuracy: 1.0000
Epoch 2/10
32/32 [==============================] - 110s 3s/step - loss: 0.0150 - accuracy: 0.9960
- val_loss: 0.0021 - val_accuracy: 1.0000
Epoch 3/10
Dept. of MCA
KLE Technological University
import numpy as np
from keras.preprocessing import image
# NOTE(review): this second import deliberately re-binds `image` to
# keras.utils, where load_img/img_to_array live in newer Keras versions;
# the import above it is effectively shadowed. Both lines are kept as in
# the original for compatibility across Keras versions.
import keras.utils as image

# Load the test image at the 64x64 size the CNN expects, convert it to an
# array, and add a leading batch dimension (model expects a 4-D batch).
test_image = image.load_img('/content/drive/My Drive/g1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = cnn.predict(test_image)
# class_indices maps class-folder names to the 0/1 labels the generator
# assigned during training (statement's value is unused here).
training_set.class_indices
# NOTE(review): the if/else bodies lost their indentation during PDF
# extraction (a SyntaxError as printed); restored here.
if result[0][0] == 1:
    prediction = 'Red Chilli'
else:
    prediction = 'Green Chilli'
print(prediction)
Dept. of MCA