
Outcome Based Laboratory Record

MACHINE LEARNING 18CS504

B.E. COMPUTER SCIENCE AND ENGINEERING

V SEMESTER

Academic Year 2022-2023

Submitted by
KANNAN R V
201CS192

BANNARI AMMAN INSTITUTE OF TECHNOLOGY


(An Autonomous Institution Affiliated to Anna University, Chennai)

SATHYAMANGALAM - 638 401


BANNARI AMMAN INSTITUTE OF TECHNOLOGY
(An Autonomous Institution Affiliated to Anna University, Chennai)

SATHYAMANGALAM - 638 401


DEPARTMENT OF
COMPUTER SCIENCE AND ENGINEERING

BONAFIDE CERTIFICATE

Certified that this is the bonafide record book of Mr. KANNAN R V, Register Number 201CS192,
submitted for the MACHINE LEARNING laboratory during the academic year 2022-2023.

Staff In-charge                                             Head of the Department

18CS504 - MACHINE LEARNING

Course Outcomes
1. Consider a set of training data examples and implement algorithms to find the
most specific hypothesis and the set of all hypotheses that are consistent with the
training examples.

2. Build computational models consisting of several processing elements that
receive inputs and deliver outputs based on activation functions. Apply decision
tree models, artificial neural networks, Bayesian learning, instance-based learning
and reinforcement learning to approximate classification, clustering and
prediction algorithms to get the desired output.

Table of Contents

S. No.  Name of the Experiment                                        Page No.  Marks Awarded  Signature

1.      Implementation of Candidate Elimination Algorithm for the     4
        dataset: Economy Car Dataset

2.      Implementation of Random Forest Classification Algorithm     16
        for the dataset: Social Network Ads Dataset

3.      Implementation of Find S Algorithm and Decision Tree
        Algorithm

4.      Implementation of KNN for the dataset: Glass Identification
        Dataset

Implementation of Candidate Elimination Algorithm for the
dataset: Economy Car Dataset

AIM:
To implement and demonstrate the Candidate Elimination algorithm, a supervised
machine learning algorithm that can be used for classification and prediction.

ALGORITHM:
Step 1: Load the dataset.
Step 2: Initialize the General hypothesis and the Specific hypothesis.
Step 3: For each training example:
Step 4: If the example is positive:
            if attribute_value == hypothesis_value:
                do nothing
            else:
                replace the attribute value with '?' (generalizing it)
Step 5: If the example is negative:
            make the general hypothesis more specific
A short illustration of the hypothesis representation used throughout the code follows.
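
As a minimal sketch of the representation the code below uses (the attribute values here
are invented for illustration, not taken from the dataset), each hypothesis is a tuple
with one entry per attribute, where '?' accepts any value and '0' accepts none:

# Illustrative hypotheses for a three-attribute problem
S0 = ('0', '0', '0')          # most specific hypothesis: covers no example
G0 = ('?', '?', '?')          # most general hypothesis: covers every example
h = ('Japan', '?', 'Blue')    # hypothetical: fixed 1st and 3rd attributes, any 2nd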
CODING:

import csv

# Open the CSV file and keep all rows as a list of tuples
with open('EnjoySport.csv') as csvFile:
    examples = [tuple(line) for line in csv.reader(csvFile)]

print(examples)

# Obtain the domain of attribute values defined in the instances X
def get_domains(examples):
    # set() returns an unordered collection of items with no duplicates
    d = [set() for i in examples[0]]
    for x in examples:
        # enumerate() adds a counter to an iterable and yields (index, value) pairs
        for i, xi in enumerate(x):
            d[i].add(xi)
    return [list(sorted(x)) for x in d]

# Test the get_domains function
get_domains(examples)
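
For intuition, a hedged usage sketch with invented rows (two attributes plus a Yes/No
decision column):

rows = [('Japan', 'Blue', 'Yes'),
        ('USA', 'Blue', 'No'),
        ('Japan', 'Red', 'Yes')]
print(get_domains(rows))
# Prints: [['Japan', 'USA'], ['Blue', 'Red'], ['No', 'Yes']]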

# The most general hypothesis repeats '?' once per attribute;
# the most specific hypothesis repeats '0' once per attribute
def g_0(n):
    return ('?',) * n

def s_0(n):
    return ('0',) * n
# Check whether hypothesis h1 is more general than (or equal in coverage to) h2
def more_general(h1, h2):
    more_general_parts = []
    for x, y in zip(h1, h2):
        mg = x == '?' or (x != '0' and (x == y or y == '0'))
        more_general_parts.append(mg)
    return all(more_general_parts)  # True only if every attribute comparison holds
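
A quick sketch of the generality relation on invented values:

print(more_general(('?', '?'), ('Japan', 'Blue')))         # True: '?' covers any value
print(more_general(('Japan', '?'), ('USA', 'Blue')))       # False: 'Japan' does not cover 'USA'
print(more_general(('Japan', 'Blue'), ('Japan', 'Blue')))  # True: identical concrete values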

# Check whether a training example is covered by (consistent with) a hypothesis
def consistent(hypothesis, example):
    return more_general(hypothesis, example)

# Compute the minimal generalization of hypothesis h that covers example x
def min_generalizations(h, x):
    h_new = list(h)
    for i in range(len(h)):
        if not consistent(h[i:i+1], x[i:i+1]):
            if h[i] != '0':
                h_new[i] = '?'
            else:
                h_new[i] = x[i]
    return [tuple(h_new)]
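
For example (invented values), the most specific hypothesis is generalized to the first
positive example itself, and a concrete value that disagrees with a later positive
example is relaxed to '?':

print(min_generalizations(('0', '0'), ('Japan', 'Blue')))       # [('Japan', 'Blue')]
print(min_generalizations(('Japan', 'Blue'), ('USA', 'Blue')))  # [('?', 'Blue')]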

# Generalize the Specific hypotheses boundary S
def generalize_S(x, G, S):
    S_prev = list(S)
    for s in S_prev:
        if s not in S:
            continue
        if not consistent(s, x):
            S.remove(s)
            Splus = min_generalizations(s, x)
            # Keep only generalizations that have a counterpart in G
            S.update([h for h in Splus if any([more_general(g, h) for g in G])])
            # Remove from S any hypothesis more general than any other hypothesis in S
            S.difference_update([h for h in S if
                                 any([more_general(h, h1) for h1 in S if h != h1])])
    return S
# Compute the minimal specializations of hypothesis h that exclude example x
def min_specializations(h, domains, x):
    results = []
    for i in range(len(h)):
        if h[i] == '?':
            for val in domains[i]:
                if x[i] != val:
                    h_new = h[:i] + (val,) + h[i+1:]
                    results.append(h_new)
        elif h[i] != '0':
            h_new = h[:i] + ('0',) + h[i+1:]
            results.append(h_new)
    return results

# Specialize the General hypotheses boundary G
def specialize_G(x, domains, G, S):
    G_prev = list(G)
    for g in G_prev:
        if g not in G:
            continue
        if consistent(g, x):
            G.remove(g)
            Gminus = min_specializations(g, domains, x)
            # Keep only specializations that have a counterpart in S
            G.update([h for h in Gminus if any([more_general(h, s) for s in S])])
            # Remove any hypothesis less general than any other hypothesis in G
            G.difference_update([h for h in G if
                                 any([more_general(g1, h) for g1 in G if h != g1])])
    return G
# Perform Candidate Elimination over all training examples
def candidate_elimination(examples):
    domains = get_domains(examples)[:-1]  # Exclude the domain of the decision column

    G = set([g_0(len(domains))])
    S = set([s_0(len(domains))])
    i = 0
    print('All the hypotheses in General and Specific boundary are:\n')
    print('\n G[{0}]:'.format(i), G)
    print('\n S[{0}]:'.format(i), S)
    for xcx in examples:
        i = i + 1
        x, cx = xcx[:-1], xcx[-1]  # Split the row into attributes and decision
        if cx == 'Yes':  # x is a positive example
            G = {g for g in G if consistent(g, x)}
            S = generalize_S(x, G, S)
        else:  # x is a negative example
            S = {s for s in S if not consistent(s, x)}
            G = specialize_G(x, domains, G, S)
        print('\n G[{0}]:'.format(i), G)
        print('\n S[{0}]:'.format(i), S)
    return

# import pixiedust  # Visual debugger
# %%pixie_debugger
candidate_elimination(examples)
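
The code above assumes a CSV with one training example per row: categorical attribute
values in every column except the last, and a 'Yes'/'No' decision in the last column.
A hypothetical row (invented for illustration, not taken from the actual file) would
look like:

Japan,Honda,Blue,1980,Economy,Yes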

OUTPUT:

RESULT:

Successfully completed the Candidate Elimination algorithm implementation using the
Economy Car dataset in Google Colab.

Implementation of Random Forest Classification Algorithm for the
dataset: Social Network Ads Dataset

AIM:
To implement and demonstrate the Random Forest Classification algorithm, a
supervised machine learning algorithm that can be used for classification and
prediction of the final result.

ALGORITHM:

Step-1: Select K random data points from the training set.

Step-2: Build the decision trees associated with the selected data points (subsets).

Step-3: Choose the number N of decision trees that you want to build.

Step-4: Repeat Steps 1 & 2 until N trees are built.

Step-5: For a new data point, find the prediction of each decision tree and assign the
new data point to the category that wins the majority vote (a minimal sketch of this
voting step follows).
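
As a minimal sketch of the majority-vote step in isolation (the per-tree predictions
here are hypothetical):

from collections import Counter

# Hypothetical class predictions from N = 5 individual decision trees for one data point
tree_votes = [1, 0, 1, 1, 0]

# The forest's prediction is the class that wins the most votes
prediction = Counter(tree_votes).most_common(1)[0][0]
print(prediction)  # Prints: 1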

CODING:
# Random Forest Classification

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: Age and Estimated Salary as features, purchase decision as target
dataset = pd.read_csv('../input/Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Display the confusion matrix
cm
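
One natural addition (not part of the original record) is to report the overall
accuracy alongside the confusion matrix, e.g. with scikit-learn's accuracy_score:

from sklearn.metrics import accuracy_score

# Fraction of test points classified correctly (the diagonal of cm over its total)
print('Accuracy:', accuracy_score(y_test, y_pred))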

# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
# Colour each point of the grid with the class the classifier predicts there
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.55, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Random Forest Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.55, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Random Forest Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

Dataset:

OUTPUT:

RESULT:

Successfully completed the Random Forest Classification implementation using the
Social Network Ads dataset in Google Colab.
