
DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING

ARTIFICIAL INTELLIGENCE AND MACHINE LEARNING
LABORATORY MANUAL

18CSL76

For

B.E IN COMPUTER SCIENCE AND ENGINEERING (CSE)

Artificial Intelligence and Machine Learning Laboratory Manual

Artificial Intelligence and Machine Learning Laboratory Programs List:

1. Implement A* Search algorithm.
2. Implement AO* Search algorithm.
3. For a given set of training data examples stored in a .CSV file, implement
and demonstrate the Candidate-Elimination algorithm to output a
description of the set of all hypotheses consistent with the training
examples.
4. Write a program to demonstrate the working of the decision tree based
ID3 algorithm. Use an appropriate data set for building the decision tree
and apply this knowledge to classify a new sample.
5. Build an Artificial Neural Network by implementing the Backpropagation
algorithm and test the same using appropriate data sets.
6. Write a program to implement the naïve Bayesian classifier for a sample
training data set stored as a .CSV file. Compute the accuracy of the
classifier, considering few test data sets.
7. Apply EM algorithm to cluster a set of data stored in a .CSV file. Use the
same data set for clustering using k-Means algorithm. Compare the
results of these two algorithms and comment on the quality of clustering.
You can add Java/Python ML library classes/API in the program.
8. Write a program to implement k-Nearest Neighbour algorithm to classify
the iris data set. Print both correct and wrong predictions. Java/Python
ML library classes can be used for this problem.
9. Implement the non-parametric Locally Weighted Regression algorithm in
order to fit data points. Select an appropriate data set for your experiment
and draw graphs.


1. Implement A* Search algorithm.


def aStarAlgo(start_node, stop_node):
    open_set = {start_node}
    closed_set = set()
    g = {}        # store distance from starting node
    parents = {}  # parents contains an adjacency map of all nodes
    # distance of starting node from itself is zero
    g[start_node] = 0
    # start_node is the root node, i.e. it has no parent nodes,
    # so start_node is set as its own parent node
    parents[start_node] = start_node
    while len(open_set) > 0:
        n = None
        # the node with the lowest f() = g() + h() is found
        for v in open_set:
            if n is None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v
        if n == stop_node or Graph_nodes.get(n) is None:
            pass
        else:
            for (m, weight) in get_neighbors(n):
                # nodes 'm' not in the open and closed sets are added to open,
                # and n is set as their parent
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                else:
                    # for each node m, compare its distance from start,
                    # i.e. g(m), to the distance from start through node n
                    if g[m] > g[n] + weight:
                        # update g(m)
                        g[m] = g[n] + weight
                        # change parent of m to n
                        parents[m] = n
                        # if m is in the closed set, remove it and add it to open
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)
        if n is None:
            print('Path does not exist!')
            return None
        # if the current node is the stop_node,
        # then we begin reconstructing the path from it to the start_node
        if n == stop_node:
            path = []
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print('Path found: {}'.format(path))
            return path
        # remove n from the open set and add it to the closed set,
        # because all of its neighbours have been inspected
        open_set.remove(n)
        closed_set.add(n)
    print('Path does not exist!')
    return None

#function to return the neighbours of the passed node
#together with their distances
def get_neighbors(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None

#for simplicity we'll consider the heuristic distances as given,
#and this function returns the heuristic distance for all nodes
def heuristic(n):
    H_dist = {
        'A': 11,
        'B': 6,
        'C': 5,
        'D': 7,
        'E': 3,
        'F': 6,
        'G': 5,
        'H': 3,
        'I': 1,
        'J': 0
    }
    return H_dist[n]


#Describe your graph here
Graph_nodes = {
    'A': [('B', 6), ('F', 3)],
    'B': [('A', 6), ('C', 3), ('D', 2)],
    'C': [('B', 3), ('D', 1), ('E', 5)],
    'D': [('B', 2), ('C', 1), ('E', 8)],
    'E': [('C', 5), ('D', 8), ('I', 5), ('J', 5)],
    'F': [('A', 3), ('G', 1), ('H', 7)],
    'G': [('F', 1), ('I', 3)],
    'H': [('F', 7), ('I', 2)],
    'I': [('E', 5), ('G', 3), ('H', 2), ('J', 3)],
}

aStarAlgo('A', 'J')

Output:
Path found: ['A', 'F', 'G', 'I', 'J']
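
As a quick sanity check, the total cost of the reported path can be recomputed from the edge weights in Graph_nodes; the small helper below is illustrative and is not part of the original listing.

def path_cost(path):
    # sum the edge weights between consecutive nodes on the path
    cost = 0
    for a, b in zip(path, path[1:]):
        cost += dict(Graph_nodes[a])[b]
    return cost

print(path_cost(['A', 'F', 'G', 'I', 'J']))   # 3 + 1 + 3 + 3 = 10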


A second graph and heuristic can be run through the same aStarAlgo function:

#for simplicity we'll consider the heuristic distances as given,
#and this function returns the heuristic distance for all nodes
def heuristic(n):
    H_dist = {
        'A': 11,
        'B': 6,
        'C': 99,
        'D': 1,
        'E': 7,
        'G': 0,
    }
    return H_dist[n]

#Describe your graph here
Graph_nodes = {
    'A': [('B', 2), ('E', 3)],
    'B': [('A', 2), ('C', 1), ('G', 9)],
    'C': [('B', 1)],
    'D': [('E', 6), ('G', 1)],
    'E': [('A', 3), ('D', 6)],
    'G': [('B', 9), ('D', 1)]
}

aStarAlgo('A', 'G')

Output:
Path found: ['A', 'E', 'D', 'G']


2. Implement AO* Search algorithm.


class Graph:
    def __init__(self, graph, heuristicNodeList, startNode):
        # instantiate the graph object with the graph topology,
        # heuristic values and start node
        self.graph = graph
        self.H = heuristicNodeList
        self.start = startNode
        self.parent = {}
        self.status = {}
        self.solutionGraph = {}

    def applyAOStar(self):  # starts the recursive AO* algorithm
        self.aoStar(self.start, False)

    def getNeighbors(self, v):  # gets the neighbours of a given node
        return self.graph.get(v, '')

    def getStatus(self, v):  # returns the status of a given node
        return self.status.get(v, 0)

    def setStatus(self, v, val):  # sets the status of a given node
        self.status[v] = val

    def getHeuristicNodeValue(self, n):
        # always returns the heuristic value of a given node
        return self.H.get(n, 0)

    def setHeuristicNodeValue(self, n, value):
        # sets the revised heuristic value of a given node
        self.H[n] = value


    def printSolution(self):
        print("FOR GRAPH SOLUTION, TRAVERSE THE GRAPH FROM THE START NODE:", self.start)
        print("------------------------------------------------------------")
        print(self.solutionGraph)
        print("------------------------------------------------------------")

    def computeMinimumCostChildNodes(self, v):
        # computes the minimum cost over the sets of child nodes of node v
        minimumCost = 0
        costToChildNodeListDict = {}
        costToChildNodeListDict[minimumCost] = []
        flag = True
        for nodeInfoTupleList in self.getNeighbors(v):
            # iterate over every set of child node/s
            cost = 0
            nodeList = []
            for c, weight in nodeInfoTupleList:
                cost = cost + self.getHeuristicNodeValue(c) + weight
                nodeList.append(c)
            if flag == True:
                # initialize the minimum cost with the cost of the
                # first set of child node/s
                minimumCost = cost
                costToChildNodeListDict[minimumCost] = nodeList
                flag = False
            else:
                # compare the current cost with the minimum cost so far
                if minimumCost > cost:
                    minimumCost = cost
                    costToChildNodeListDict[minimumCost] = nodeList
        # return the minimum cost and the minimum cost child node/s
        return minimumCost, costToChildNodeListDict[minimumCost]

    def aoStar(self, v, backTracking):
        # AO* algorithm for a start node and a backtracking status flag
        print("HEURISTIC VALUES  :", self.H)
        print("SOLUTION GRAPH    :", self.solutionGraph)
        print("PROCESSING NODE   :", v)
        print("-----------------------------------------------------------------------------------------")
        if self.getStatus(v) >= 0:
            # if the status of node v is >= 0, compute its minimum cost child nodes
            minimumCost, childNodeList = self.computeMinimumCostChildNodes(v)
            print(minimumCost, childNodeList)
            self.setHeuristicNodeValue(v, minimumCost)
            self.setStatus(v, len(childNodeList))
            # check whether the minimum cost child nodes of v are solved
            solved = True
            for childNode in childNodeList:
                self.parent[childNode] = v
                if self.getStatus(childNode) != -1:
                    solved = solved & False
            if solved == True:
                # if the minimum cost child nodes of v are solved,
                # set the current node status as solved (-1)
                self.setStatus(v, -1)
                # update the solution graph with the solved nodes,
                # which may form part of the final solution
                self.solutionGraph[v] = childNodeList
            if v != self.start:
                # if the current node is not the start node, backtrack to its
                # parent with the backtracking status flag set to True
                self.aoStar(self.parent[v], True)
            if backTracking == False:
                # if the current call is not a backtracking call, explore
                # each minimum cost child node further
                for childNode in childNodeList:
                    # set the status of the child node to 0 (needs exploration)
                    self.setStatus(childNode, 0)
                    self.aoStar(childNode, False)

Graph - 1 as Input to AO* Search Algorithm

#for simplicity we'll consider the heuristic distances as given
print("Graph - 1")
h1 = {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5,
      'H': 7, 'I': 7, 'J': 1}

graph1 = {
    'A': [[('B', 1), ('C', 1)], [('D', 1)]],  # A: solve B AND C together, OR solve D alone
    'B': [[('G', 1)], [('H', 1)]],            # B: solve G, OR solve H
    'C': [[('J', 1)]],
    'D': [[('E', 1), ('F', 1)]],              # D: solve E AND F together
    'G': [[('I', 1)]]
}

G1 = Graph(graph1, h1, 'A')
G1.applyAOStar()
G1.printSolution()

Output of AO* Search Algorithm

Graph - 1
HEURISTIC VALUES : {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : A

10 ['B', 'C']
HEURISTIC VALUES : {'A': 10, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : B

6 ['G']
HEURISTIC VALUES : {'A': 10, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : A

10 ['B', 'C']
HEURISTIC VALUES : {'A': 10, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : G

8 ['I']
HEURISTIC VALUES : {'A': 10, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 8, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : B

8 ['H']
HEURISTIC VALUES : {'A': 10, 'B': 8, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 8, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : A

12 ['B', 'C']
HEURISTIC VALUES : {'A': 12, 'B': 8, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 8, 'H': 7, 'I': 7, 'J': 1}
SOLUTION GRAPH : {}

PROCESSING NODE : I

0 []
HEURISTIC VALUES : {'A': 12, 'B': 8, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 8, 'H': 7, 'I': 0, 'J': 1}
SOLUTION GRAPH : {'I': []}

PROCESSING NODE : G

1 ['I']
HEURISTIC VALUES : {'A': 12, 'B': 8, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 1}
SOLUTION GRAPH : {'I': [], 'G': ['I']}

PROCESSING NODE : B

2 ['G']
HEURISTIC VALUES : {'A': 12, 'B': 2, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 1}
SOLUTION GRAPH : {'I': [], 'G': ['I'], 'B': ['G']}

PROCESSING NODE : A

6 ['B', 'C']
HEURISTIC VALUES : {'A': 6, 'B': 2, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 1}
SOLUTION GRAPH : {'I': [], 'G': ['I'], 'B': ['G']}

PROCESSING NODE : C

2 ['J']
HEURISTIC VALUES : {'A': 6, 'B': 2, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 1}
SOLUTION GRAPH : {'I': [], 'G': ['I'], 'B': ['G']}

PROCESSING NODE : A

6 ['B', 'C']
HEURISTIC VALUES : {'A': 6, 'B': 2, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 1}
SOLUTION GRAPH : {'I': [], 'G': ['I'], 'B': ['G']}

PROCESSING NODE : J

0 []
HEURISTIC VALUES : {'A': 6, 'B': 2, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 0}
SOLUTION GRAPH : {'I': [], 'G': ['I'], 'B': ['G'], 'J': []}

PROCESSING NODE : C

1 ['J']
HEURISTIC VALUES : {'A': 6, 'B': 2, 'C': 1, 'D': 12, 'E': 2, 'F': 1, 'G': 1, 'H': 7, 'I': 0, 'J': 0}
SOLUTION GRAPH : {'I': [], 'G': ['I'], 'B': ['G'], 'J': [], 'C': ['J']}

PROCESSING NODE : A

5 ['B', 'C']

FOR GRAPH SOLUTION, TRAVERSE THE GRAPH FROM THE START NODE: A

{'I': [], 'G': ['I'], 'B': ['G'], 'J': [], 'C': ['J'], 'A': ['B', 'C']}
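
The same Graph class can be exercised on a second AND-OR graph; the topology and heuristic values below are illustrative additions, not part of the Graph - 1 example above.

print("Graph - 2")
h2 = {'A': 1, 'B': 6, 'C': 12, 'D': 10, 'E': 4, 'F': 4, 'G': 5, 'H': 7}
graph2 = {
    'A': [[('B', 1), ('C', 1)], [('D', 1)]],  # A: (B AND C) OR D
    'B': [[('G', 1)], [('H', 1)]],            # B: G OR H
    'D': [[('E', 1), ('F', 1)]],              # D: E AND F
}
G2 = Graph(graph2, h2, 'A')
G2.applyAOStar()
G2.printSolution()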

3. For a given set of training data examples stored in a
.CSV file, implement and demonstrate the Candidate-
Elimination algorithm to output a description of the set
of all hypotheses consistent with the training examples.

import numpy as np
import pandas as pd

data = pd.DataFrame(data=pd.read_csv('enjoysport.csv'))
concepts = np.array(data.iloc[:, 0:-1])
print(concepts)
target = np.array(data.iloc[:, -1])
print(target)

def learn(concepts, target):
    specific_h = concepts[0].copy()
    print("initialization of specific_h and general_h")
    print(specific_h)
    general_h = [["?" for i in range(len(specific_h))]
                 for i in range(len(specific_h))]
    for i, h in enumerate(concepts):
        if target[i] == "yes":
            # a positive example generalises the specific hypothesis
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
                print(specific_h)
        print(specific_h)
        if target[i] == "no":
            # a negative example specialises the general hypotheses
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'
        print(" steps of Candidate Elimination Algorithm", i + 1)
        print(specific_h)
        print(general_h)
    # drop the hypotheses in general_h that stayed fully general
    indices = [i for i, val in enumerate(general_h)
               if val == ['?', '?', '?', '?', '?', '?']]
    for i in indices:
        general_h.remove(['?', '?', '?', '?', '?', '?'])
    return specific_h, general_h

s_final, g_final = learn(concepts, target)
print("Final Specific_h:", s_final, sep="\n")
print("Final General_h:", g_final, sep="\n")

OUTPUT

[['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
 ['sunny' 'warm' 'high' 'strong' 'warm' 'same']
 ['rainy' 'cold' 'high' 'strong' 'warm' 'change']
 ['sunny' 'warm' 'high' 'strong' 'cool' 'change']]
['yes' 'yes' 'no' 'yes']

initialization of specific_h and general_h
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']

steps of Candidate Elimination Algorithm 1
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
[['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?']]

['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' 'normal' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']

steps of Candidate Elimination Algorithm 2
['sunny' 'warm' '?' 'strong' 'warm' 'same']
[['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?']]
['sunny' 'warm' '?' 'strong' 'warm' 'same']

steps of Candidate Elimination Algorithm 3
['sunny' 'warm' '?' 'strong' 'warm' 'same']
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', 'same']]

['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' 'warm' 'same']
['sunny' 'warm' '?' 'strong' '?' 'same']
['sunny' 'warm' '?' 'strong' '?' '?']
['sunny' 'warm' '?' 'strong' '?' '?']

steps of Candidate Elimination Algorithm 4
['sunny' 'warm' '?' 'strong' '?' '?']
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?']]

Final Specific_h:
['sunny' 'warm' '?' 'strong' '?' '?']
Final General_h:
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?']]

4. Write a program to demonstrate the working of the
decision tree based ID3 algorithm. Use an appropriate
data set for building the decision tree and apply this
knowledge to classify a new sample.

import math
import csv

def load_csv(filename):
    lines = csv.reader(open(filename, "r"))
    dataset = list(lines)
    headers = dataset.pop(0)
    return dataset, headers

class Node:
    def __init__(self, attribute):
        self.attribute = attribute
        self.children = []
        self.answer = ""

def subtables(data, col, delete):
    # partition the data on the values of the attribute in column col
    dic = {}
    coldata = [row[col] for row in data]
    attr = list(set(coldata))

    counts = [0] * len(attr)
    r = len(data)
    c = len(data[0])
    for x in range(len(attr)):
        for y in range(r):
            if data[y][col] == attr[x]:
                counts[x] += 1

    for x in range(len(attr)):
        dic[attr[x]] = [[0 for i in range(c)] for j in range(counts[x])]
        pos = 0
        for y in range(r):
            if data[y][col] == attr[x]:
                if delete:
                    del data[y][col]
                dic[attr[x]][pos] = data[y]
                pos += 1
    return attr, dic

def entropy(S):
    # entropy of a list of (binary) class labels
    attr = list(set(S))
    if len(attr) == 1:
        return 0

    counts = [0, 0]
    for i in range(2):
        counts[i] = sum([1 for x in S if attr[i] == x]) / (len(S) * 1.0)

    sums = 0
    for cnt in counts:
        sums += -1 * cnt * math.log(cnt, 2)
    return sums

def compute_gain(data, col):
    # information gain of splitting the data on column col
    attr, dic = subtables(data, col, delete=False)

    total_size = len(data)
    entropies = [0] * len(attr)
    ratio = [0] * len(attr)

    total_entropy = entropy([row[-1] for row in data])

    for x in range(len(attr)):
        ratio[x] = len(dic[attr[x]]) / (total_size * 1.0)
        entropies[x] = entropy([row[-1] for row in dic[attr[x]]])
        total_entropy -= ratio[x] * entropies[x]
    return total_entropy
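
# (Illustrative check, not part of the original listing: for the classic
# 9-positive / 5-negative split, entropy(['yes'] * 9 + ['no'] * 5)
# evaluates to about 0.940, matching -(9/14)log2(9/14) - (5/14)log2(5/14).)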

def build_tree(data, features):
    lastcol = [row[-1] for row in data]
    if (len(set(lastcol))) == 1:
        # all rows share one label: create a leaf node
        node = Node("")
        node.answer = lastcol[0]
        return node

    # pick the attribute with the highest information gain
    n = len(data[0]) - 1
    gains = [0] * n
    for col in range(n):
        gains[col] = compute_gain(data, col)
    split = gains.index(max(gains))
    node = Node(features[split])
    fea = features[:split] + features[split + 1:]

    attr, dic = subtables(data, split, delete=True)

    for x in range(len(attr)):
        child = build_tree(dic[attr[x]], fea)
        node.children.append((attr[x], child))
    return node

def print_tree(node, level):
    if node.answer != "":
        print(" " * level, node.answer)
        return

    print(" " * level, node.attribute)
    for value, n in node.children:
        print(" " * (level + 1), value)
        print_tree(n, level + 2)

def classify(node, x_test, features):
    if node.answer != "":
        print(node.answer)
        return
    pos = features.index(node.attribute)
    for value, n in node.children:
        if x_test[pos] == value:
            classify(n, x_test, features)

'''Main program'''
dataset, features = load_csv("id3.csv")
node1 = build_tree(dataset, features)

print("The decision tree for the dataset using ID3 algorithm is")
print_tree(node1, 0)

testdata, features = load_csv("id3_test.csv")
for xtest in testdata:
    print("The test instance:", xtest)
    print("The label for test instance:", end=" ")
    classify(node1, xtest, features)
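
The program expects id3.csv (training data with a header row) and id3_test.csv (test instances) in the working directory. Based on the tree printed below, the data follows the familiar PlayTennis layout; an assumed sketch of the file (header names and rows are illustrative) is:

Outlook,Temperature,Humidity,Wind,PlayTennis
sunny,hot,high,weak,no
overcast,hot,high,weak,yes
rain,mild,high,weak,yes
...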

OUTPUT

The decision tree for the dataset using ID3 algorithm is
 Outlook
  rain
   Wind
    weak
     yes
    strong
     no
  overcast
   yes
  sunny
   Humidity
    normal
     yes
    high
     no

The test instance: ['rain', 'cool', 'normal', 'strong']
The label for test instance: no
The test instance: ['sunny', 'mild', 'normal', 'strong']
The label for test instance: yes

5. Build an Artificial Neural Network by implementing
the Backpropagation algorithm and test the same using
appropriate data sets.

import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)  # two inputs [sleep, study]
y = np.array(([92], [86], [89]), dtype=float)        # one output [expected % in exams]
X = X / np.amax(X, axis=0)  # normalise each column of X by its maximum
y = y / 100

# sigmoid function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# derivative of the sigmoid function
def derivatives_sigmoid(x):
    return x * (1 - x)

# variable initialization
epoch = 5000             # number of training iterations
lr = 0.1                 # learning rate
inputlayer_neurons = 2   # number of features in the data set
hiddenlayer_neurons = 3  # number of neurons in the hidden layer
output_neurons = 1       # number of neurons in the output layer

# weight and bias initialization
# (np.random.uniform draws numbers uniformly at random with the given shape)
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))  # input -> hidden weights
bh = np.random.uniform(size=(1, hiddenlayer_neurons))                   # hidden layer bias
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))    # hidden -> output weights
bout = np.random.uniform(size=(1, output_neurons))                      # output layer bias

for i in range(epoch):
    # forward propagation
    hinp1 = np.dot(X, wh)
    hinp = hinp1 + bh
    hlayer_act = sigmoid(hinp)
    outinp1 = np.dot(hlayer_act, wout)
    outinp = outinp1 + bout
    output = sigmoid(outinp)

    # backpropagation
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)

    # how much the hidden layer weights contributed to the error
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad

    # dot product of the next layer's error and the current layer's output
    # (the biases bh and bout are left unchanged in this simple version)
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr

print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n", output)

OUTPUT

Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]

Actual Output:
[[0.92]
[0.86]
[0.89]]

Predicted Output:
[[0.66129474]
[0.65669962]
[0.66083852]]

6. Write a program to implement the naïve Bayesian
classifier for a sample training data set stored as a
.CSV file. Compute the accuracy of the classifier,
considering few test data sets.

import numpy as np
import csv
import pandas as pd
from pgmpy.models import BayesianModel
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.inference import VariableElimination

heartDisease = pd.read_csv('heart.csv')
heartDisease = heartDisease.replace('?', np.nan)

print('Few examples from the dataset are given below')
print(heartDisease.head())

model = BayesianModel([('age', 'trestbps'), ('age', 'fbs'),
                       ('sex', 'trestbps'), ('exang', 'trestbps'),
                       ('trestbps', 'heartdisease'), ('fbs', 'heartdisease'),
                       ('heartdisease', 'restecg'), ('heartdisease', 'thalach'),
                       ('heartdisease', 'chol')])

print('\nLearning CPD using Maximum likelihood estimators')
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)

print('\n Inferencing with Bayesian Network:')
HeartDisease_infer = VariableElimination(model)

print('\n 1. Probability of HeartDisease given Age=28')
q = HeartDisease_infer.query(variables=['heartdisease'], evidence={'age': 28})
print(q['heartdisease'])  # note: newer pgmpy versions return the factor directly; use print(q)

print('\n 2. Probability of HeartDisease given cholesterol=100')
q = HeartDisease_infer.query(variables=['heartdisease'], evidence={'chol': 100})
print(q['heartdisease'])

OUTPUT

Few examples from the dataset are given below


age sex cp trestbps ... slope ca thal heartdisease
0 63 1 1 145 ... 3 0 6 0
1 67 1 4 160 ... 2 3 3 2
2 67 1 4 120 ... 2 2 7 1
3 37 1 3 130 ... 3 0 3 0
4 41 0 2 130 ... 1 0 3 0
[5 rows x 14 columns]
Learning CPD using Maximum likelihood estimators
Inferencing with Bayesian Network:
1. Probability of HeartDisease given Age=28
╒════════════════╤═════════════════════╕
│ heartdisease │ phi(heartdisease) │
╞════════════════╪═════════════════════╡
│ heartdisease_0 │ 0.6791 │
├────────────────┼─────────────────────┤
│ heartdisease_1 │ 0.1212 │
├────────────────┼─────────────────────┤
│ heartdisease_2 │ 0.0810 │
├────────────────┼─────────────────────┤
│ heartdisease_3 │ 0.0939 │
├────────────────┼─────────────────────┤
│ heartdisease_4 │ 0.0247 │
╘════════════════╧═════════════════════╛

2. Probability of HeartDisease given cholesterol=100
╒════════════════╤═════════════════════╕
│ heartdisease │ phi(heartdisease) │
╞════════════════╪═════════════════════╡
│ heartdisease_0 │ 0.5400 │
├────────────────┼─────────────────────┤
│ heartdisease_1 │ 0.1533 │
├────────────────┼─────────────────────┤
│ heartdisease_2 │ 0.1303 │
├────────────────┼─────────────────────┤
│ heartdisease_3 │ 0.1259 │
├────────────────┼─────────────────────┤
│ heartdisease_4 │ 0.0506 │
╘════════════════╧═════════════════════╛
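
Note that the listing above demonstrates a full Bayesian network with pgmpy rather than a strict naïve Bayes model. A minimal naïve Bayes version of the stated problem, sketched with scikit-learn under the assumption of a numeric CSV whose last column is the class label, could look like this:

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

df = pd.read_csv('data.csv')  # 'data.csv' is a placeholder file name
X = df.iloc[:, :-1]           # every column except the last is a feature
y = df.iloc[:, -1]            # the last column is the class label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
clf = GaussianNB().fit(X_train, y_train)
print('Accuracy:', accuracy_score(y_test, clf.predict(X_test)))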

7. Apply EM algorithm to cluster a set of data stored
in a .CSV file. Use the same data set for clustering
using k-Means algorithm. Compare the results of these
two algorithms and comment on the quality of
clustering. You can add Java/Python ML library
classes/API in the program.

import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
import sklearn.metrics as sm
import pandas as pd
import numpy as np

iris = datasets.load_iris()

X = pd.DataFrame(iris.data)
X.columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']

y = pd.DataFrame(iris.target)
y.columns = ['Targets']

# K-Means clustering
model = KMeans(n_clusters=3)
model.fit(X)
model.labels_  # the cluster labels that KMeans assigned

# view the results: set the size of the plot
plt.figure(figsize=(14, 7))

# create a colormap
colormap = np.array(['red', 'lime', 'black'])

# plot the original classifications
plt.subplot(1, 2, 1)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Real Classification')

# plot the model's classifications
plt.subplot(1, 2, 2)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[model.labels_], s=40)
plt.title('K Mean Classification')

# view the results again on a second figure
plt.figure(figsize=(14, 7))

# relabel the clusters so that they line up with the target classes
predY = np.choose(model.labels_, [0, 1, 2]).astype(np.int64)
print(predY)

# plot original
plt.subplot(1, 2, 1)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Real Classification')

# plot predicted with corrected values
plt.subplot(1, 2, 2)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[predY], s=40)
plt.title('K Mean Classification')

print('The accuracy score of K-Mean: ', sm.accuracy_score(y, model.labels_))
print('The Confusion matrix of K-Mean: ', sm.confusion_matrix(y, model.labels_))

# standardise the features before fitting the Gaussian mixture model
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
scaler.fit(X)
xsa = scaler.transform(X)
xs = pd.DataFrame(xsa, columns=X.columns)

# EM clustering via a Gaussian mixture model
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=3)
gmm.fit(xs)

y_cluster_gmm = gmm.predict(xs)

plt.subplot(2, 2, 3)
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y_cluster_gmm], s=40)
plt.title('GMM Classification')

print('The accuracy score of EM: ', sm.accuracy_score(y, y_cluster_gmm))
print('The Confusion matrix of EM: ', sm.confusion_matrix(y, y_cluster_gmm))

plt.show()  # display the figures when running as a script

OUTPUT

[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 0
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 0 0 0 0 2
0 0 0 0 0 0 2 2 0 0 0 0 2 0 2 0 2 0 0 2 2 0 0 0 0 0 2
0 0 0 0 2 0 0 0 2 0 0 0 2 0 0 2]

The accuracy score of K-Mean: 0.09333333333333334


The Confusion matrix of K-Mean:
[[ 0 50 0]
[ 2 0 48]
[36 0 14]]
The accuracy score of EM: 0.3333333333333333
The Confusion matrix of EM:
[[ 0 50 0]
[45 0 5]
[ 0 0 50]]
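
When commenting on the quality of clustering, note that raw accuracy against the targets is sensitive to how the arbitrary cluster labels happen to line up with the class labels (which is why K-Means scores only 0.09 above even though its confusion matrix is reasonable). A label-permutation-invariant measure such as the adjusted Rand index gives a fairer comparison; a short addition, reusing the variables from the listing above, is:

from sklearn.metrics import adjusted_rand_score

print('ARI of K-Means:', adjusted_rand_score(iris.target, model.labels_))
print('ARI of EM/GMM :', adjusted_rand_score(iris.target, y_cluster_gmm))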

8. Write a program to implement k-Nearest Neighbour
algorithm to classify the iris data set. Print both
correct and wrong predictions. Java/Python ML library
classes can be used for this problem.

from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn import datasets

iris = datasets.load_iris()

x = iris.data
y = iris.target

print('sepal-length', 'sepal-width', 'petal-length', 'petal-width')
print(x)
print('class: 0-Iris-Setosa, 1-Iris-Versicolour, 2-Iris-Virginica')
print(y)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# train the model with the k = 5 nearest neighbours
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(x_train, y_train)

# make predictions on the test data
y_pred = classifier.predict(x_test)

print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred))
print('Accuracy Metrics')
print(classification_report(y_test, y_pred))

OUTPUT

sepal-length sepal-width petal-length petal-width
[[5.1 3.5 1.4 0.2]
[4.9 3. 1.4 0.2]
[4.7 3.2 1.3 0.2]
[4.6 3.1 1.5 0.2]
[5. 3.6 1.4 0.2]
[5.4 3.9 1.7 0.4]
[4.6 3.4 1.4 0.3]
[5. 3.4 1.5 0.2]
[4.4 2.9 1.4 0.2]
[4.9 3.1 1.5 0.1]
[5.4 3.7 1.5 0.2]
[4.8 3.4 1.6 0.2]
[4.8 3. 1.4 0.1]
[4.3 3. 1.1 0.1]
[5.8 4. 1.2 0.2]
[5.7 4.4 1.5 0.4]
[5.4 3.9 1.3 0.4]
[5.1 3.5 1.4 0.3]
[5.7 3.8 1.7 0.3]
[5.1 3.8 1.5 0.3]
[5.4 3.4 1.7 0.2]
[5.1 3.7 1.5 0.4]
[4.6 3.6 1. 0.2]
[5.1 3.3 1.7 0.5]
[4.8 3.4 1.9 0.2]
[5. 3. 1.6 0.2]
[5. 3.4 1.6 0.4]
[5.2 3.5 1.5 0.2]
[5.2 3.4 1.4 0.2]
[4.7 3.2 1.6 0.2]
[4.8 3.1 1.6 0.2]
[5.4 3.4 1.5 0.4]
[5.2 4.1 1.5 0.1]
[5.5 4.2 1.4 0.2]
[4.9 3.1 1.5 0.2]
[5. 3.2 1.2 0.2]
[5.5 3.5 1.3 0.2]
[4.9 3.6 1.4 0.1]
[4.4 3. 1.3 0.2]
[5.1 3.4 1.5 0.2]

[5. 3.5 1.3 0.3]
[4.5 2.3 1.3 0.3]
[4.4 3.2 1.3 0.2]
[5. 3.5 1.6 0.6]
[5.1 3.8 1.9 0.4]
[4.8 3. 1.4 0.3]
[5.1 3.8 1.6 0.2]
[4.6 3.2 1.4 0.2]
[5.3 3.7 1.5 0.2]
[5. 3.3 1.4 0.2]
[7. 3.2 4.7 1.4]
[6.4 3.2 4.5 1.5]
[6.9 3.1 4.9 1.5]
[5.5 2.3 4. 1.3]
[6.5 2.8 4.6 1.5]
[5.7 2.8 4.5 1.3]
[6.3 3.3 4.7 1.6]
[4.9 2.4 3.3 1. ]
[6.6 2.9 4.6 1.3]
[5.2 2.7 3.9 1.4]
[5. 2. 3.5 1. ]
[5.9 3. 4.2 1.5]
[6. 2.2 4. 1. ]
[6.1 2.9 4.7 1.4]
[5.6 2.9 3.6 1.3]
[6.7 3.1 4.4 1.4]
[5.6 3. 4.5 1.5]
[5.8 2.7 4.1 1. ]
[6.2 2.2 4.5 1.5]
[5.6 2.5 3.9 1.1]
[5.9 3.2 4.8 1.8]
[6.1 2.8 4. 1.3]
[6.3 2.5 4.9 1.5]
[6.1 2.8 4.7 1.2]
[6.4 2.9 4.3 1.3]
[6.6 3. 4.4 1.4]
[6.8 2.8 4.8 1.4]
[6.7 3. 5. 1.7]
[6. 2.9 4.5 1.5]
[5.7 2.6 3.5 1. ]
[5.5 2.4 3.8 1.1]
[5.5 2.4 3.7 1. ]
[5.8 2.7 3.9 1.2]

[6. 2.7 5.1 1.6]
[5.4 3. 4.5 1.5]
[6. 3.4 4.5 1.6]
[6.7 3.1 4.7 1.5]
[6.3 2.3 4.4 1.3]
[5.6 3. 4.1 1.3]
[5.5 2.5 4. 1.3]
[5.5 2.6 4.4 1.2]
[6.1 3. 4.6 1.4]
[5.8 2.6 4. 1.2]
[5. 2.3 3.3 1. ]
[5.6 2.7 4.2 1.3]
[5.7 3. 4.2 1.2]
[5.7 2.9 4.2 1.3]
[6.2 2.9 4.3 1.3]
[5.1 2.5 3. 1.1]
[5.7 2.8 4.1 1.3]
[6.3 3.3 6. 2.5]
[5.8 2.7 5.1 1.9]
[7.1 3. 5.9 2.1]
[6.3 2.9 5.6 1.8]
[6.5 3. 5.8 2.2]
[7.6 3. 6.6 2.1]
[4.9 2.5 4.5 1.7]
[7.3 2.9 6.3 1.8]
[6.7 2.5 5.8 1.8]
[7.2 3.6 6.1 2.5]
[6.5 3.2 5.1 2. ]
[6.4 2.7 5.3 1.9]
[6.8 3. 5.5 2.1]
[5.7 2.5 5. 2. ]
[5.8 2.8 5.1 2.4]
[6.4 3.2 5.3 2.3]
[6.5 3. 5.5 1.8]
[7.7 3.8 6.7 2.2]
[7.7 2.6 6.9 2.3]
[6. 2.2 5. 1.5]
[6.9 3.2 5.7 2.3]
[5.6 2.8 4.9 2. ]
[7.7 2.8 6.7 2. ]
[6.3 2.7 4.9 1.8]
[6.7 3.3 5.7 2.1]
[7.2 3.2 6. 1.8]

[6.2 2.8 4.8 1.8]
[6.1 3. 4.9 1.8]
[6.4 2.8 5.6 2.1]
[7.2 3. 5.8 1.6]
[7.4 2.8 6.1 1.9]
[7.9 3.8 6.4 2. ]
[6.4 2.8 5.6 2.2]
[6.3 2.8 5.1 1.5]
[6.1 2.6 5.6 1.4]
[7.7 3. 6.1 2.3]
[6.3 3.4 5.6 2.4]
[6.4 3.1 5.5 1.8]
[6. 3. 4.8 1.8]
[6.9 3.1 5.4 2.1]
[6.7 3.1 5.6 2.4]
[6.9 3.1 5.1 2.3]
[5.8 2.7 5.1 1.9]
[6.8 3.2 5.9 2.3]
[6.7 3.3 5.7 2.5]
[6.7 3. 5.2 2.3]
[6.3 2.5 5. 1.9]
[6.5 3. 5.2 2. ]
[6.2 3.4 5.4 2.3]
[5.9 3. 5.1 1.8]]

class: 0-Iris-Setosa, 1-Iris-Versicolour, 2-Iris-Virginica
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2]
Confusion Matrix
[[13 0 0]
[ 0 13 2]
[ 0 1 16]]
Accuracy Metrics
              precision    recall  f1-score   support

           0       1.00      1.00      1.00        13
           1       0.93      0.87      0.90        15
           2       0.89      0.94      0.91        17

    accuracy                           0.93        45
   macro avg       0.94      0.94      0.94        45
weighted avg       0.93      0.93      0.93        45
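
The problem statement asks for the correct and wrong predictions to be printed explicitly; a small addition to the listing above (reusing its x_test, y_test and y_pred variables) could be:

for i in range(len(x_test)):
    # compare each prediction against the true label
    result = 'Correct' if y_pred[i] == y_test[i] else 'Wrong'
    print(result, '- instance:', x_test[i],
          'predicted:', y_pred[i], 'actual:', y_test[i])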

9. Implement the non-parametric Locally Weighted Regression
algorithm in order to fit data points. Select an appropriate data
set for your experiment and draw graphs.

import numpy as np
from bokeh.plotting import figure, show, output_notebook
from bokeh.layouts import gridplot
from bokeh.io import push_notebook

def local_regression(x0, X, Y, tau):
    # add bias term
    x0 = np.r_[1, x0]  # prepend 1 so that the intercept is learned too
    X = np.c_[np.ones(len(X)), X]

    # fit model: normal equations with kernel
    xw = X.T * radial_kernel(x0, X, tau)    # X transpose * W
    beta = np.linalg.pinv(xw @ X) @ xw @ Y  # @ is matrix multiplication (dot product)

    # predict value
    return x0 @ beta

def radial_kernel(x0, X, tau):
    # weight (radial kernel) bias function
    return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau))

n = 1000
# generate dataset
X = np.linspace(-3, 3, num=n)
print("The Data Set ( 10 Samples) X :\n", X[1:10])
Y = np.log(np.abs(X ** 2 - 1) + .5)
print("The Fitting Curve Data Set (10 Samples) Y :\n", Y[1:10])
# jitter X
X += np.random.normal(scale=.1, size=n)
print("Normalised (10 Samples) X :\n", X[1:10])

domain = np.linspace(-3, 3, num=300)
print(" Xo Domain Space(10 Samples) :\n", domain[1:10])

def plot_lwr(tau):
    # prediction through regression at every point of the domain
    prediction = [local_regression(x0, X, Y, tau) for x0 in domain]
    # note: newer Bokeh versions use width=/height= instead of plot_width=/plot_height=
    plot = figure(plot_width=400, plot_height=400)
    plot.title.text = 'tau=%g' % tau
    plot.scatter(X, Y, alpha=.3)
    plot.line(domain, prediction, line_width=2, color='red')
    return plot

show(gridplot([
    [plot_lwr(10.), plot_lwr(1.)],
    [plot_lwr(0.1), plot_lwr(0.01)]]))
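
The gridplot draws four fits with tau = 10, 1, 0.1 and 0.01: a large tau underfits (nearly a straight line), while a very small tau overfits the jittered points. A single query point can also be evaluated directly; the line below is illustrative and not part of the original listing.

print(local_regression(0.0, X, Y, 0.5))  # prediction at x0 = 0 with tau = 0.5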

OUTPUT

The Data Set ( 10 Samples) X :
[-2.99399399 -2.98798799 -2.98198198 -2.97597598 -2.96996997 -2.96396396
 -2.95795796 -2.95195195 -2.94594595]

The Fitting Curve Data Set (10 Samples) Y :
[2.13582188 2.13156806 2.12730467 2.12303166 2.11874898 2.11445659
 2.11015444 2.10584249 2.10152068]

Normalised (10 Samples) X :
[-3.13139645 -3.16332705 -2.82819547 -2.96836417 -2.98648597 -2.89911672
 -2.82230457 -2.96803478 -2.93037174]

 Xo Domain Space(10 Samples) :
[-2.97993311 -2.95986622 -2.93979933 -2.91973244 -2.89966555 -2.87959866
 -2.85953177 -2.83946488 -2.81939799]

