Professional Documents
Culture Documents
Ada Lab
Ada Lab
ANALYSIS LAB
ETCS - 351
Semester: 5th
Batch: 5C7
INDEX
Lab Objective
The main objective of this lab is to ensure that students can design efficient algorithms. The
algorithm should be designed by analyzing its running time and space complexity. The emphasis
is on choosing appropriate data structures and designing correct and efficient algorithms to operate
on these data structures. It familiarizes students with some problems for which it is unknown
whether there exist efficient algorithms.
ETCS351.2 Analyze and use the asymptotic behavior of Divide and Conquer approach in
designing algorithms
ETCS351.3 Apply concept of Dynamic Programming and Greedy approach to develop various
algorithms.
ETCS351.4 Development of algorithms based on Greedy Approach.
Aim: To implement the following algorithm using array as a data structure and analyze their time
complexities:
SOURCE CODE:
1. Insertion sort:
#include<iostream>
#include <bits/stdc++.h>
int i, key, j;
{ key = array[i];
j = i - 1;
{ array[j + 1] = array[j];
j = j - 1;
array[j + 1] = key;
int main()
cout<<"***INSERTION SORT***\n\n"<<endl;
int n;
cin >> n;
arr[i]= rand()%1000;
/*display(arr, n);*/
display(arr, n);
clock_t start,end;
start=clock();
insertionSort(arr, n);
display(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
80000
70000
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
#include<iostream>
#include <bits/stdc++.h>
int temp;
temp = a;
a = b;
b = temp;
int i, j, imin;
imin = j;
swap(array[i], array[imin]);
int main()
{
cout<<"Name: AMAN CHAUHAN\nRoll no.: 14614802719\nGroup: 5C7\n\n";
cout<<"***SELECTION SORT***\n\n"<<endl;
int n;
cin >> n;
arr[i]= rand()%1000;
display(arr, n);
clock_t start,end;
start=clock();
selectionSort(arr, n);
display(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
}
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
80000
70000
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
#include<iostream>
#include<cstdlib>
#include <bits/stdc++.h>
int temp;
temp = a;
a = b;
b = temp;
if(array[j] > array[j+1]) { //when the current item is bigger than next
swapping(array[j], array[j+1]);
if(!swaps)
}
int main()
cout<<"***BUBBLE SORT***\n\n"<<endl;
int n;
cin >> n;
arr[i]= rand()%1000;
display(arr, n);
clock_t start,end;
start=clock();
bubbleSort(arr, n);
display(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
}
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
80000
70000
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
#include<iostream>
#include <bits/stdc++.h>
max = arr[i];
return max;
int output[size];
arr[i] = output[i];
int main()
cout<<"***RADIX SORT***\n\n"<<endl;
int size;
cin>>size;
int arr[size];
for(int i=0;i<size;i++)
arr[i]= rand()%1000;
for(int i=0;i<size;i++)
{
cout<<arr[i]<<" ";
clock_t start,end;
start=clock();
RadixSort(arr, size);
for(int i=0;i<size;i++)
cout<<arr[i]<<" ";
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
}
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
80000
70000
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
#include<iostream>
#include <bits/stdc++.h>
int temp;
temp = a;
a = b;
b = temp;
int gap, j, k;
for(gap = n/2; gap > 0; gap = gap / 2) { //initially gap = n/2,decreasing by gap /2
break;
else
swapping(arr[k+gap], arr[k]);
int main() {
int n;
cin >> n;
arr[i]= rand()%1000;
display(arr, n);
clock_t start,end;
start=clock();
shellSort(arr, n);
display(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
}
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
80000
70000
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
#include <iostream>
#include <bits/stdc++.h>
int largest = i;
int l = 2*i + 1;
int r = 2*i + 2;
largest = l;
largest = r;
if (largest != i)
swap(arr[i], arr[largest]);
heapify(arr, n, largest);
}
void heapSort(int arr[], int n)
heapify(arr, n, i);
swap(arr[0], arr[i]);
heapify(arr, i, 0);
int main()
{
cout<<"Name: AMAN CHAUHAN\nRoll no.: 14614802719\nGroup: 5C7\n\n";
cout<<"***HEAP SORT***\n\n"<<endl;
int n;
cin>>n;
arr[i]= rand()%1000;
display(arr, n);
clock_t start,end;
start=clock();
heapSort(arr, n);
display(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
}
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
#include <iostream>
#include <algorithm>
#include <vector>
#include <bits/stdc++.h>
vector<float> b[n];
int x = n*arr[i];
b[x].push_back(arr[i]);
sort(b[i].begin(), b[i].end());
int index = 0;
arr[index++] = b[i][j];
int main()
cout<<"**BUCKET SORT***\n\n"<<endl;
//int n = sizeof(arr)/sizeof(arr[0]);
int n;
cin >> n;
clock_t start,end;
start=clock();
bucketSort(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
}
OUTPUT:
BEST CASE
AVERAGE CASE
WORST CASE
GRAPH:
90000
80000
70000
60000
50000
40000
30000
20000
10000
0
50 100 200 No. of elements
Ans) A Sorting Algorithm is used to rearrange a given array or list elements according to a comparison
operator on the elements.
Ans) Insertion sort is fast, efficient, and stable, while selection sort performs well only when a small
set of elements is involved or the list is already partially sorted.
Insertion sort, Selection sort, Bubble sort, Merge sort, Quick sort, Heap sort, Shell sort
Ans) The worst case time complexity of Insertion sort is O(N^2). The average case time complexity of
Insertion sort is O(N^2). The time complexity of the best case is O(N).
Name: AMAN CHAUHAN
Roll no.: 14614802719
Date: 05/10/21 EXPERIMENT-2 Group: 5C7
Aim: To implement the linear search and binary search and analyse its time complexity.
SOURCE CODE:
1. Linear Search:
#include<iostream>
#include<cstdlib>
#include <bits/stdc++.h>
#include<chrono>
for(int i=0;i<size;i++)
cout<<array[i]<<" ";
int i;
for(i=0;i<size;i++)
break;
if(i<size)
else
}
int main()
for(i=0;i<n;++i){
display(arr,n);
clock_t start,end;
start=clock();
linearsearch(arr,n,x);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
}
OUTPUT:
GRAPH:
Linear Search
60
54
50
40
40
Time(in µs)
30 27
20
10
0
50 100 200
No. of elements
50 27
100 54
200 40
2. Binary Search:
#include<iostream>
#include<bits/stdc++.h>
#include<chrono>
#include<cstdlib>
{
while (start_index <= end_index){
if(array[middle] == key)
return middle;
start_index = middle + 1;
else
end_index = middle - 1;
return -1;
int mid;
return;
if (array[mid] == key)
for(int i=0;i<size;i++)
cout<<array[i]<<" ";
int main()
cin>>n;
for(i=0;i<n;i++)
arr[i]=rand()%500;
sort(arr,arr+n);
cout<<"\n\nArray is : ";
displayarray(arr,n);
cout<<"\n\nEnter the Element to search : ";
cin>>x;
clock_t start,end;
start=clock();
if(found_index == -1 ) {
else {
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
clock_t start1,end1;
start1=clock();
binaryrecurssive(arr, 0, n, x);
end1=clock();
double duration1=double(end1-start1)/double(CLOCKS_PER_SEC);
OUTPUT:
GRAPH:
Binary Search
30
27
24 24
25
21
20
Time(in µs)
15
15 Iterative
Recursion
10 8
0
50 100 200
No. of elements
Iterative 50 24
Iterative 100 24
Iterative 200 27
Recursion 50 8
Recursion 100 15
Recursion 200 21
VIVA - QUESTIONS
Ans) The primary condition for binary search is that the array should be already sorted. If not so then the
array must be sorted first.
Ans) Binary search works on the principle of divide and conquer. Binary search looks for a particular
element by comparing the middle most item of the Collection.
Ans) Binary search is more efficient as it can search a large data in less time.
Name: AMAN CHAUHAN
Roll no.: 14614802719
Date: 26/10/21 EXPERIMENT-3 Group: 5C7
Aim: a) To implement the following algorithm using array as a data structure and analyze their time
complexities: Merge Sort, Quick Sort.
b) To implement matrix multiplication using Strassen’s algorithm and analyse its time complexity.
SOURCE CODE:
a) Sorting algorithms
i) Merge Sort
#include <bits/stdc++.h>
int i, j, k;
int n1 = m - l + 1;
int n2 = r - m;
i = 0; j = 0; k = l;
{ arr[k] = L[i];
i++;
else{arr[k] = R[j];
j++;
k++;
arr[k] = L[i];
i++;
k++;
arr[k] = R[j];
j++;
k++;
if (l < r)
int m = l+(r-l)/2;
mergeSort(arr, l, m);
merge(arr, l, m, r);
int main()
cout<<"***MERGE SORT***\n\n"<<endl;
int n;
cin >> n;
arr[i]= rand()%1000;
display(arr, n);
clock_t start,end;
start=clock();
mergeSort(arr,0,n-1);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
OUTPUT:
ii) Quick Sort
#include <bits/stdc++.h>
i++;
swap(arr[i], arr[j]);
return (i + 1);
quickSort(arr, pi + 1, high);
int main()
cout<<"***QUICK SORT***\n\n"<<endl;
int n;
cin >> n;
arr[i]= rand()%1000;
display(arr, n);
clock_t start,end;
start=clock();
quickSort(arr,0,n-1);
display(arr, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
OUTPUT:
GRAPH:
Time in us
35000
30000
25000
Merge sort
20000
Quick sort
15000
10000
5000
0
50 50 No. of Elements
b) Strassen’s Algorithm
#include<iostream>
#include<bits/stdc++.h>
int main()
cin>>a[i][j];
cin>>b[i][j];
cout<<"\n";
cout<<a[i][j]<<"\t";
cout<<"\n";
cout<<b[i][j]<<"\t";
clock_t start,end;
start=clock();
c[0][1] = p1 + p2;
c[1][0] = p3 + p4;
c[1][1] = p5 + p1 - p3 - p7;
cout<<"\n";
cout<<c[i][j]<<"\t";
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
cout<<"\n\n";
return 0;
OUTPUT:
VIVA – QUESTIONS
Ans) A Sorting Algorithm is used to rearrange a given array or list elements according to a comparison
operator on the elements.
Ans) The main difference between quicksort and merge sort is that the quicksort sorts the elements by
comparing each element with an element called a pivot while merge sort divides the array into two
subarrays again and again until one element is left.
SOURCE CODE:
a) Matrix Chain Multiplication
#include<iostream>
#include<limits.h>
int m[n][n];
int i, j, k, L, q;
m[i][i] = 0; //number of multiplications are 0(zero) when there is only one matrix
j = i+L-1;
if (q < m[i][j])
m[i][j] = q; //if number of multiplications found less that number will be updated.
int main()
int n,i;
cin>>n;
n++;
int arr[n];
for(i=0;i<n;i++)
{
cin>>arr[i];
clock_t start,end;
start=clock();
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
b) Longest Common Subsequence
#include<bits\stdc++.h>
if (i == 0 || j == 0)
LCS_table[i][j] = 0;
else
lcsAlgo[index] = '\0';
int i = m, j = n;
i--;
j--;
index--;
i--;
else
j--;
cout << "S1 : " << S1 << "\n\nS2 : " << S2 << "\n\nLCS: " << lcsAlgo << "\n\n";
int m = strlen(S1);
int n = strlen(S2);
clock_t start,end;
start=clock();
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
OUTPUT:
c) Optimal Binary Search Tree
#include <bits/stdc++.h>
int sum = 0;
sum += frequency[x];
return sum;
if (j < i)
return 0;
if (j == i)
return frequency[i];
min = cost;
int main()
clock_t start,end;
start=clock();
cout << "\n\nCost of Optimal BST is : " << optimalSearchTree(keys, frequency, n)<<"\n\n";
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
OUTPUT:
d) Binomial Coefficient Computation
#include <bits/stdc++.h>
if (k == 0 || k == n)
return 1;
return binomialCoeff(n - 1, k - 1) +
binomialCoeff(n - 1, k);
int main()
int n, k;
cin>>n;
cin>>k;
clock_t start,end;
start=clock();
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
VIVA – QUESTIONS
Ans) Dynamic Programming (DP) is an algorithmic technique for solving an optimization problem by
breaking it down into simpler subproblems and utilizing the fact that the optimal solution to the overall
problem depends upon the optimal solution to its subproblems.
Ans) Matrix chain multiplication is an optimization problem concerning the most efficient way to
multiply a given sequence of matrices. The problem is not actually to perform the multiplications, but
merely to decide the sequence of the matrix multiplications involved. It is a Method under Dynamic
Programming in which previous output is taken as input for next.
Here, Chain means one matrix's column is equal to the second matrix's row (always).
Ans) An optimal binary search tree (Optimal BST), sometimes called a weight-balanced binary tree, is a
binary search tree which provides the smallest possible search time (or expected search time) for a given
sequence of accesses.
Ans) Recursion: It is repeated application of the same procedure on subproblems of the same type of a
problem.
Dynamic programming: It involves caching the results of the subproblems of a problem, so that every
subproblem is solved only once.
Name: AMAN CHAUHAN
Roll no.: 14614802719
Date: 30/11/21 EXPERIMENT- 5 Group: 5C7
SOURCE CODE:
a) i) 0-1 Knapsack Problem using dynamic programming
#include <iostream>
return (a > b) ? a : b;
int i, w;
if (i == 0 || w == 0)
K[i][w] = 0;
else
return K[n][W];
int main()
int n, W;
cin >> n;
cout << "\nEnter value and weight for item " << i << ":";
// int W = 50;
cin >> W;
clock_t start,end;
start=clock();
cout <<"\nThe min. weight should be: "<< knapSack(W, wt, val, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
OUTPUT:
#include<iostream>
float wt[20],pt[20],profit=0.0,y[20];
int n,i,w,temp,j,u;
class fractional_knapsack
public:
void getdata()
cin>>w;
cout<<"\nHow many objects you want to store inside the bag : "<<endl;
cin>>n;
for(i=1;i<=n;i++)
cin>>wt[i];
for(i=1;i<=n;i++)
cin>>pt[i];
void sortdata()
for(i=1;i<=n;i++)
for(j=1;j<=n;j++)
if((pt[j]/wt[j])<(pt[j+1]/wt[j+1]))
temp=pt[j];
pt[j]=pt[j+1];
pt[j+1]=temp;
temp=wt[j];
wt[j]=wt[j+1];
wt[j+1]=temp;
}
}
void calculation()
for(i=0;i<n;i++)
y[i]=0.0;
u=w;
for(i=0;i<n;i++)
if(wt[i]>u)
break;
y[i]=1.0;
u=u-wt[i];
if(i<n)
y[i]=u/wt[i];
for(i=0;i<n;i++)
profit=profit+(pt[i]*y[i]);
void displaydata()
cout<<"-------------------------------------------\n";
cout<<"-------------------------------------------\n";
for(i=1;i<=n;i++)
cout<<i<<"\t"<<wt[i]<<"\t"<<pt[i]<<"\t"<<endl;
cout<<"\nMAXIMUM PROFIT:"<<profit;
};
int main()
fractional_knapsack obj;
obj.getdata();
clock_t start,end;
start=clock();
obj.sortdata();
obj.calculation();
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
obj.displaydata();
return 0;
OUTPUT:
b) Activity Selection Problem
#include <bits/stdc++.h>
int i, j;
i = 0;
i = j;
int main()
clock_t start,end;
start=clock();
printMaxActivities(s, f, n);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
c) Huffman Coding
#include <iostream>
#include <cstdlib>
struct MinHeapNode {
struct MinHeapNode *left, *right; // Left and right child of this node
};
struct MinHeap {
};
// A utility function allocate a new min heap node with given character and frequency of the character
{
struct MinHeapNode* temp
= (struct MinHeapNode*)malloc
(sizeof(struct MinHeapNode));
temp->data = data;
temp->freq = freq;
return temp;
// current size is 0
minHeap->size = 0;
minHeap->capacity = capacity;
minHeap->array
= (struct MinHeapNode**)malloc(minHeap->
return minHeap;
}
// A utility function to swap two min heap nodes
struct MinHeapNode** b)
*a = *b;
*b = t;
smallest = left;
smallest = right;
if (smallest != idx) {
swapMinHeapNode(&minHeap->array[smallest],
&minHeap->array[idx]);
minHeapify(minHeap, smallest);
minHeap->array[0]
= minHeap->array[minHeap->size - 1];
--minHeap->size;
minHeapify(minHeap, 0);
return temp;
++minHeap->size;
int i = minHeap->size - 1;
while (i && minHeapNode->freq < minHeap->array[(i - 1) / 2]->freq) {
i = (i - 1) / 2;
minHeap->array[i] = minHeapNode;
int n = minHeap->size - 1;
int i;
minHeapify(minHeap, i);
int i;
cout<< arr[i];
cout<<"\n";
}
// Utility function to check if this node is leaf
// Creates a min heap of capacity equal to size and inserts all character of data[] in min heap. Initially
size of min heap is equal to capacity
minHeap->size = size;
buildMinHeap(minHeap);
return minHeap;
// Step 1: Create a min heap of capacity equal to size. Initially, there are modes equal to size.
while (!isSizeOne(minHeap)) {
// Step 2: Extract the two minimum freq items from min heap
left = extractMin(minHeap);
right = extractMin(minHeap);
// sum of the two nodes frequencies.Make the two extracted node as left and right children of
this new node.
top->left = left;
top->right = right;
insertMinHeap(minHeap, top);
// Step 4: The remaining node is the root node and the tree is complete.
return extractMin(minHeap);
// Prints huffman codes from the root of Huffman Tree. It uses arr[] to store codes
if (root->left) {
arr[top] = 0;
arr[top] = 1;
if (isLeaf(root)) {
printArr(arr, top);
// The main function that builds a Huffman Tree and print codes by traversing the built Huffman Tree
int main()
clock_t start,end;
start=clock();
HuffmanCodes(arr, freq, size);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
d) Task Scheduling Problem
#include<iostream>
#include<algorithm>
int plat_needed=1,result=1;
int i=1,j=0;
for(int i=0;i<n;i++)
plat_needed=1;
for(int j=i+1;j<n;j++)
plat_needed++;
result = max(result,plat_needed);
return result;
int main()
int n = sizeof(start)/sizeof(start[0]);
return 0;
OUTPUT:
VIVA – QUESTIONS
Ans) Dynamic Programming (DP) is an algorithmic technique for solving an optimization problem by
breaking it down into simpler subproblems and utilizing the fact that the optimal solution to the overall
problem depends upon the optimal solution to its subproblems.
Ans) The knapsack problem is a problem in combinatorial optimization: Given a set of items, each with
a weight and a value, determine the number of each item to include in a collection so that the total
weight is less than or equal to a given limit and the total value is as large as possible.
Ans) The Activity Selection Problem is an optimization problem which deals with the selection of non-
conflicting activities that need to be executed by a single person or machine in a given time frame.
Each activity is marked by a start and finish time. Greedy technique is used for finding the solution since
this is an optimization problem.
Ans) The process of deciding which task will utilize the CPU time is called task scheduling. The
scheduling of the tasks may be on the basis of their priorities.
Ans) Greedy Method: In a greedy Algorithm, we make whatever choice seems best at the moment and
then solve the sub-problems arising after the choice is made.
Dynamic programming: It involves caching the results of the subproblems of a problem, so that every
subproblem is solved only once.
Name: AMAN CHAUHAN
Roll no.: 14614802719
Date: 07/12/21 EXPERIMENT- 6 Group: 5C7
#include <iostream>
#define V 9
// A utility function to find the vertex with minimum distance value, from
return min_index;
cout<<i<<"\t\t"<<dist[i]<<"\n";
}
int dist[V]; // The output array. dist[i] will hold the shortest
dist[src] = 0;
// Pick the minimum distance vertex from the set of vertices not
sptSet[u] = true;
printSolution(dist, V);
int main()
cout<<"DIJKSTRA ALGORITHM:\n\n";
int graph[V][V] = { { 0, 4, 0, 0, 0, 0, 0, 8, 0 },
{ 4, 0, 8, 0, 0, 0, 0, 11, 0 },
{ 0, 8, 0, 7, 0, 4, 0, 0, 2 },
{ 0, 0, 7, 0, 9, 14, 0, 0, 0 },
{ 0, 0, 0, 9, 0, 10, 0, 0, 0 },
{ 0, 0, 4, 14, 10, 0, 2, 0, 0 },
{ 0, 0, 0, 0, 0, 2, 0, 1, 6 },
{ 8, 11, 0, 0, 0, 0, 1, 0, 7 },
{ 0, 0, 2, 0, 0, 0, 6, 7, 0 } };
clock_t start,end;
start=clock();
dijkstra(graph, 0);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
VIVA – QUESTIONS
Ans) A greedy algorithm is an algorithmic strategy that makes the best optimal choice at each small
stage with the goal of this eventually leading to a globally optimum solution. This means that the
algorithm picks the best solution at the moment without regard for consequences.
Ans) Dijkstra's algorithm is an algorithm for finding the shortest paths between nodes in a graph.
Ans) The time complexity of Dijkstra's algorithm is O(V^2), but with a min-priority queue it drops down
to O(V + E log V).
Ans) Dijkstra's algorithm works in both directed and undirected graphs, because you simply add adjacent
nodes into the PriorityQueue when you have an edge to travel to from your adjacency list.
Ans) Greedy Method: In a greedy Algorithm, we make whatever choice seems best at the moment and
then solve the sub-problems arising after the choice is made.
Dynamic programming: It involves caching the results of the subproblems of a problem, so that every
subproblem is solved only once.
Date: 14/12/21 EXPERIMENT- 7
SOURCE CODE:
a) Kruskal’s Algorithm.
#include<bits/stdc++.h>
struct Graph
int V, E;
// Constructor
Graph(int V, int E)
this->V = V;
this->E = E;
};
struct DisjointSets
int n;
// Constructor.
DisjointSets(int n)
{ // Allocate memory
this->n = n;
rnk[i] = 0;
parent[i] = i;
int find(int u)
if (u != parent[u])
parent[u] = find(parent[u]);
return parent[u];
// Union by rank
x = find(x), y = find(y);
parent[y] = x;
parent[x] = y;
if (rnk[x] == rnk[y])
rnk[y]++;
};
int Graph::kruskalMST()
sort(edges.begin(), edges.end());
DisjointSets ds(V);
int u = it->second.first;
int v = it->second.second;
// Check if the selected edge is creating a cycle or not (Cycle is created if u and v belong to same set)
if (set_u != set_v)
mst_wt += it->first;
ds.merge(set_u, set_v);
return mst_wt;
int main()
cout<<"KRUSKAL's ALGORITHM:\n\n";
int V = 9, E = 14;
g.addEdge(0, 1, 4);
g.addEdge(0, 7, 8);
g.addEdge(1, 2, 8);
g.addEdge(1, 7, 11);
g.addEdge(2, 3, 7);
g.addEdge(2, 8, 2);
g.addEdge(2, 5, 4);
g.addEdge(3, 4, 9);
g.addEdge(3, 5, 14);
g.addEdge(4, 5, 10);
g.addEdge(5, 6, 2);
g.addEdge(6, 7, 1);
g.addEdge(6, 8, 6);
g.addEdge(7, 8, 7);
clock_t start,end;
start=clock();
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
b) Prim’s Algorithm.
#include<iostream>
min = key[v];
min_index = v;
return min_index;
int minCost=0;
cout<<"\nEdge \tWeight\n";
minCost+=cost[i][parent[i]];
bool visited[V];
visited[i] = false;
parent[i]=-1;
parent[v] = u;
key[v] = cost[u][v];
print_MST(parent, cost);
int main()
cout<<"PRIM's ALGORITHM:\n\n";
int cost[V][V];
cout<<"Enter the vertices for a graph with "<<V<< " vetices : "<<endl;
for(int j=0;j<V;j++)
cin>>cost[i][j];
clock_t start,end;
start=clock();
find_MST(cost);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
VIVA – QUESTIONS
Ans) A greedy algorithm is an algorithmic strategy that makes the best optimal choice at each small
stage with the goal of this eventually leading to a globally optimum solution. This means that the
algorithm picks the best solution at the moment without regard for consequences.
Ans) Kruskal's Algorithm is a famous greedy algorithm. It is used for finding the Minimum Spanning
Tree (MST) of a given graph. To apply Kruskal's algorithm, the given graph must be weighted,
connected and undirected.
Ans) Prim's Algorithm is used to find the minimum spanning tree from a graph. Prim's algorithm finds
the subset of edges that includes every vertex of the graph such that the sum of the weights of the edges
can be minimized.
Ans) Prim's Algorithm grows a solution from a random vertex by adding the next cheapest vertex to the
existing tree.
Kruskal's Algorithm grows a solution from the cheapest edge by adding the next cheapest edge to the
existing tree / forest.
Date: 14/12/21 EXPERIMENT- 8
SOURCE CODE:
a) Naïve String Matching Algorithm.
#include<iostream>
#include<string.h>
int M = strlen(pat);
int N = strlen(txt);
int j;
for (j = 0; j < M; j++){ /* For current index i, check for pattern match */
if (txt[i + j] != pat[j])
break;
int main()
{
clock_t start,end;
start=clock();
search(pat, txt);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
b) Rabin Karp String Matching Algorithm.
#include <iostream>
#include <string.h>
#define d 10
int m = strlen(pattern);
int n = strlen(text);
int i, j;
int p,t = 0;
int h = 1;
h = (h * d) % q;
for (i = 0; i < m; i++) { // Calculate hash value for pattern and text
p = (d * p + pattern[i]) % q;
t = (d * t + text[i]) % q;
if (p == t) {
if (text[i + j] != pattern[j])
break;
if (j == m)
if (i < n - m) {
t = (d * (t - text[i] * h) + text[i + m]) % q;
if (t < 0)
t = (t + q);
int main()
int q = 13;
clock_t start,end;
start=clock();
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
c) Knuth Morris Pratt Algorithm.
#include <bits/stdc++.h>
int M = strlen(pat);
int N = strlen(txt);
// create lps[] that will hold the longest prefix suffix values for pattern
int lps[M];
computeLPSArray(pat, M, lps);
while (i < N) {
if (pat[j] == txt[i]) {
j++;
i++;
if (j == M) {
j = lps[j - 1];
j = lps[j - 1];
else
i = i + 1;
int len = 0;
int i = 1;
while (i < M) {
if (pat[i] == pat[len]) {
len++;
lps[i] = len;
i++;
if (len != 0) {
else // if (len == 0)
lps[i] = 0;
i++;
}
int main()
clock_t start,end;
start=clock();
KMPSearch(pat, txt);
end=clock();
double duration=double(end-start)/double(CLOCKS_PER_SEC);
return 0;
OUTPUT:
VIVA – QUESTIONS
Ans) String-matching algorithms, are an important class of string algorithms that try to find a place
where one or several strings are found within a larger string or text.
Ans) Naive Algorithm: It slides the pattern over text one by one and check for a match. If a match is
found, then slides by 1 again to check for subsequent matches.
Ans) Rabin Karp Algorithm: It matches the hash value of the pattern with the hash value of current
substring of text, and if the hash values match then only it starts matching individual characters.
Ans) KMP (Knuth Morris Pratt) Algorithm: The idea is whenever a mismatch is detected, we already
know some of the characters in the text of the next window. So, we take advantage of this information to
avoid matching the characters that we know will anyway match.
Ans) The time complexity of Naïve Pattern Search method is O(m*n). The m is the size of pattern and n
is the size of the main string.
The average and best-case running time of the Rabin-Karp algorithm is O(n+m), but its worst-case time
is O(nm).