
dsm-1

December 19, 2023

0.1 1. Write a function to read a data set and store it as a matrix


[125]: import pandas as pd
import numpy as np
from scipy.spatial import distance
from collections import Counter

def read_dataset(filename):
    df = pd.read_csv(filename)
    matrix = df.to_numpy()
    return matrix

csv_file = "iris.csv"
dataset = read_dataset(csv_file)
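Note that because the variety column holds strings, to_numpy() returns an object-dtype array, which is why the printout below mixes floats with quoted labels; the numeric columns would need an explicit cast before any arithmetic.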

[126]: # Print the matrix
print(dataset)

[[5.1 3.5 1.4 0.2 'Setosa']
 [4.9 3.0 1.4 0.2 'Setosa']
 [4.7 3.2 1.3 0.2 'Setosa']
 [5.0 3.4 1.5 0.2 'Setosa']
 [5.7 3.8 1.7 0.3 'Setosa']
 [5.1 3.8 1.5 0.3 'Setosa']
 [5.5 4.2 1.4 0.2 'Setosa']
 [4.9 3.1 1.5 0.2 'Setosa']
 [5.0 3.2 1.2 0.2 'Setosa']
 [5.0 3.3 1.4 0.2 'Setosa']
 [7.0 3.2 4.7 1.4 'Versicolor']
 [6.9 3.1 4.9 1.5 'Versicolor']
 [5.5 2.3 4.0 1.3 'Versicolor']
 [6.5 2.8 4.6 1.5 'Versicolor']
 [6.3 2.5 4.9 1.5 'Versicolor']
 [6.0 3.4 4.5 1.6 'Versicolor']
 [6.7 3.1 4.7 1.5 'Versicolor']
 [6.3 2.3 4.4 1.3 'Versicolor']
 [5.6 3.0 4.1 1.3 'Versicolor']
 [5.1 2.5 3.0 1.1 'Versicolor']
 [6.3 3.3 6.0 2.5 'Virginica']
 [5.8 2.7 5.1 1.9 'Virginica']
 [7.1 3.0 5.9 2.1 'Virginica']
 [6.3 2.9 5.6 1.8 'Virginica']
 [6.5 3.0 5.8 2.2 'Virginica']
 [7.6 3.0 6.6 2.1 'Virginica']
 [4.9 2.5 4.5 1.7 'Virginica']
 [7.3 2.9 6.3 1.8 'Virginica']
 [6.7 2.5 5.8 1.8 'Virginica']
 [7.2 3.6 6.1 2.5 'Virginica']]

0.2 2.a Calculate the data mean for each attribute and represent it as a vector
[ ]: def calculate_data_mean(filename):
    # Read the CSV file using pandas
    df = pd.read_csv(filename)

    # Calculate the mean for each attribute
    mean_vector = df.mean(numeric_only=True)

    return mean_vector

# Provide the relative file path to the CSV file
csv_file = "iris.csv"

# Call the calculate_data_mean function with the csv_file path
mean_vector = calculate_data_mean(csv_file)

# Print the mean vector
print("Mean Vector:")
print(mean_vector)

0.3 2.b Calculate Manhattan distance between two data objects


[127]: def manhattan(vec1, vec2):
    dist = np.sum(np.absolute(np.array(vec1) - np.array(vec2)))
    return dist
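A quick sanity check helps confirm the implementation. The cell below is a minimal sketch that was not part of the original run; it reuses the first two Setosa rows from the matrix printed above:

[ ]: # Illustrative check: Manhattan distance sums the absolute coordinate gaps.
a = np.array([5.1, 3.5, 1.4, 0.2])
b = np.array([4.9, 3.0, 1.4, 0.2])
print(manhattan(a, b))  # |0.2| + |0.5| + 0 + 0, approximately 0.7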

0.4 2.c Calculate Euclidean distance between two data objects


[128]: # calculating Euclidean distance using linalg.norm()
def euclidean_distance(vec1, vec2):
    dist = np.linalg.norm(np.array(vec1) - np.array(vec2))
    return dist
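The same two rows exercise the Euclidean version; again a sketch cell that was not in the original run:

[ ]: # Illustrative check: Euclidean distance is the root of summed squared gaps.
a = np.array([5.1, 3.5, 1.4, 0.2])
b = np.array([4.9, 3.0, 1.4, 0.2])
print(euclidean_distance(a, b))  # sqrt(0.2**2 + 0.5**2), approximately 0.539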

0.5 2.d Calculate Chebyshev distance between two data objects
[124]: def chebyshev(vec1, vec2):
    dist = np.max(np.absolute(np.array(vec1) - np.array(vec2)))
    return dist
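Chebyshev distance keeps only the single largest coordinate gap, as a quick illustrative call (not part of the original run) shows:

[ ]: # Illustrative check: Chebyshev distance is the largest coordinate gap.
a = np.array([5.1, 3.5, 1.4, 0.2])
b = np.array([4.9, 3.0, 1.4, 0.2])
print(chebyshev(a, b))  # max(0.2, 0.5, 0.0, 0.0) = 0.5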

0.6 2.e Calculate Mahalanobis distance.
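Unlike the previous metrics, the Mahalanobis distance accounts for the spread and correlation of the attributes: for a point x, data mean μ, and covariance matrix Σ, it is d(x) = √((x − μ)ᵀ Σ⁻¹ (x − μ)). The function below computes exactly this, estimating μ and Σ from the data.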


[ ]: def mahalanobis_distance(data, x):
    mean_vector = data.mean().values
    cov_matrix = data.cov().values
    inv_cov_matrix = np.linalg.inv(cov_matrix)
    x_minus_mean = x - mean_vector
    mahalanobis_sq = np.dot(np.dot(x_minus_mean, inv_cov_matrix), x_minus_mean.T)
    mahalanobis_dist = np.sqrt(mahalanobis_sq)
    return mahalanobis_dist

iris_data = pd.read_csv('iris.csv')

# Select the columns to use for calculating the Mahalanobis distance
columns = ['sepal.length', 'sepal.width', 'petal.length', 'petal.width']
iris_subset = iris_data[columns]

# Example usage: calculate the Mahalanobis distance for a specific point
point = np.array([5.0, 3.2, 1.4, 0.2])  # Example point
# (named mahal_dist so it does not shadow the scipy 'distance' import above)
mahal_dist = mahalanobis_distance(iris_subset, point)
print("Mahalanobis Distance:", mahal_dist)

# ref: https://www.machinelearningplus.com/statistics/mahalanobis-distance/

0.7 Write a separate function to implement the K-Nearest Neighbors classification method using all the functions implemented in question (2) above
[132]: def knn_classify(data, labels, query_point, k, distance_metric):
    distances = []
    for i, row in data.iterrows():
        if distance_metric == 'manhattan':
            dist = manhattan(row, query_point)
        elif distance_metric == 'chebyshev':
            dist = chebyshev(row, query_point)
        elif distance_metric == 'euclidean':
            dist = euclidean_distance(row, query_point)
        elif distance_metric == 'mahalanobis':
            # Note: this measures the query point's distance from the data
            # mean, so every row receives the same score; ties are then
            # broken alphabetically by label when the list is sorted.
            dist = mahalanobis_distance(data, query_point)
        else:
            raise ValueError("Invalid distance metric. Supported options are 'manhattan', 'chebyshev', 'euclidean', and 'mahalanobis'.")
        distances.append((dist, labels[i]))

    distances.sort()
    k_nearest = distances[:k]
    k_nearest_labels = [label for (_, label) in k_nearest]

    most_common = Counter(k_nearest_labels).most_common(1)
    predicted_label = most_common[0][0]

    return predicted_label

# Load the iris dataset
iris_data = pd.read_csv('iris.csv')

# Select the feature columns and the corresponding labels
feature_columns = ['sepal.length', 'sepal.width', 'petal.length', 'petal.width']
iris_features = iris_data[feature_columns]
iris_labels = iris_data['variety']

# Example usage: classify a random point using KNN with different distance metrics
random_point = np.array([6.1, 2.9, 4.7, 1.3])
k = 5  # Number of nearest neighbors to consider
distance_metrics = ['manhattan', 'chebyshev', 'euclidean', 'mahalanobis']

for metric in distance_metrics:
    predicted_label = knn_classify(iris_features, iris_labels, random_point, k, metric)
    print(f"Predicted variety using {metric.capitalize()} distance: {predicted_label}")

Predicted variety using Manhattan distance: Versicolor
Predicted variety using Chebyshev distance: Versicolor
Predicted variety using Euclidean distance: Versicolor
Predicted variety using Mahalanobis distance: Setosa
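A single query point says little about overall quality; one common way to gauge it is leave-one-out evaluation, classifying each row against all the others. The cell below is a minimal sketch that reuses the variables above and was not part of the original run, so its printed accuracy is not a recorded result:

[ ]: # Hypothetical leave-one-out check using the Euclidean metric.
correct = 0
for i in range(len(iris_features)):
    train_X = iris_features.drop(index=i)  # all rows except row i
    train_y = iris_labels.drop(index=i)
    pred = knn_classify(train_X, train_y,
                        iris_features.iloc[i].to_numpy(), k, 'euclidean')
    correct += (pred == iris_labels.iloc[i])
print(f"Leave-one-out accuracy: {correct / len(iris_features):.2f}")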

0.8 Write a separate function to implement the K-means clustering method using all the functions implemented in question (2) above
[146]: from sklearn.metrics.pairwise import pairwise_distances

def initialize_centroids(data, k):
    """Randomly initialize k centroids from the data."""
    centroids = data[np.random.choice(range(data.shape[0]), k, replace=False)]
    return centroids

def assign_clusters(data, centroids, distance_metric):
    """Assign each data point to the nearest centroid based on the distance metric."""
    if distance_metric == 'mahalanobis':
        # Caution: no centroids are passed here, so this builds the full
        # point-to-point distance matrix and argmin returns each point's own
        # index; that is why the Mahalanobis labels below are degenerate.
        distances = pairwise_distances(data, metric=distance_metric)
    else:
        distances = pairwise_distances(data, centroids, metric=distance_metric)
    cluster_labels = np.argmin(distances, axis=1)
    return cluster_labels

def update_centroids(data, cluster_labels, k):
    """Update the centroids based on the mean of data points in each cluster."""
    centroids = []
    for i in range(k):
        cluster_data = data[cluster_labels == i]
        centroid = np.mean(cluster_data, axis=0)
        centroids.append(centroid)
    centroids = np.vstack(centroids)
    return centroids

def kmeans(data, k, distance_metric='euclidean', max_iterations=100):
    """Perform K-means clustering on the data using the specified distance metric."""
    centroids = initialize_centroids(data, k)
    for _ in range(max_iterations):
        cluster_labels = assign_clusters(data, centroids, distance_metric)
        new_centroids = update_centroids(data, cluster_labels, k)
        if np.array_equal(centroids, new_centroids):
            break
        centroids = new_centroids
    return cluster_labels, centroids

# Load the dataset (numeric columns only)
iris_data = np.loadtxt('iris.csv', delimiter=',', skiprows=1, usecols=(0, 1, 2, 3))

# Set the number of clusters (k)
k = 3

# Perform K-means clustering using different distance metrics
distance_metrics = ['euclidean', 'chebyshev', 'manhattan', 'mahalanobis']
for metric in distance_metrics:
    cluster_labels, centroids = kmeans(iris_data, k, distance_metric=metric)
    print(f"Distance Metric: {metric}")
    print("Cluster Labels:")
    print(cluster_labels)
    print("Centroids:")
    print(centroids)
    print("-------------")

Distance Metric: euclidean
Cluster Labels:
[2 2 2 2 2 2 2 2 2 2 0 0 1 0 0 1 0 1 1 1 0 0 0 0 0 0 1 0 0 0]
Centroids:
[[6.72857143 2.97142857 5.5 1.86428571]
[5.56666667 2.66666667 4.08333333 1.38333333]
[5.09 3.45 1.43 0.22 ]]
-------------
Distance Metric: chebyshev
Cluster Labels:
[1 1 1 1 2 2 2 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]
Centroids:
[[6.44736842 2.9 5.18421053 1.75263158]
[4.9625 3.15 1.5875 0.3125 ]
[5.43333333 3.93333333 1.53333333 0.26666667]]
-------------
Distance Metric: manhattan
Cluster Labels:
[0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 0 1 2 1 2 1 1 2 1 1 1]
Centroids:
[[5.09090909 3.36363636 1.57272727 0.3 ]
[6.95714286 3.04285714 6.07142857 2.14285714]
[6.15 2.81666667 4.66666667 1.525 ]]
-------------
Distance Metric: mahalanobis
Cluster Labels:
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
24 25 26 27 28 29]
Centroids:
[[5.1 3.5 1.4 0.2]
[4.9 3. 1.4 0.2]
[4.7 3.2 1.3 0.2]]
-------------
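Because each row also carries its true variety, a rough external check of the clustering is purity: the share of points that land in their cluster's majority variety. The cell below is a hedged sketch that was not in the original notebook; it rereads the labels from iris.csv and reruns K-means with random initialization, so the printed value will vary between runs:

[ ]: # Hypothetical purity check for one euclidean K-means run.
varieties = pd.read_csv('iris.csv')['variety'].to_numpy()
labels, _ = kmeans(iris_data, k, distance_metric='euclidean')
# For each cluster, count its majority variety (assumes no cluster is empty).
majority_counts = [Counter(varieties[labels == c]).most_common(1)[0][1]
                   for c in range(k)]
print(f"Cluster purity: {sum(majority_counts) / len(varieties):.2f}")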
