
Vedant Pandey (2100270110093)

Program-4

To get input from the user and perform numerical operations (MAX, MIN, AVG, SUM, SQRT, ROUND) in Python.

def get_user_input():
    nums = []
    while True:
        num = input("Enter a number (or 'done' to finish): ")
        if num.lower() == 'done':
            break
        else:
            try:
                nums.append(float(num))
            except ValueError:
                print("Invalid input. Please enter a valid number.")
    return nums

def numerical_operations(nums):
    if not nums:
        print("No numbers entered.")
        return

    maximum = max(nums)
    minimum = min(nums)
    average = sum(nums) / len(nums)
    total_sum = sum(nums)
    sqrt_values = [round(num ** 0.5, 2) for num in nums]
    rounded_values = [round(num) for num in nums]

    print("Maximum:", maximum)
    print("Minimum:", minimum)
    print("Average:", average)
    print("Sum:", total_sum)
    print("Square roots:", sqrt_values)
    print("Rounded values:", rounded_values)

def main():
    print("Enter numbers to perform numerical operations.")
    numbers = get_user_input()
    numerical_operations(numbers)

if __name__ == "__main__":
    main()

OUTPUT

Enter numbers to perform numerical operations.


Enter a number (or 'done' to finish): 10
Enter a number (or 'done' to finish): 20
Enter a number (or 'done' to finish): 30
Enter a number (or 'done' to finish): done
Maximum: 30.0
Minimum: 10.0
Average: 20.0
Sum: 60.0
Square roots: [3.16, 4.47, 5.48]
Rounded values: [10, 20, 30]

Program-5

To perform dimensionality reduction using PCA for the Houses data set in Python.

import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

def apply_pca(data, num_components=2):
    # Keep only the numeric columns for PCA
    numeric_data = data.select_dtypes(include=['int64', 'float64'])
    # Fill missing values so StandardScaler does not fail on NaNs
    numeric_data = numeric_data.fillna(numeric_data.mean())
    scaler = StandardScaler()
    scaled_data = scaler.fit_transform(numeric_data)
    pca = PCA(n_components=num_components)
    pca_data = pca.fit_transform(scaled_data)
    pca_columns = [f'PC{i+1}' for i in range(num_components)]
    pca_df = pd.DataFrame(data=pca_data, columns=pca_columns)
    final_data = pd.concat([pca_df, data], axis=1)
    return final_data

# The AB_NYC_2019 listings CSV stands in for the houses data set
url = "https://raw.githubusercontent.com/dipanjanS/practical-machine-learning-with-python/master/notebooks/Ch05_Machine_Learning_Pipeline/AB_NYC_2019.csv"
data = pd.read_csv(url)

result = apply_pca(data)
print(result.head())

OUTPUT
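The printed head only shows the two PC columns alongside the raw ones. To see how much variance the components actually capture, one can refit and inspect explained_variance_ratio_ (a minimal sketch, assuming the data frame and imports above are still in scope):

# Refit PCA on the prepared numeric columns to inspect the share of
# variance each principal component explains
numeric_data = data.select_dtypes(include=['int64', 'float64'])
numeric_data = numeric_data.fillna(numeric_data.mean())
pca = PCA(n_components=2).fit(StandardScaler().fit_transform(numeric_data))
print(pca.explained_variance_ratio_)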

Program-6

Perform Simple Logistic Regression with Python.

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.metrics import accuracy_score, confusion_matrix

iris = datasets.load_iris()
X = iris.data
y = iris.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = LogisticRegression(max_iter=200)  # raise the iteration cap so lbfgs converges
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)

OUTPUT

Accuracy: 1.0
Confusion Matrix:
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
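Beyond overall accuracy, per-class precision and recall are worth checking; a short follow-up sketch, assuming the fitted model and test split above are still in scope:

from sklearn.metrics import classification_report

# Precision, recall and F1 for each iris species
print(classification_report(y_test, y_pred, target_names=iris.target_names))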

Program-7

To perform data import/export (.CSV, .XLS, .TXT) operations using data frames in Python.

import pandas as pd

data = {
    'Name': ['John', 'Anna', 'Peter', 'Linda'],
    'Age': [28, 35, 42, 32],
    'City': ['New York', 'Paris', 'Berlin', 'London']
}
df = pd.DataFrame(data)

# Export to CSV, Excel (requires openpyxl) and tab-separated text
df.to_csv('sample_data.csv', index=False)
df.to_excel('sample_data.xlsx', index=False)
df.to_csv('sample_data.txt', sep='\t', index=False)

# Import each file back into a data frame
df_csv = pd.read_csv('sample_data.csv')
df_excel = pd.read_excel('sample_data.xlsx')
df_txt = pd.read_csv('sample_data.txt', sep='\t')

print("Data imported from CSV file:")
print(df_csv)
print("\nData imported from Excel file:")
print(df_excel)
print("\nData imported from text file:")
print(df_txt)
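A quick round-trip check (an added suggestion, not in the original): with these simple dtypes, the re-imported CSV frame should match the source frame exactly:

# All columns here survive text serialization unchanged
print(df.equals(df_csv))   # expected: True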

OUTPUT

Program-8

To perform K-Means clustering and visualize the result for the iris data set in Python.

import seaborn as sns
import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings('ignore')

iris = sns.load_dataset('iris')
print(iris.head())

# seaborn's iris data set labels species as 'setosa', 'virginica' and
# 'versicolor' (no 'Iris-' prefix)
iris_setosa = iris.loc[iris["species"] == "setosa"]
iris_virginica = iris.loc[iris["species"] == "virginica"]
iris_versicolor = iris.loc[iris["species"] == "versicolor"]

# Per-species distribution of each feature (histplot replaces the
# deprecated distplot; height replaces the removed size argument)
sns.FacetGrid(iris, hue="species", height=3).map(sns.histplot, "petal_length", kde=True).add_legend()
sns.FacetGrid(iris, hue="species", height=3).map(sns.histplot, "petal_width", kde=True).add_legend()
sns.FacetGrid(iris, hue="species", height=3).map(sns.histplot, "sepal_length", kde=True).add_legend()
plt.show()
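Note that the listing above only visualizes the per-species feature distributions; the K-Means step named in the title is absent. A minimal sketch of the missing clustering, assuming scikit-learn's KMeans and plotting the petal features:

from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

iris = load_iris()
X = iris.data

# Three clusters, one per iris species
kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
labels = kmeans.fit_predict(X)

# Scatter the petal-length / petal-width plane, coloured by cluster
plt.scatter(X[:, 2], X[:, 3], c=labels, cmap='viridis')
plt.scatter(kmeans.cluster_centers_[:, 2], kmeans.cluster_centers_[:, 3],
            c='red', marker='x', s=100, label='Centroids')
plt.xlabel('petal length (cm)')
plt.ylabel('petal width (cm)')
plt.title('K-Means clusters on the iris data set')
plt.legend()
plt.show()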

Program-9
Write an R/Python script to diagnose a disease using KNN classification and plot the results in Python.
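No listing accompanies this program in the source; a minimal sketch, assuming scikit-learn's built-in breast-cancer data set stands in for the disease diagnosis:

import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# Load a disease data set (breast-cancer diagnosis: malignant vs. benign)
data = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.2, random_state=42)

# Try several values of k and record the test accuracy for each
k_values = range(1, 16)
accuracies = []
for k in k_values:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    accuracies.append(accuracy_score(y_test, knn.predict(X_test)))

# Plot accuracy against k to pick a good neighbourhood size
plt.plot(k_values, accuracies, marker='o')
plt.xlabel('k (number of neighbours)')
plt.ylabel('Test accuracy')
plt.title('KNN diagnosis accuracy on the breast-cancer data set')
plt.show()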
Program-10

To perform market basket analysis using Association Rules (Apriori) in Python.

from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
import pandas as pd

data = {'TID': [1, 2, 3, 4, 5],
        'Items': [['Milk', 'Bread', 'Butter'],
                  ['Milk', 'Bread', 'Eggs'],
                  ['Milk', 'Bread', 'Butter', 'Eggs'],
                  ['Milk', 'Bread'],
                  ['Milk', 'Eggs']]}

df = pd.DataFrame(data)
df['Items'] = df['Items'].apply(lambda x: ','.join(x))

# One-hot encode the basket strings; apriori expects boolean columns
one_hot_encoded = df['Items'].str.get_dummies(sep=',').astype(bool)
frequent_itemsets = apriori(one_hot_encoded, min_support=0.5, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.5)

print("Frequent Itemsets:")
print(frequent_itemsets)

print("\nAssociation Rules:")
print(rules)
OUTPUT
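The rules frame carries support, confidence and lift columns; a short follow-up sketch (an added suggestion) for keeping only rules that beat independence:

# Rules with lift > 1 indicate the antecedent raises the consequent's
# probability; sort the strongest first
strong_rules = rules[rules['lift'] > 1].sort_values('lift', ascending=False)
print(strong_rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']])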
Program-11

To perform data pre-processing operations:
i) Handling missing data
ii) Min-Max normalization

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

data = {
    'A': [1, 2, None, 4, 5],
    'B': [10, 20, 30, None, 50],
    'C': [100, 200, 300, 400, None]
}
df = pd.DataFrame(data)

print("Original DataFrame with missing values:")
print(df)

# i) Handle missing data: replace each NaN with its column mean
df.fillna(df.mean(), inplace=True)

print("\nDataFrame after handling missing values (filled with mean):")
print(df)

# ii) Min-Max normalization: rescale every column to [0, 1]
scaler = MinMaxScaler()
df_normalized = pd.DataFrame(scaler.fit_transform(df), columns=df.columns)

print("\nDataFrame after Min-Max normalization:")
print(df_normalized)
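Mean-filling is only one imputation strategy; an alternative sketch using scikit-learn's SimpleImputer (an added suggestion, not in the original):

from sklearn.impute import SimpleImputer

# Median imputation is more robust to outliers than the mean
imputer = SimpleImputer(strategy='median')
df_imputed = pd.DataFrame(imputer.fit_transform(pd.DataFrame(data)),
                          columns=df.columns)
print(df_imputed)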
