
CODE:

graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['10'],
    '2': [],
    '4': ['10'],
    '10': ['1', '6'],
    '1': ['2', '7'],
    '8': ['6', '7'],
    '9': ['11', '22'],
    '11': [],
    '12': ['17'],
    '17': [],
    '22': ['30'],
    '30': ['6'],
    '6': ['11', '12']
}

# Breadth-first search from a single start node; the shared visited list lets
# repeated calls skip nodes that were already reached
def bfs(visited, graph, node):
    visited.append(node)
    queue = [node]

    while queue:
        m = queue.pop(0)
        print(m, end=" ")

        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

# Run BFS from every not-yet-visited node so disconnected parts of the graph
# are traversed as well
def bfs_traversal(graph):
    visited = []
    for node in graph:
        if node not in visited:
            bfs(visited, graph, node)

print("Follow the breadth first search")
bfs_traversal(graph)

OUTPUT:
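Note (an optional aside, not part of the task code): list.pop(0) is O(n), so for larger graphs the same traversal is usually written with collections.deque, whose popleft() is O(1). A minimal sketch, assuming the graph dictionary defined above:

from collections import deque

def bfs_deque(graph, start):
    # Same breadth-first order as the bfs() above, but the queue pops from
    # the left in constant time
    visited = [start]
    queue = deque([start])
    order = []
    while queue:
        m = queue.popleft()
        order.append(m)
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
    return order

# bfs_deque(graph, '5') -> ['5', '3', '7', '2', '4', '10', '1', '6', '11', '12', '17']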
CODE:
graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['10'],
    '2': [],
    '4': ['10'],
    '10': ['2', '4'],
}

visited = []
queue = []

# Breadth-first search starting from the given node
def bfs(visited, graph, node):
    visited.append(node)
    queue.append(node)

    while queue:
        m = queue.pop(0)
        print(m, end=" ")

        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

print("follow the breadth first search")
bfs(visited, graph, '5')

OUTPUT:
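As a small illustrative variant (not part of the submitted code), the search can collect the visiting order in a list instead of printing it, which makes the result easy to check; with the six-node graph above and start node '5' it returns ['5', '3', '7', '2', '4', '10']:

def bfs_order(graph, start):
    # Breadth-first search that returns the visiting order
    visited = [start]
    queue = [start]
    order = []
    while queue:
        m = queue.pop(0)
        order.append(m)
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
    return order

print(bfs_order(graph, '5'))   # ['5', '3', '7', '2', '4', '10']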
CODE:
import random
import itertools

POPULATION_SIZE = 50
NUM_GENERATIONS = 25

# cities and the distances between each pair of them
cities = ["City A", "City B", "City C", "City D", "City E"]
distances = {
("City A", "City B"): 5,
("City A", "City C"): 8,
("City A", "City D"): 6,
("City A", "City E"): 2,
("City B", "City C"): 3,
("City B", "City D"): 9,
("City B", "City E"): 7,
("City C", "City D"): 4,
("City C", "City E"): 6,
("City D", "City E"): 5,
("City B", "City A"): 5,
("City C", "City A"): 8,
("City D", "City A"): 6,
("City E", "City A"): 2,
("City C", "City B"): 3,
("City D", "City B"): 9,
("City E", "City B"): 7,
("City D", "City C"): 4,
("City E", "City C"): 6,
("City E", "City D"): 5
}

# The fitness function calculates the total distance travelled along a path
def fitness(path):
    total_distance = 0
    # add up the distances between consecutive cities
    for i in range(len(path) - 1):
        total_distance += distances[(path[i], path[i+1])]
    # add the return leg back to the starting city
    total_distance += distances[(path[-1], path[0])]
    return total_distance
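
# Worked example (added for illustration, hand-checked against the distance
# table above): for the tour A -> E -> D -> C -> B the legs cost
# 2 + 5 + 4 + 3, plus 5 for the return leg B -> A, so
# fitness(["City A", "City E", "City D", "City C", "City B"]) == 19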

# create_individual is called from create_population; it returns one random
# ordering (permutation) of the cities
def create_individual():
    return random.sample(cities, len(cities))

# create_population simply builds the population: since POPULATION_SIZE is 50,
# it creates 50 different candidate solutions
def create_population():
    return [create_individual() for _ in range(POPULATION_SIZE)]

# tournament_selection samples three individuals from the population and
# returns the one with the minimum (best) fitness value
def tournament_selection(population, tournament_size=3):
    tournament = random.sample(population, tournament_size)
    return min(tournament, key=fitness)

# crossover produces offspring from two parents
def crossover(parent1, parent2):
    crossover_point = random.randint(1, len(parent1) - 1)
    # each child takes a slice of one parent (some of its genes) followed by
    # those genes of the other parent that are not already in that slice
    child1 = parent1[:crossover_point] + [gene for gene in parent2 if gene not in parent1[:crossover_point]]
    child2 = parent2[:crossover_point] + [gene for gene in parent1 if gene not in parent2[:crossover_point]]
    return child1, child2
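
# Worked example (added for illustration): with parent1 = [A, B, C, D, E],
# parent2 = [E, D, C, B, A] and crossover_point = 2, the children are
# child1 = [A, B] + [E, D, C] and child2 = [E, D] + [A, B, C], so each child
# is still a valid tour that visits every city exactly once.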

# mutate the offspring by randomly swapping two of its elements
def mutate(individual):
    # pick two random indexes and save them in index1 and index2
    index1, index2 = random.sample(range(len(individual)), 2)
    # swap the two positions to mutate the individual
    individual[index1], individual[index2] = individual[index2], individual[index1]
    return individual

# Genetic algorithm for TSP
def genetic_algorithm():
    # create the population: since POPULATION_SIZE is 50, this generates
    # 50 different/random solutions
    population = create_population()
    # this loop runs NUM_GENERATIONS (25) times, i.e. up to 25 generations
    for _ in range(NUM_GENERATIONS):
        # select a pair of parents from the population by tournament selection
        # (one pair per generation, so 25 pairs in total)
        parents = [tournament_selection(population) for _ in range(2)]
        # crossover returns two new solutions, stored in offspring1 and
        # offspring2 respectively
        offspring1, offspring2 = crossover(parents[0], parents[1])
        # mutate each offspring, which randomly swaps two of its elements
        offspring1 = mutate(offspring1)
        offspring2 = mutate(offspring2)
        # place the offspring at random positions in the population array
        population[random.randint(0, POPULATION_SIZE - 1)] = offspring1
        population[random.randint(0, POPULATION_SIZE - 1)] = offspring2
    # find the individual in the population with the minimum fitness value:
    # the fitness function is called for each individual and the minimum is kept
    best_individual = min(population, key=fitness)
    # return best_individual to the caller
    return best_individual

# run the genetic algorithm to get the best solution found
best_solution = genetic_algorithm()
# print the returned best solution
print("Best solution:", best_solution)
# calculate and print its total distance
print("Total distance:", fitness(best_solution))
CODE:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
import seaborn as sns

url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
data = pd.read_csv(url)

print(data.head())

# Temperature is our target variable; features are extracted from the Date column

# convert the Date column to datetime
data['Date'] = pd.to_datetime(data['Date'])

data['Year'] = data['Date'].dt.year
data['Month'] = data['Date'].dt.month
data['Day'] = data['Date'].dt.day
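# e.g. a Date value of 1981-01-01 becomes Year=1981, Month=1, Day=1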

# Define the features (X) and the target (y)


X = data[['Year', 'Month', 'Day']]
y = data['Temp']

# Split the dataset into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=60)

# Create a decision tree regressor
regressor = DecisionTreeRegressor(random_state=60)

# Train the model
regressor.fit(X_train, y_train)

# Make predictions on the test set


y_pred = regressor.predict(X_test)

# Evaluate the model


mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
#calculate mean Squared Error between the actual and predicted value
#The R² score tells us how well our model's predictions match the actual data
print(f"Mean Squared Error: {mse}")
print(f"R^2 Score: {r2}")

plt.figure(figsize=(10, 6))
sns.scatterplot(x=y_test, y=y_pred)
plt.xlabel('Actual Temperature')
plt.ylabel('Predicted Temperature')
plt.title('Actual vs Predicted Temperature')
plt.show()
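
For reference (an illustrative check, not part of the submitted code), the two metrics can also be computed directly from their definitions, which makes explicit what mean_squared_error and r2_score report:

import numpy as np

y_true = np.asarray(y_test)
errors = y_true - y_pred
mse_manual = np.mean(errors ** 2)                        # mean squared error
ss_res = np.sum(errors ** 2)                             # residual sum of squares
ss_tot = np.sum((y_true - y_true.mean()) ** 2)           # total sum of squares
r2_manual = 1 - ss_res / ss_tot                          # coefficient of determination
print(mse_manual, r2_manual)                             # should match mse and r2 above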

OUTPUT:
This code analyzes MRI scans to determine whether the brain contains
a tumor or not.

Directory Structure:
1: D:\AI LAB\tasks\brainTumor\no
2: D:\AI LAB\tasks\brainTumor\yes

NO samples:

YES Samples:
CODE:

import cv2
import os
import pandas as pd
import numpy as np
import random
from skimage.feature import hog
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score

training_path = r'D:\AI LAB\tasks\brainTumor'

i=0
categories = []
imgs_names = []
features = []

# Iterate through the subdirectories
for subdir in ['no', 'yes']:
    subdir_path = os.path.join(training_path, subdir)

    if not os.path.exists(subdir_path):
        print(f"Directory does not exist: {subdir_path}")
        continue

    for filename in os.listdir(subdir_path):
        if filename.lower().endswith('.jpg'):
            full_path = os.path.join(subdir_path, filename)
            print(f"Processing file: {full_path}")

            # Append the image name to the list of names
            imgs_names.append(filename)

            # Define the category (class) of each image: 1 = no tumor, 0 = tumor
            if 'no' in subdir:
                categories.append(1)
            else:
                categories.append(0)

            # Read the image
            img = cv2.imread(full_path)
            # Check if the image was read correctly
            if img is None:
                print(f"Error reading image {filename}. Skipping.")
                continue

            # Resize the image
            resized_img = cv2.resize(img, (128, 64))

            # Extract the features using HOG
            fd, hog_img = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                              cells_per_block=(2, 2), visualize=True,
                              channel_axis=-1)

            # Append the feature array to the list of features
            features.append(fd)

            # Count processed images; stop once 100 images have been read
            i += 1
            if i == 100:
                break

print("Processing complete. Total images processed:", i)

len(categories)
len(features)

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(features, categories,
test_size=0.2, random_state=42)

# Train an SVM classifier


clf = svm.SVC(kernel='linear')
clf.fit(X_train, y_train)

# Make predictions
y_pred = clf.predict(X_test)

# Evaluate the model


print("Accuracy:", accuracy_score(y_test, y_pred))
print("Classification Report:")
print(classification_report(y_test, y_pred))

# Function to test a single image
def test_single_image(image_path):
    # Read the image
    img = cv2.imread(image_path)

    # Check if the image was read correctly
    if img is None:
        print(f"Error reading image {image_path}.")
        return

    # Resize the image
    resized_img = cv2.resize(img, (128, 64))

    # Extract the features using HOG
    fd, hog_img = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                      cells_per_block=(2, 2), visualize=True, channel_axis=-1)

    # Predict the category using the trained SVM model
    prediction = clf.predict([fd])

    # Map the prediction to a category name
    category = 'no' if prediction[0] == 1 else 'yes'

    if category == 'no':
        print("No evidence of tumor identified on MRI scan")
    else:
        print("Tumor is identified on MRI scan")

# Test a single image


test_image_path = r'D:\AI LAB\tasks\brainTumor\no/1 no.jpeg'
test_single_image(test_image_path)

# Make predictions on the training set


y_train_pred = clf.predict(X_train)

# Calculate and print the training accuracy


train_accuracy = accuracy_score(y_train, y_train_pred)
print("Training Accuracy:", train_accuracy)

# Make predictions on the test set


y_test_pred = clf.predict(X_test)

# Evaluate the model on the test set


print("Test Accuracy:", accuracy_score(y_test, y_test_pred))
print("Classification Report:")
print(classification_report(y_test, y_test_pred))
OUTPUT:

CODE:
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import matplotlib.pyplot as plt

# Creating the DataFrame with new features
data = {
    'Rain': ['Yes', 'Yes', 'Yes', 'No', 'No', 'No', 'Yes', 'No', 'No', 'Yes'],
    'Temperature': ['Cold', 'Hot', 'Warm', 'Warm', 'Hot', 'Cold', 'Warm', 'Hot', 'Warm', 'Cold'],
    'Day': ['Weekday', 'Weekday', 'Weekend', 'Weekend', 'Weekday', 'Weekend', 'Weekend', 'Weekday', 'Weekend', 'Weekday'],
    'Wind': ['Yes', 'No', 'Yes', 'No', 'No', 'Yes', 'Yes', 'No', 'No', 'Yes'],
    'Humidity': ['High', 'Low', 'High', 'Low', 'Low', 'High', 'High', 'Low', 'Low', 'High'],
    'Activity Type': ['Sports', 'Reading', 'Sports', 'Reading', 'Reading', 'Sports', 'Sports', 'Reading', 'Reading', 'Sports'],
    'Time of Day': ['Morning', 'Afternoon', 'Morning', 'Afternoon', 'Morning', 'Evening', 'Morning', 'Afternoon', 'Evening', 'Morning'],
    'Play': ['Inside', 'Inside', 'Inside', 'Outside', 'Outside', 'Inside', 'Inside', 'Outside', 'Outside', 'Inside']
}

df = pd.DataFrame(data)

# Initialize the label encoder
le = LabelEncoder()

# Encode the categorical variables


df['Rain'] = le.fit_transform(df['Rain'])
df['Temperature'] = le.fit_transform(df['Temperature'])
df['Day'] = le.fit_transform(df['Day'])
df['Wind'] = le.fit_transform(df['Wind'])
df['Humidity'] = le.fit_transform(df['Humidity'])
df['Activity Type'] = le.fit_transform(df['Activity Type'])
df['Time of Day'] = le.fit_transform(df['Time of Day'])
df['Play'] = le.fit_transform(df['Play'])
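
# Note (for reference): LabelEncoder assigns codes to the sorted unique values
# of each column, e.g. 'No' -> 0 and 'Yes' -> 1 for Rain, and
# 'Inside' -> 0, 'Outside' -> 1 for Play.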

# Features (X) and target (y)


X = df.drop('Play', axis=1)
y = df['Play']

# Initialize the Decision Tree Classifier with entropy


clf = DecisionTreeClassifier(criterion='entropy')

# Fit the model


clf.fit(X, y)
# Visualize the decision tree
plt.figure(figsize=(12,8))
tree.plot_tree(clf, feature_names=X.columns,
class_names=le.inverse_transform([0, 1]), filled=True, rounded=True)
plt.show()
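
Since the tree is grown with criterion='entropy', one value is easy to check by hand (illustrative only): the Play target holds 6 'Inside' and 4 'Outside' labels, so its entropy is about 0.971 bits. A minimal sketch:

import math

# Entropy of the Play column: 6 of 10 samples are 'Inside', 4 are 'Outside'
p_inside, p_outside = 6 / 10, 4 / 10
entropy_play = -(p_inside * math.log2(p_inside) + p_outside * math.log2(p_outside))
print(round(entropy_play, 3))   # ~0.971 bits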

OUTPUT:
TASK#5:
