Professional Documents
Culture Documents
classification full
classification full
mnist= fetch_openml('mnist_784')
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\datasets\
_openml.py:1002: FutureWarning: The default value of `parser` will
change from `'liac-arff'` to `'auto'` in 1.4. You can set
`parser='auto'` to silence this warning. Therefore, an `ImportError`
will be raised from 1.4 if the dataset is dense and pandas is not
installed. Note that the pandas parser may return different data
types. See the Notes Section in fetch_openml's API doc for details.
warn(
mnist
x,y=mnist['data'],mnist['target']
x
pixel1 pixel2 pixel3 pixel4 pixel5 pixel6 pixel7 pixel8
pixel9 \
0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
4 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
... ... ... ... ... ... ... ... ...
...
69995 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
69996 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
69997 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
69998 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
69999 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0
0 5
1 0
2 4
3 1
4 9
..
69995 2
69996 3
69997 4
69998 5
69999 6
Name: class, Length: 70000, dtype: category
Categories (10, object): ['0', '1', '2', '3', ..., '6', '7', '8', '9']
y[36001]
'2'
y[1000]
'0'
x_train,x_test,y_train,y_test=x[:60000],x[60000:],y[:60000],y[60000:]
x_train
y_test
60000 7
60001 2
60002 1
60003 0
60004 4
..
69995 2
69996 3
69997 4
69998 5
69999 6
Name: class, Length: 10000, dtype: category
Categories (10, object): ['0', '1', '2', '3', ..., '6', '7', '8', '9']
y_train
0 5
1 0
2 4
3 1
4 9
..
59995 8
59996 3
59997 5
59998 6
59999 8
Name: class, Length: 60000, dtype: category
Categories (10, object): ['0', '1', '2', '3', ..., '6', '7', '8', '9']
x_test
CREATING A 2 DETECTOR
import numpy as np
y_train=y_train.astype(np.int8)
y_test=y_test.astype(np.int8)
y_train2=(y_train==2)
y_test2=(y_test==2)
y_train
0 5
1 0
2 4
3 1
4 9
..
59995 8
59996 3
59997 5
59998 6
59999 8
Name: class, Length: 60000, dtype: int8
performance Measure
from sklearn.linear_model import LogisticRegression
LGR=LogisticRegression(tol=0.1,solver='lbfgs')
LGR.fit(x_train,y_train2)
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
LogisticRegression(tol=0.1)
pred2=LGR.predict(x_test)
pred2
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
cm2
array([[ 976, 1132, 145, 997, 978, 892, 951, 1011, 969, 1009],
[ 4, 3, 887, 13, 4, 0, 7, 17, 5, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=int64)
a2
0.0979
Performance Measure
from sklearn.model_selection import cross_val_predict
y_train_predict = cross_val_predict(LGR,x_train,y_train2,cv=3)
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
y_train_predict
y_train_predict.mean()
0.09326666666666666
Confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train2,y_train_predict)
array([[53566, 476],
[ 838, 5120]], dtype=int64)
y_train_perfect_predictions=y_train2
confusion_matrix(y_train2,y_train_perfect_predictions)
array([[54042, 0],
[ 0, 5958]], dtype=int64)
0.9149392423159399
recall_score(y_train2,y_train_predict)
0.8593487747566297
f1_score(y_train2,y_train_predict)
0.8862731521550978
y_scores=cross_val_predict(LGR,x_train,y_train2,cv=3,method='decision_
function')
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
y_scores
precisions,recalls,thresholds=precision_recall_curve(y_train2,y_scores
)
precisions
recalls
thresholds
plt.plot(thresholds,precisions[:-1],"b--",label="precisions")
plt.plot(thresholds,recalls[:-1],"g-",label="precisions")
plt.xlabel("thresholds")
plt.legend(loc="upper left")
plt.ylim([0,1])
plt.show()
data visualization
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
def plot_digit(data):
image = data.reshape(28, 28)
plt.imshow(image, cmap = mpl.cm.binary,
interpolation="nearest")
plt.axis("off")
# EXTRA
def plot_digits(instances, images_per_row=10, **options):
size = 28
images_per_row = min(len(instances), images_per_row)
# This is equivalent to n_rows = ceil(len(instances) /
images_per_row):
n_rows = (len(instances) - 1) // images_per_row + 1
images_per_row * size)
# Now that we have a big image, we just need to show it:
plt.imshow(big_image, cmap = mpl.cm.binary, **options)
plt.axis("off")
plt.figure(figsize=(9,9))
example_images = x[:100]
plot_digits(example_images, images_per_row=10)
plt.show()
mutliclass classification
from sklearn.svm import SVC
svm_clf=SVC(gamma="auto",random_state=42)
svm_clf.fit(x_train[:1000],y_train[:1000])
svm_clf.predict([y_test2[:784]])
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
array([7], dtype=int8)
some_digit_score=svm_clf.decision_function([y_test2[:784]])
some_digit_score
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
np.argmax(some_digit_score)
svm_clf.classes_
svm_clf.classes_[5]
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but SVC was fitted
with feature names
warnings.warn(
array([7], dtype=int8)
len(ovr_clf.estimators_)
10
LGR.fit(x_train, y_train)
LGR.predict([y_test2[:784]])
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but
LogisticRegression was fitted with feature names
warnings.warn(
array([5], dtype=int8)
LGR.decision_function([y_test2[:784]])
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but
LogisticRegression was fitted with feature names
warnings.warn(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
Error Analysis
y_train_predict = cross_val_predict(LGR, x_train_scaled, y_train,
cv=3)
conf_mx = confusion_matrix(y_train, y_train_predict)
conf_mx
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\linear_model\
_logistic.py:460: ConvergenceWarning: lbfgs failed to converge
(status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
https://scikit-learn.org/stable/modules/linear_model.html#logistic-
regression
n_iter_i = _check_optimize_result(
def plot_confusion_matrix(matrix):
"""If you prefer color and a colorbar"""
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
cax = ax.matshow(matrix)
fig.colorbar(cax)
plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.show()
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.show()
cl_a, cl_b = 2, 8
X_aa = x_train[(y_train == cl_a) & (y_train_predict == cl_a)]
X_ab = x_train[(y_train == cl_a) & (y_train_predict == cl_b)]
X_ba = x_train[(y_train == cl_b) & (y_train_predict == cl_a)]
X_bb = x_train[(y_train == cl_b) & (y_train_predict == cl_b)]
plt.figure(figsize=(8,8))
plt.subplot(221); plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(222); plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(223); plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(224); plot_digits(X_bb[:25], images_per_row=5)
plt.show()
Multilabel Classification
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier()
knn_clf.fit(x_train, y_multilabel)
KNeighborsClassifier()
knn_clf.predict([y_test2[:784]])
C:\Users\sindu\anaconda3\Lib\site-packages\sklearn\base.py:464:
UserWarning: X does not have valid feature names, but
KNeighborsClassifier was fitted with feature names
warnings.warn(
array([[False, True]])
----------------------------------------------------------------------
-----
AttributeError Traceback (most recent call
last)
Cell In[66], line 1
----> 1 y_train_knn_pred = cross_val_predict(knn_clf, x_train,
y_multilabel, cv=3)
2 f1_score(y_multilabel, y_train_knn_pred, average="macro")
File ~\anaconda3\Lib\site-packages\sklearn\model_selection\
_validation.py:1036, in cross_val_predict(estimator, X, y, groups, cv,
n_jobs, verbose, fit_params, pre_dispatch, method)
1033 # We clone the estimator to make sure that all the folds are
1034 # independent, and that it is pickle-able.
1035 parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
-> 1036 predictions = parallel(
1037 delayed(_fit_and_predict)(
1038 clone(estimator), X, y, train, test, verbose,
fit_params, method
1039 )
1040 for train, test in splits
1041 )
1043 inv_test_indices = np.empty(len(test_indices), dtype=int)
1044 inv_test_indices[test_indices] = np.arange(len(test_indices))
File ~\anaconda3\Lib\site-packages\sklearn\utils\parallel.py:65, in
Parallel.__call__(self, iterable)
60 config = get_config()
61 iterable_with_config = (
62 (_with_config(delayed_func, config), args, kwargs)
63 for delayed_func, args, kwargs in iterable
64 )
---> 65 return super().__call__(iterable_with_config)
File ~\anaconda3\Lib\site-packages\joblib\parallel.py:1085, in
Parallel.__call__(self, iterable)
1076 try:
1077 # Only set self._iterating to True if at least a batch
1078 # was dispatched. In particular this covers the edge
(...)
1082 # was very quick and its callback already dispatched all
the
1083 # remaining jobs.
1084 self._iterating = False
-> 1085 if self.dispatch_one_batch(iterator):
1086 self._iterating = self._original_iterator is not None
1088 while self.dispatch_one_batch(iterator):
File ~\anaconda3\Lib\site-packages\joblib\parallel.py:901, in
Parallel.dispatch_one_batch(self, iterator)
899 return False
900 else:
--> 901 self._dispatch(tasks)
902 return True
File ~\anaconda3\Lib\site-packages\joblib\parallel.py:819, in
Parallel._dispatch(self, batch)
817 with self._lock:
818 job_idx = len(self._jobs)
--> 819 job = self._backend.apply_async(batch, callback=cb)
820 # A job can complete so quickly than its callback is
821 # called before we get here, causing self._jobs to
822 # grow. To ensure correct results ordering, .insert is
823 # used (rather than .append) in the following line
824 self._jobs.insert(job_idx, job)
File ~\anaconda3\Lib\site-packages\joblib\_parallel_backends.py:208,
in SequentialBackend.apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
File ~\anaconda3\Lib\site-packages\joblib\_parallel_backends.py:597,
in ImmediateResult.__init__(self, batch)
594 def __init__(self, batch):
595 # Don't delay the application, to avoid keeping the input
596 # arguments in memory
--> 597 self.results = batch()
File ~\anaconda3\Lib\site-packages\joblib\parallel.py:288, in
BatchedCalls.__call__(self)
284 def __call__(self):
285 # Set the default nested backend to self._backend but do
not set the
286 # change the default number of processes to -1
287 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 288 return [func(*args, **kwargs)
289 for func, args, kwargs in self.items]
File ~\anaconda3\Lib\site-packages\joblib\parallel.py:288, in
<listcomp>(.0)
284 def __call__(self):
285 # Set the default nested backend to self._backend but do
not set the
286 # change the default number of processes to -1
287 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 288 return [func(*args, **kwargs)
289 for func, args, kwargs in self.items]
File ~\anaconda3\Lib\site-packages\sklearn\utils\parallel.py:127, in
_FuncWrapper.__call__(self, *args, **kwargs)
125 config = {}
126 with config_context(**config):
--> 127 return self.function(*args, **kwargs)
File ~\anaconda3\Lib\site-packages\sklearn\model_selection\
_validation.py:1120, in _fit_and_predict(estimator, X, y, train, test,
verbose, fit_params, method)
1118 estimator.fit(X_train, y_train, **fit_params)
1119 func = getattr(estimator, method)
-> 1120 predictions = func(X_test)
1122 encode = (
1123 method in ["decision_function", "predict_proba",
"predict_log_proba"]
1124 and y is not None
1125 )
1127 if encode:
File ~\anaconda3\Lib\site-packages\sklearn\neighbors\
_classification.py:246, in KNeighborsClassifier.predict(self, X)
244 check_is_fitted(self, "_fit_method")
245 if self.weights == "uniform":
--> 246 if self._fit_method == "brute" and
ArgKminClassMode.is_usable_for(
247 X, self._fit_X, self.metric
248 ):
249 probabilities = self.predict_proba(X)
250 if self.outputs_2d_:
File ~\anaconda3\Lib\site-packages\sklearn\metrics\
_pairwise_distances_reduction\_dispatcher.py:471, in
ArgKminClassMode.is_usable_for(cls, X, Y, metric)
448 @classmethod
449 def is_usable_for(cls, X, Y, metric) -> bool:
450 """Return True if the dispatcher can be used for the given
parameters.
451
452 Parameters
(...)
468 True if the PairwiseDistancesReduction can be used, else
False.
469 """
470 return (
--> 471 ArgKmin.is_usable_for(X, Y, metric)
472 # TODO: Support CSR matrices.
473 and not issparse(X)
474 and not issparse(Y)
475 # TODO: implement Euclidean specialization with GEMM.
476 and metric not in ("euclidean", "sqeuclidean")
477 )
File ~\anaconda3\Lib\site-packages\sklearn\metrics\
_pairwise_distances_reduction\_dispatcher.py:115, in
BaseDistancesReductionDispatcher.is_usable_for(cls, X, Y, metric)
101 def is_valid_sparse_matrix(X):
102 return (
103 isspmatrix_csr(X)
104 and
(...)
110 X.indices.dtype == X.indptr.dtype == np.int32
111 )
113 is_usable = (
114 get_config().get("enable_cython_pairwise_dist", True)
--> 115 and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X))
116 and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y))
117 and X.dtype == Y.dtype
118 and X.dtype in (np.float32, np.float64)
119 and metric in cls.valid_metrics()
120 )
122 return is_usable
File ~\anaconda3\Lib\site-packages\sklearn\metrics\
_pairwise_distances_reduction\_dispatcher.py:99, in
BaseDistancesReductionDispatcher.is_usable_for.<locals>.is_numpy_c_ord
ered(X)
98 def is_numpy_c_ordered(X):
---> 99 return hasattr(X, "flags") and X.flags.c_contiguous
some_index = 5
plt.subplot(121); plot_digit(x_test_mod.iloc[some_index].values)
plt.subplot(122); plot_digit(y_test_mod.iloc[some_index].values)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
plt.subplot(122)
plot_digit(y_test_mod.iloc[some_index].values)
plt.title('Original Test Data')
plt.show()
C:\Users\sindu\AppData\Local\Temp\ipykernel_23300\1455393970.py:31:
MatplotlibDeprecationWarning: Auto-removal of overlapping axes is
deprecated since 3.6 and will be removed two minor releases later;
explicitly call ax.remove() as needed.
plt.subplot(122)