| before (string, lengths 87 to 36.6k) | after (string, lengths 116 to 37.2k) |
|---|---|
def _score(self, method_caller, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
clf : object
Trained classifier to use for scoring. Must have a `predict_proba`
method; the output of that is used to compute the score.
X : {array-like, sparse matrix}
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, default=None
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
y_pred = method_caller(clf, 'predict_proba', X)
if y_type == 'binary' and y_pred.shape[1] <= 2:
if y_pred.shape[1] == 2:
    pos_label = self._kwargs.get('pos_label', clf.classes_[1])
    self._check_pos_label(pos_label, clf.classes_)
    col_idx = np.flatnonzero(clf.classes_ == pos_label)[0]
    y_pred = y_pred[:, col_idx]
else:
    err_msg = f'Got predict_proba of shape {y_pred.shape}, but need classifier with two classes for {self._score_func.__name__} scoring'
    raise ValueError(err_msg)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
|
def _score(self, method_caller, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
clf : object
Trained classifier to use for scoring. Must have a `predict_proba`
method; the output of that is used to compute the score.
X : {array-like, sparse matrix}
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, default=None
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
y_pred = method_caller(clf, 'predict_proba', X)
if y_type == 'binary' and y_pred.shape[1] <= 2:
<DeepExtract>
if y_pred.shape[1] == 2:
    pos_label = self._kwargs.get('pos_label', clf.classes_[1])
    self._check_pos_label(pos_label, clf.classes_)
    col_idx = np.flatnonzero(clf.classes_ == pos_label)[0]
    y_pred = y_pred[:, col_idx]
else:
    err_msg = f'Got predict_proba of shape {y_pred.shape}, but need classifier with two classes for {self._score_func.__name__} scoring'
    raise ValueError(err_msg)
</DeepExtract>
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
|
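A minimal sketch of the column selection performed in `_score` above: `predict_proba` returns one column per class in the order of `classes_`, and the scorer keeps only the column of the positive label before passing it to the score function.
```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

X, y = make_classification(n_samples=100, random_state=0)
clf = LogisticRegression().fit(X, y)

proba = clf.predict_proba(X)            # shape (n_samples, 2), columns follow clf.classes_
pos_label = clf.classes_[1]             # default positive label, as in the scorer above
col_idx = np.flatnonzero(clf.classes_ == pos_label)[0]
y_score = proba[:, col_idx]             # probability of the positive class only

print(roc_auc_score(y, y_score))
```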
@validate_params({'y_true': ['array-like'], 'y_score': ['array-like'], 'k': [Interval(Integral, 1, None, closed='left'), None], 'sample_weight': ['array-like', None], 'ignore_ties': ['boolean']})
def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric returns a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : array-like of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked. Negative values in `y_true` may result in an output
that is not between 0 and 1.
.. versionchanged:: 1.2
These negative values are deprecated, and will raise an error in v1.4.
y_score : array-like of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If `None`, all samples are given the same weight.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : float in [0., 1.]
The averaged NDCG scores for all samples.
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
References
----------
`Wikipedia entry for Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
Jarvelin, K., & Kekalainen, J. (2002).
Cumulated gain-based evaluation of IR techniques. ACM Transactions on
Information Systems (TOIS), 20(4), 422-446.
Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th
Annual Conference on Learning Theory (COLT 2013)
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import ndcg_score
>>> # we have ground-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> # we predict some scores (relevance) for the answers
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> ndcg_score(true_relevance, scores)
0.69...
>>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])
>>> ndcg_score(true_relevance, scores)
0.49...
>>> # we can set k to truncate the sum; only top k answers contribute.
>>> ndcg_score(true_relevance, scores, k=4)
0.35...
>>> # the normalization takes k into account so a perfect answer
>>> # would still get 1.0
>>> ndcg_score(true_relevance, true_relevance, k=4)
1.0...
>>> # now we have some ties in our prediction
>>> scores = np.asarray([[1, 0, 0, 0, 1]])
>>> # by default ties are averaged, so here we get the average (normalized)
>>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75
>>> ndcg_score(true_relevance, scores, k=1)
0.75...
>>> # we can choose to ignore ties for faster results, but only
>>> # if we know there aren't ties in our scores, otherwise we get
>>> # wrong results:
>>> ndcg_score(true_relevance,
... scores, k=1, ignore_ties=True)
0.5...
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
if y_true.min() < 0:
warnings.warn('ndcg_score should not be used on negative y_true values. ndcg_score will raise a ValueError on negative y_true values starting from version 1.4.', FutureWarning)
if y_true.ndim > 1 and y_true.shape[1] <= 1:
raise ValueError(f'Computing NDCG is only meaningful when there is more than 1 document. Got {y_true.shape[1]} instead.')
y_type = type_of_target(y_true, input_name='y_true')
supported_fmt = ('multilabel-indicator', 'continuous-multioutput', 'multiclass-multioutput')
if y_type not in supported_fmt:
raise ValueError('Only {} formats are supported. Got {} instead'.format(supported_fmt, y_type))
gain = _dcg_sample_scores(y_true, y_score, k, ignore_ties=ignore_ties)
normalizing_gain = _dcg_sample_scores(y_true, y_true, k, ignore_ties=True)
all_irrelevant = normalizing_gain == 0
gain[all_irrelevant] = 0
gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant]
gain = gain
return np.average(gain, weights=sample_weight)
|
@validate_params({'y_true': ['array-like'], 'y_score': ['array-like'], 'k': [Interval(Integral, 1, None, closed='left'), None], 'sample_weight': ['array-like', None], 'ignore_ties': ['boolean']})
def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric returns a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : array-like of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked. Negative values in `y_true` may result in an output
that is not between 0 and 1.
.. versionchanged:: 1.2
These negative values are deprecated, and will raise an error in v1.4.
y_score : array-like of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If `None`, all samples are given the same weight.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : float in [0., 1.]
The averaged NDCG scores for all samples.
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
References
----------
`Wikipedia entry for Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
Jarvelin, K., & Kekalainen, J. (2002).
Cumulated gain-based evaluation of IR techniques. ACM Transactions on
Information Systems (TOIS), 20(4), 422-446.
Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th
Annual Conference on Learning Theory (COLT 2013)
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import ndcg_score
>>> # we have ground-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> # we predict some scores (relevance) for the answers
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> ndcg_score(true_relevance, scores)
0.69...
>>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])
>>> ndcg_score(true_relevance, scores)
0.49...
>>> # we can set k to truncate the sum; only top k answers contribute.
>>> ndcg_score(true_relevance, scores, k=4)
0.35...
>>> # the normalization takes k into account so a perfect answer
>>> # would still get 1.0
>>> ndcg_score(true_relevance, true_relevance, k=4)
1.0...
>>> # now we have some ties in our prediction
>>> scores = np.asarray([[1, 0, 0, 0, 1]])
>>> # by default ties are averaged, so here we get the average (normalized)
>>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75
>>> ndcg_score(true_relevance, scores, k=1)
0.75...
>>> # we can choose to ignore ties for faster results, but only
>>> # if we know there aren't ties in our scores, otherwise we get
>>> # wrong results:
>>> ndcg_score(true_relevance,
... scores, k=1, ignore_ties=True)
0.5...
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
if y_true.min() < 0:
warnings.warn('ndcg_score should not be used on negative y_true values. ndcg_score will raise a ValueError on negative y_true values starting from version 1.4.', FutureWarning)
if y_true.ndim > 1 and y_true.shape[1] <= 1:
raise ValueError(f'Computing NDCG is only meaningful when there is more than 1 document. Got {y_true.shape[1]} instead.')
<DeepExtract>
y_type = type_of_target(y_true, input_name='y_true')
supported_fmt = ('multilabel-indicator', 'continuous-multioutput', 'multiclass-multioutput')
if y_type not in supported_fmt:
raise ValueError('Only {} formats are supported. Got {} instead'.format(supported_fmt, y_type))
</DeepExtract>
<DeepExtract>
gain = _dcg_sample_scores(y_true, y_score, k, ignore_ties=ignore_ties)
normalizing_gain = _dcg_sample_scores(y_true, y_true, k, ignore_ties=True)
all_irrelevant = normalizing_gain == 0
gain[all_irrelevant] = 0
gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant]
gain = gain
</DeepExtract>
return np.average(gain, weights=sample_weight)
|
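As a sanity check on the first doctest value above, here is the same NDCG computed by hand, assuming (as in scikit-learn) linear gains and a 1/log2(rank + 1) discount:
```python
import numpy as np
from sklearn.metrics import ndcg_score

true_relevance = np.asarray([[10, 0, 0, 1, 5]])
scores = np.asarray([[0.1, 0.2, 0.3, 4, 70]])

def dcg(relevance, ranking_scores):
    # Order true relevance by predicted score, best first, then apply the log2 discount.
    order = np.argsort(ranking_scores)[::-1]
    gains = relevance[order]
    discounts = np.log2(np.arange(len(gains)) + 2)
    return np.sum(gains / discounts)

dcg_value = dcg(true_relevance[0], scores[0])
ideal_dcg = dcg(true_relevance[0], true_relevance[0])
print(dcg_value / ideal_dcg)               # ~0.6956
print(ndcg_score(true_relevance, scores))  # matches the doctest value 0.69...
```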
def write_conda_environment(build_metadata):
template = environment.from_string("\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for CI builds:\n# build_tools/update_environments_and_lock_files.py\nchannels:\n - {{ build_metadata['channel'] }}\ndependencies:\n {% for conda_dep in build_metadata['conda_dependencies'] %}\n - {{ conda_dep | get_package_with_constraint(build_metadata) }}\n {% endfor %}\n {% if build_metadata['pip_dependencies'] %}\n - pip\n - pip:\n {% for pip_dep in build_metadata.get('pip_dependencies', []) %}\n - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n {% endfor %}\n {% endif %}".strip())
content = template.render(build_metadata=build_metadata)
build_name = build_metadata['build_name']
folder_path = Path(build_metadata['folder'])
output_path = folder_path / f'{build_name}_environment.yml'
output_path.write_text(content)
|
def write_conda_environment(build_metadata):
<DeepExtract>
template = environment.from_string("\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for CI builds:\n# build_tools/update_environments_and_lock_files.py\nchannels:\n - {{ build_metadata['channel'] }}\ndependencies:\n {% for conda_dep in build_metadata['conda_dependencies'] %}\n - {{ conda_dep | get_package_with_constraint(build_metadata) }}\n {% endfor %}\n {% if build_metadata['pip_dependencies'] %}\n - pip\n - pip:\n {% for pip_dep in build_metadata.get('pip_dependencies', []) %}\n - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n {% endfor %}\n {% endif %}".strip())
content = template.render(build_metadata=build_metadata)
</DeepExtract>
build_name = build_metadata['build_name']
folder_path = Path(build_metadata['folder'])
output_path = folder_path / f'{build_name}_environment.yml'
output_path.write_text(content)
|
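For context, the template above relies on a module-level jinja2 `environment` with a custom `get_package_with_constraint` filter. A rough, hypothetical sketch of how such a setup might look; the filter name matches the template, but its body and the `package_constraints` key are illustrative only, not the actual build script:
```python
# Hypothetical setup for the jinja2 environment used by write_conda_environment above.
from jinja2 import Environment

environment = Environment(trim_blocks=True, lstrip_blocks=True)

def get_package_with_constraint(package, build_metadata, uses_pip=False):
    # Illustrative only: append a pin such as "==1.24.2" when the metadata requests one.
    constraint = build_metadata.get("package_constraints", {}).get(package)
    return f"{package}{constraint}" if constraint else package

environment.filters["get_package_with_constraint"] = get_package_with_constraint
```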
def fit(self, X, y=None, init=None):
"""
Compute the position of the points in the embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
X = self._validate_data(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != 'precomputed':
warnings.warn("The MDS API has changed. ``fit`` now constructs an dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimilarity='precomputed'``.")
if self.dissimilarity == 'precomputed':
self.dissimilarity_matrix_ = X
elif self.dissimilarity == 'euclidean':
self.dissimilarity_matrix_ = euclidean_distances(X)
(self.embedding_, self.stress_, self.n_iter_) = smacof(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, return_n_iter=True, normalized_stress=self.normalized_stress)
return self
|
def fit(self, X, y=None, init=None):
"""
Compute the position of the points in the embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
Returns
-------
self : object
Fitted estimator.
"""
<DeepExtract>
self._validate_params()
X = self._validate_data(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != 'precomputed':
warnings.warn("The MDS API has changed. ``fit`` now constructs an dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimilarity='precomputed'``.")
if self.dissimilarity == 'precomputed':
self.dissimilarity_matrix_ = X
elif self.dissimilarity == 'euclidean':
self.dissimilarity_matrix_ = euclidean_distances(X)
(self.embedding_, self.stress_, self.n_iter_) = smacof(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, return_n_iter=True, normalized_stress=self.normalized_stress)
</DeepExtract>
return self
|
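A short usage sketch for the `fit` method above, assuming the default Euclidean dissimilarities computed from the input:
```python
import numpy as np
from sklearn.manifold import MDS

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
mds = MDS(n_components=2, random_state=0)
mds.fit(X)                                # returns the fitted estimator
print(mds.embedding_.shape)               # (4, 2)
print(mds.dissimilarity_matrix_.shape)    # (4, 4) matrix of Euclidean distances
```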
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, 'NMF (input X)')
(n_samples, n_features) = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if not isinstance(n_components, numbers.Integral) or n_components <= 0:
raise ValueError('Number of components must be a positive integer; got (n_components=%r)' % n_components)
if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:
raise ValueError('Maximum number of iterations must be a positive integer; got (max_iter=%r)' % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError('Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol)
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), 'NMF (input H)')
_check_init(W, (n_samples, n_components), 'NMF (input W)')
elif not update_H:
_check_init(H, (n_components, n_features), 'NMF (input H)')
W = np.zeros((n_samples, n_components))
else:
(W, H) = _initialize_nmf(X, n_components, init=self.init, random_state=self.random_state)
if update_H:
gradW = np.dot(W, np.dot(H, H.T)) - safe_sparse_dot(X, H.T, dense_output=True)
gradH = np.dot(np.dot(W.T, W), H) - safe_sparse_dot(W.T, X, dense_output=True)
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
tolW = max(0.001, self.tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, self.max_iter + 1):
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < self.tol ** 2:
break
(Wt, gradWt, iterW) = _nls_subproblem(X.T, H.T, W.T, tolW, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
(W, gradW) = (Wt.T, gradWt.T)
if iterW == 1:
tolW = 0.1 * tolW
(H, gradH, iterH) = _nls_subproblem(X, W, H, tolH, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0
if n_iter == self.max_iter:
(Wt, _, _) = _nls_subproblem(X.T, H.T, W.T, tolW, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
W = Wt.T
(W, H, n_iter) = (W, H, n_iter)
else:
# Inlined projected-gradient NLS subproblem: solve for W with H held fixed.
# sigma and beta are the helper's default line-search parameters.
sigma, beta = 0.01, 0.1
Ht = W.T
WtX = safe_sparse_dot(H, X.T)
WtW = np.dot(H, H.T)
gamma = 1
for n_iter in range(1, self.nls_max_iter + 1):
    grad = np.dot(WtW, Ht) - WtX
    if self.alpha > 0 and self.l1_ratio == 1.0:
        grad += self.alpha
    elif self.alpha > 0:
        grad += self.alpha * (self.l1_ratio + (1 - self.l1_ratio) * Ht)
    if _norm(grad * np.logical_or(grad < 0, Ht > 0)) < self.tol:
        break
    Hp = Ht
    for inner_iter in range(20):
        # Projected gradient step followed by an Armijo-style sufficient-decrease test.
        Hn = Ht - gamma * grad
        Hn *= Hn > 0
        d = Hn - Ht
        gradd = np.dot(grad.ravel(), d.ravel())
        dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
        suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
        if inner_iter == 0:
            decr_gamma = not suff_decr
        if decr_gamma:
            if suff_decr:
                Ht = Hn
                break
            else:
                gamma *= beta
        elif not suff_decr or (Hp == Hn).all():
            Ht = Hp
            break
        else:
            gamma /= beta
            Hp = Hn
if n_iter == self.nls_max_iter:
    warnings.warn('Iteration limit reached in nls subproblem.', ConvergenceWarning)
(Wt, _, n_iter) = (Ht, grad, n_iter)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn('Maximum number of iteration %d reached. Increase it to improve convergence.' % self.max_iter, ConvergenceWarning)
return (W, H, n_iter)
|
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, 'NMF (input X)')
(n_samples, n_features) = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if not isinstance(n_components, numbers.Integral) or n_components <= 0:
raise ValueError('Number of components must be a positive integer; got (n_components=%r)' % n_components)
if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:
raise ValueError('Maximum number of iterations must be a positive integer; got (max_iter=%r)' % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError('Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol)
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), 'NMF (input H)')
_check_init(W, (n_samples, n_components), 'NMF (input W)')
elif not update_H:
_check_init(H, (n_components, n_features), 'NMF (input H)')
W = np.zeros((n_samples, n_components))
else:
(W, H) = _initialize_nmf(X, n_components, init=self.init, random_state=self.random_state)
if update_H:
<DeepExtract>
gradW = np.dot(W, np.dot(H, H.T)) - safe_sparse_dot(X, H.T, dense_output=True)
gradH = np.dot(np.dot(W.T, W), H) - safe_sparse_dot(W.T, X, dense_output=True)
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
tolW = max(0.001, self.tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, self.max_iter + 1):
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < self.tol ** 2:
break
(Wt, gradWt, iterW) = _nls_subproblem(X.T, H.T, W.T, tolW, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
(W, gradW) = (Wt.T, gradWt.T)
if iterW == 1:
tolW = 0.1 * tolW
(H, gradH, iterH) = _nls_subproblem(X, W, H, tolH, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0
if n_iter == self.max_iter:
(Wt, _, _) = _nls_subproblem(X.T, H.T, W.T, tolW, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
W = Wt.T
(W, H, n_iter) = (W, H, n_iter)
</DeepExtract>
else:
<DeepExtract>
# Inlined projected-gradient NLS subproblem: solve for W with H held fixed.
# sigma and beta are the helper's default line-search parameters.
sigma, beta = 0.01, 0.1
Ht = W.T
WtX = safe_sparse_dot(H, X.T)
WtW = np.dot(H, H.T)
gamma = 1
for n_iter in range(1, self.nls_max_iter + 1):
    grad = np.dot(WtW, Ht) - WtX
    if self.alpha > 0 and self.l1_ratio == 1.0:
        grad += self.alpha
    elif self.alpha > 0:
        grad += self.alpha * (self.l1_ratio + (1 - self.l1_ratio) * Ht)
    if _norm(grad * np.logical_or(grad < 0, Ht > 0)) < self.tol:
        break
    Hp = Ht
    for inner_iter in range(20):
        # Projected gradient step followed by an Armijo-style sufficient-decrease test.
        Hn = Ht - gamma * grad
        Hn *= Hn > 0
        d = Hn - Ht
        gradd = np.dot(grad.ravel(), d.ravel())
        dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
        suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
        if inner_iter == 0:
            decr_gamma = not suff_decr
        if decr_gamma:
            if suff_decr:
                Ht = Hn
                break
            else:
                gamma *= beta
        elif not suff_decr or (Hp == Hn).all():
            Ht = Hp
            break
        else:
            gamma /= beta
            Hp = Hn
if n_iter == self.nls_max_iter:
    warnings.warn('Iteration limit reached in nls subproblem.', ConvergenceWarning)
(Wt, _, n_iter) = (Ht, grad, n_iter)
</DeepExtract>
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn('Maximum number of iteration %d reached. Increase it to improve convergence.' % self.max_iter, ConvergenceWarning)
return (W, H, n_iter)
|
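The transform branch above boils down to a projected gradient update with an Armijo-type line-search test. A stripped-down NumPy illustration of one such step, with made-up `WtW`, `WtX`, and step size `gamma`:
```python
import numpy as np

rng = np.random.RandomState(0)
WtW = rng.rand(3, 3)
WtW = WtW @ WtW.T                        # positive semi-definite Gram matrix W^T W
WtX = rng.rand(3, 5)                     # W^T X
Ht = np.abs(rng.rand(3, 5))              # current nonnegative iterate
gamma = 0.1

grad = WtW @ Ht - WtX                    # gradient of 0.5 * ||X - W H||^2 with respect to H
Hn = np.maximum(Ht - gamma * grad, 0)    # gradient step, then projection onto H >= 0

d = Hn - Ht
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot((WtW @ d).ravel(), d.ravel())
suff_decr = (1 - 0.01) * gradd + 0.5 * dQd < 0   # sufficient-decrease test from the line search
print(suff_decr)
```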
def score(self, X, y=None, sample_weight=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse='csr', reset=False, dtype=[np.float64, np.float32], order='C', accept_large_sparse=False)
X = X
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
with threadpool_limits(limits=1, user_api='blas'):
result = _labels_inertia(X, sample_weight, self.cluster_centers_, self._n_threads, return_inertia=True)
(_, scores) = result
return -scores
|
def score(self, X, y=None, sample_weight=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self)
<DeepExtract>
X = self._validate_data(X, accept_sparse='csr', reset=False, dtype=[np.float64, np.float32], order='C', accept_large_sparse=False)
X = X
</DeepExtract>
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
<DeepExtract>
with threadpool_limits(limits=1, user_api='blas'):
result = _labels_inertia(X, sample_weight, self.cluster_centers_, self._n_threads, return_inertia=True)
(_, scores) = result
</DeepExtract>
return -scores
|
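On the data a KMeans model was fitted on, the score above should coincide with the negative `inertia_` up to floating-point noise, e.g.:
```python
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
X = rng.rand(100, 2)

km = KMeans(n_clusters=3, n_init=10, random_state=0).fit(X)
print(km.score(X))    # opposite of the K-means objective evaluated on X
print(-km.inertia_)   # essentially the same value on the training set
```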
def inplace_swap_row(X, m, n):
"""
Swap two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of CSR or
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
elif isinstance(X, sp.csr_matrix):
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
if m > n:
(m, n) = (n, m)
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start], X.indices[n_start:n_stop], X.indices[m_stop:n_start], X.indices[m_start:m_stop], X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start], X.data[n_start:n_stop], X.data[m_stop:n_start], X.data[m_start:m_stop], X.data[n_stop:]])
else:
input_type = X.format if sp.issparse(X) else type(X)
err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type
raise TypeError(err)
</DeepExtract>
|
def inplace_swap_row(X, m, n):
"""
Swap two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of CSR or
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
<DeepExtract>
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
</DeepExtract>
elif isinstance(X, sp.csr_matrix):
<DeepExtract>
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
if m > n:
(m, n) = (n, m)
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start], X.indices[n_start:n_stop], X.indices[m_stop:n_start], X.indices[m_start:m_stop], X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start], X.data[n_start:n_stop], X.data[m_stop:n_start], X.data[m_start:m_stop], X.data[n_stop:]])
</DeepExtract>
else:
<DeepExtract>
input_type = X.format if sp.issparse(X) else type(X)
err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type
raise TypeError(err)
</DeepExtract>
|
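A quick sanity check of the row swap above on a small CSR matrix; this assumes the helper is importable from `sklearn.utils.sparsefuncs`:
```python
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import inplace_swap_row  # assumed location of the helper above

dense = np.array([[1.0, 0.0, 2.0],
                  [0.0, 3.0, 0.0],
                  [4.0, 0.0, 5.0]])
X = sp.csr_matrix(dense)

inplace_swap_row(X, 0, 2)            # swap the first and last rows in place
expected = dense[[2, 1, 0]]
assert np.array_equal(X.toarray(), expected)
```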
@validate_params({'X': ['array-like'], 'connectivity': ['array-like', 'sparse matrix', None], 'n_clusters': [Interval(Integral, 1, None, closed='left'), None], 'return_distance': ['boolean']})
def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : {array-like, sparse matrix}, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e., the Ward algorithm is unstructured.
n_clusters : int, default=None
`n_clusters` should be less than `n_samples`. Stop early the
construction of the tree at `n_clusters.` This is useful to decrease
computation time if the number of clusters is not small compared to the
number of samples. In this case, the complete tree is not computed, thus
the 'children' output is of limited use, and the 'parents' output should
rather be used. This option is valid only when specifying a connectivity
matrix.
return_distance : bool, default=False
If `True`, return the distance between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes,) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Only returned if `return_distance` is set to `True` (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted Euclidean distance between
the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to
leaves of the tree, then `distances[i]` is their unweighted Euclidean
distance. Distances are updated in the following way
(from scipy.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
(n_samples, n_features) = X.shape
if connectivity is None:
from scipy.cluster import hierarchy
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented only for structured clustering (i.e. with explicit connectivity). The algorithm will build the full tree and only retain the lower branches required for the specified number of clusters', stacklevel=2)
X = np.require(X, requirements='W')
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return (children_, 1, n_samples, None, distances)
else:
return (children_, 1, n_samples, None)
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError('Wrong shape for connectivity matrix: %s when X is %s' % (connectivity.shape, X.shape))
connectivity = connectivity + connectivity.T
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
(n_connected_components, labels) = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn('the number of connected components of the connectivity matrix is %d > 1. Completing it to avoid stopping the tree early.' % n_connected_components, stacklevel=2)
connectivity = _fix_connected_components(X=X, graph=connectivity, n_connected_components=n_connected_components, component_labels=labels, metric='euclidean', mode='connectivity')
(connectivity, n_connected_components) = (connectivity, n_connected_components)
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. %i n_clusters was asked, and there are %i samples.' % (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
coord_row = []
coord_col = []
A = []
for (ind, row) in enumerate(connectivity.rows):
A.append(row)
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia)
inertia = list(zip(inertia, coord_row, coord_col))
heapify(inertia)
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=bool, order='C')
for k in range(n_samples, n_nodes):
while True:
(inert, i, j) = heappop(inertia)
if used_node[i] and used_node[j]:
break
(parent[i], parent[j]) = (k, k)
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance:
distances[k - n_samples] = inert
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
[A[col].append(k) for col in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini)
[heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)]
n_leaves = n_samples
children = [c[::-1] for c in children]
children = np.array(children)
if return_distance:
distances = np.sqrt(2.0 * distances)
return (children, n_connected_components, n_leaves, parent, distances)
else:
return (children, n_connected_components, n_leaves, parent)
|
@validate_params({'X': ['array-like'], 'connectivity': ['array-like', 'sparse matrix', None], 'n_clusters': [Interval(Integral, 1, None, closed='left'), None], 'return_distance': ['boolean']})
def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : {array-like, sparse matrix}, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e., the Ward algorithm is unstructured.
n_clusters : int, default=None
`n_clusters` should be less than `n_samples`. Stop early the
construction of the tree at `n_clusters.` This is useful to decrease
computation time if the number of clusters is not small compared to the
number of samples. In this case, the complete tree is not computed, thus
the 'children' output is of limited use, and the 'parents' output should
rather be used. This option is valid only when specifying a connectivity
matrix.
return_distance : bool, default=False
If `True`, return the distance between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes,) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Only returned if `return_distance` is set to `True` (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted Euclidean distance between
the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to
leaves of the tree, then `distances[i]` is their unweighted Euclidean
distance. Distances are updated in the following way
(from scipy.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
(n_samples, n_features) = X.shape
if connectivity is None:
from scipy.cluster import hierarchy
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented only for structured clustering (i.e. with explicit connectivity). The algorithm will build the full tree and only retain the lower branches required for the specified number of clusters', stacklevel=2)
X = np.require(X, requirements='W')
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return (children_, 1, n_samples, None, distances)
else:
return (children_, 1, n_samples, None)
<DeepExtract>
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError('Wrong shape for connectivity matrix: %s when X is %s' % (connectivity.shape, X.shape))
connectivity = connectivity + connectivity.T
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
(n_connected_components, labels) = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn('the number of connected components of the connectivity matrix is %d > 1. Completing it to avoid stopping the tree early.' % n_connected_components, stacklevel=2)
connectivity = _fix_connected_components(X=X, graph=connectivity, n_connected_components=n_connected_components, component_labels=labels, metric='euclidean', mode='connectivity')
(connectivity, n_connected_components) = (connectivity, n_connected_components)
</DeepExtract>
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. %i n_clusters was asked, and there are %i samples.' % (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
coord_row = []
coord_col = []
A = []
for (ind, row) in enumerate(connectivity.rows):
A.append(row)
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia)
inertia = list(zip(inertia, coord_row, coord_col))
heapify(inertia)
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=bool, order='C')
for k in range(n_samples, n_nodes):
while True:
(inert, i, j) = heappop(inertia)
if used_node[i] and used_node[j]:
break
(parent[i], parent[j]) = (k, k)
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance:
distances[k - n_samples] = inert
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
[A[col].append(k) for col in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini)
[heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)]
n_leaves = n_samples
children = [c[::-1] for c in children]
children = np.array(children)
if return_distance:
distances = np.sqrt(2.0 * distances)
return (children, n_connected_components, n_leaves, parent, distances)
else:
return (children, n_connected_components, n_leaves, parent)
|
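A small structured call of `ward_tree` as defined above, assuming a k-nearest-neighbors connectivity graph built with `sklearn.neighbors.kneighbors_graph`:
```python
import numpy as np
from sklearn.cluster import ward_tree
from sklearn.neighbors import kneighbors_graph

rng = np.random.RandomState(0)
X = rng.rand(20, 3)
connectivity = kneighbors_graph(X, n_neighbors=5, include_self=False)

children, n_components, n_leaves, parents = ward_tree(X, connectivity=connectivity)
print(children.shape)        # (n_leaves - 1, 2): one merge per internal node
print(n_components, n_leaves)
```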
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The kernel matrix.
"""
# Remaining arguments take the defaults of the inlined check_pairwise_arrays helper.
accept_sparse, dtype, copy, force_all_finite, precomputed = 'csr', None, False, True, False
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
    dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K)
return K
|
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The kernel matrix.
"""
<DeepExtract>
# Remaining arguments take the defaults of the inlined check_pairwise_arrays helper.
accept_sparse, dtype, copy, force_all_finite, precomputed = 'csr', None, False, True, False
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
    dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
</DeepExtract>
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K)
return K
|
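A direct check of the definition above, K(x, y) = exp(-gamma * ||x - y||_1):
```python
import numpy as np
from sklearn.metrics.pairwise import laplacian_kernel, manhattan_distances

rng = np.random.RandomState(0)
X = rng.rand(4, 3)
Y = rng.rand(5, 3)
gamma = 0.5

K = laplacian_kernel(X, Y, gamma=gamma)
K_manual = np.exp(-gamma * manhattan_distances(X, Y))
assert np.allclose(K, K_manual)
print(K.shape)   # (4, 5)
```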
def nan_euclidean_distances(X, Y=None, *, squared=False, missing_values=np.nan, copy=True):
"""Calculate the euclidean distances in the presence of missing values.
Compute the euclidean distance between each pair of samples in X and Y,
where Y=X is assumed if Y=None. When calculating the distance between a
pair of samples, this formulation ignores feature coordinates with a
missing value in either sample and scales up the weight of the remaining
coordinates:
dist(x,y) = sqrt(weight * sq. distance from present coordinates)
where,
weight = Total # of coordinates / # of present coordinates
For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
is:
.. math::
\\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
If all the coordinates are missing or if there are no common present
coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
.. versionadded:: 0.22
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples_Y, n_features), default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
squared : bool, default=False
Return squared Euclidean distances.
missing_values : np.nan or int, default=np.nan
Representation of missing value.
copy : bool, default=True
Make and use a deep copy of X and Y (if Y exists).
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
Returns the distances between the row vectors of `X`
and the row vectors of `Y`.
See Also
--------
paired_distances : Distances between pairs of elements of X and Y.
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
Examples
--------
>>> from sklearn.metrics.pairwise import nan_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> nan_euclidean_distances(X, X) # distance between rows of X
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> nan_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
"""
force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True
# dtype and precomputed take the defaults of the inlined check_pairwise_arrays helper.
dtype, precomputed = None, False
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
    dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
missing_X = _get_mask(X, missing_values)
missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
X[missing_X] = 0
Y[missing_Y] = 0
# No precomputed squared norms are passed to the inlined _euclidean_distances path.
X_norm_squared = Y_norm_squared = None
(X, Y) = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
X_norm_squared = check_array(X_norm_squared, ensure_2d=False)
original_shape = X_norm_squared.shape
if X_norm_squared.shape == (X.shape[0],):
X_norm_squared = X_norm_squared.reshape(-1, 1)
if X_norm_squared.shape == (1, X.shape[0]):
X_norm_squared = X_norm_squared.T
if X_norm_squared.shape != (X.shape[0], 1):
raise ValueError(f'Incompatible dimensions for X of shape {X.shape} and X_norm_squared of shape {original_shape}.')
if Y_norm_squared is not None:
Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False)
original_shape = Y_norm_squared.shape
if Y_norm_squared.shape == (Y.shape[0],):
Y_norm_squared = Y_norm_squared.reshape(1, -1)
if Y_norm_squared.shape == (Y.shape[0], 1):
Y_norm_squared = Y_norm_squared.T
if Y_norm_squared.shape != (1, Y.shape[0]):
raise ValueError(f'Incompatible dimensions for Y of shape {Y.shape} and Y_norm_squared of shape {original_shape}.')
distances = _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, True)
XX = X * X
YY = Y * Y
distances -= np.dot(XX, missing_Y.T)
distances -= np.dot(missing_X, YY.T)
np.clip(distances, 0, None, out=distances)
if X is Y:
np.fill_diagonal(distances, 0.0)
present_X = 1 - missing_X
present_Y = present_X if Y is X else ~missing_Y
present_count = np.dot(present_X, present_Y.T)
distances[present_count == 0] = np.nan
np.maximum(1, present_count, out=present_count)
distances /= present_count
distances *= X.shape[1]
if not squared:
np.sqrt(distances, out=distances)
return distances
|
def nan_euclidean_distances(X, Y=None, *, squared=False, missing_values=np.nan, copy=True):
"""Calculate the euclidean distances in the presence of missing values.
Compute the euclidean distance between each pair of samples in X and Y,
where Y=X is assumed if Y=None. When calculating the distance between a
pair of samples, this formulation ignores feature coordinates with a
missing value in either sample and scales up the weight of the remaining
coordinates:
dist(x,y) = sqrt(weight * sq. distance from present coordinates)
where,
weight = Total # of coordinates / # of present coordinates
For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
is:
.. math::
\\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
If all the coordinates are missing or if there are no common present
coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
.. versionadded:: 0.22
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples_Y, n_features), default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
squared : bool, default=False
Return squared Euclidean distances.
missing_values : np.nan or int, default=np.nan
Representation of missing value.
copy : bool, default=True
Make and use a deep copy of X and Y (if Y exists).
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
Returns the distances between the row vectors of `X`
and the row vectors of `Y`.
See Also
--------
paired_distances : Distances between pairs of elements of X and Y.
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
Examples
--------
>>> from sklearn.metrics.pairwise import nan_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> nan_euclidean_distances(X, X) # distance between rows of X
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> nan_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
"""
force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True
<DeepExtract>
# dtype and precomputed take the defaults of the inlined check_pairwise_arrays helper.
dtype, precomputed = None, False
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
    dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
</DeepExtract>
missing_X = _get_mask(X, missing_values)
missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
X[missing_X] = 0
Y[missing_Y] = 0
<DeepExtract>
# No precomputed squared norms are passed to the inlined _euclidean_distances path.
X_norm_squared = Y_norm_squared = None
(X, Y) = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
X_norm_squared = check_array(X_norm_squared, ensure_2d=False)
original_shape = X_norm_squared.shape
if X_norm_squared.shape == (X.shape[0],):
X_norm_squared = X_norm_squared.reshape(-1, 1)
if X_norm_squared.shape == (1, X.shape[0]):
X_norm_squared = X_norm_squared.T
if X_norm_squared.shape != (X.shape[0], 1):
raise ValueError(f'Incompatible dimensions for X of shape {X.shape} and X_norm_squared of shape {original_shape}.')
if Y_norm_squared is not None:
Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False)
original_shape = Y_norm_squared.shape
if Y_norm_squared.shape == (Y.shape[0],):
Y_norm_squared = Y_norm_squared.reshape(1, -1)
if Y_norm_squared.shape == (Y.shape[0], 1):
Y_norm_squared = Y_norm_squared.T
if Y_norm_squared.shape != (1, Y.shape[0]):
raise ValueError(f'Incompatible dimensions for Y of shape {Y.shape} and Y_norm_squared of shape {original_shape}.')
distances = _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, True)
</DeepExtract>
XX = X * X
YY = Y * Y
distances -= np.dot(XX, missing_Y.T)
distances -= np.dot(missing_X, YY.T)
np.clip(distances, 0, None, out=distances)
if X is Y:
np.fill_diagonal(distances, 0.0)
present_X = 1 - missing_X
present_Y = present_X if Y is X else ~missing_Y
present_count = np.dot(present_X, present_Y.T)
distances[present_count == 0] = np.nan
np.maximum(1, present_count, out=present_count)
distances /= present_count
distances *= X.shape[1]
if not squared:
np.sqrt(distances, out=distances)
return distances
|
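# Illustrative sketch (not part of the library code above): how the missing-value
# rescaling in nan_euclidean_distances behaves on one toy pair of vectors. The toy
# arrays and the expected value below are made up for illustration only.
import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

nan = float("nan")
u = np.array([[0.0, 1.0, 2.0]])
v = np.array([[1.0, nan, 4.0]])
# Only coordinates 0 and 2 are present in both vectors, so the squared distance over
# those coordinates (1 + 4 = 5) is rescaled by n_features / present_count = 3 / 2.
expected = np.sqrt(5 * 3 / 2)
print(nan_euclidean_distances(u, v))  # approximately [[2.7386]]
print(expected)                       # approximately 2.7386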
@ignore_warnings(category=FutureWarning)
def check_transformers_unfitted(name, transformer):
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
(X, y) = make_regression(n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = (X, y)
(X, y) = REGRESSION_DATASET
transformer = clone(transformer)
with raises((AttributeError, ValueError), err_msg=f'The unfitted transformer {name} does not raise an error when transform is called. Perhaps use check_is_fitted in transform.'):
transformer.transform(X)
|
@ignore_warnings(category=FutureWarning)
def check_transformers_unfitted(name, transformer):
<DeepExtract>
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
(X, y) = make_regression(n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = (X, y)
(X, y) = REGRESSION_DATASET
</DeepExtract>
transformer = clone(transformer)
with raises((AttributeError, ValueError), err_msg=f'The unfitted transformer {name} does not raise an error when transform is called. Perhaps use check_is_fitted in transform.'):
transformer.transform(X)
|
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self)
decision_func = self.score_samples(X) - self.offset_
is_inlier = np.ones_like(decision_func, dtype=int)
is_inlier[decision_func < 0] = -1
return is_inlier
|
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self)
<DeepExtract>
decision_func = self.score_samples(X) - self.offset_
</DeepExtract>
is_inlier = np.ones_like(decision_func, dtype=int)
is_inlier[decision_func < 0] = -1
return is_inlier
|
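# Minimal sketch of the thresholding step used in predict above: samples whose
# shifted score is non-negative become inliers (+1), the rest outliers (-1).
# The score values here are invented for illustration.
import numpy as np

scores = np.array([0.2, -0.1, 0.0, -0.5])  # hypothetical score_samples(X) - offset_
is_inlier = np.ones_like(scores, dtype=int)
is_inlier[scores < 0] = -1
print(is_inlier)  # [ 1 -1  1 -1]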
def _check_params_vs_input(self, X, default_n_init=None):
if X.shape[0] < self.n_clusters:
raise ValueError(f'n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}.')
if self.tol == 0:
self._tol = 0
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
self._tol = np.mean(variances) * self.tol
self._n_init = self.n_init
if self._n_init == 'warn':
warnings.warn(f"The default value of `n_init` will change from {default_n_init} to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning", FutureWarning)
self._n_init = default_n_init
if self._n_init == 'auto':
if self.init == 'k-means++':
self._n_init = 1
else:
self._n_init = default_n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(f'Explicit initial center position passed: performing only one init in {self.__class__.__name__} instead of n_init={self._n_init}.', RuntimeWarning, stacklevel=2)
self._n_init = 1
|
def _check_params_vs_input(self, X, default_n_init=None):
if X.shape[0] < self.n_clusters:
raise ValueError(f'n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}.')
<DeepExtract>
if self.tol == 0:
self._tol = 0
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
self._tol = np.mean(variances) * self.tol
</DeepExtract>
self._n_init = self.n_init
if self._n_init == 'warn':
warnings.warn(f"The default value of `n_init` will change from {default_n_init} to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning", FutureWarning)
self._n_init = default_n_init
if self._n_init == 'auto':
if self.init == 'k-means++':
self._n_init = 1
else:
self._n_init = default_n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(f'Explicit initial center position passed: performing only one init in {self.__class__.__name__} instead of n_init={self._n_init}.', RuntimeWarning, stacklevel=2)
self._n_init = 1
|
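# Sketch of the tolerance scaling performed above: the user-supplied relative `tol`
# is turned into an absolute convergence threshold by multiplying it with the mean
# per-feature variance of X. The toy data below is arbitrary.
import numpy as np

X = np.array([[0.0, 10.0], [2.0, 14.0], [4.0, 18.0]])
tol = 1e-4
abs_tol = np.mean(np.var(X, axis=0)) * tol
print(abs_tol)  # mean of the two feature variances, times 1e-4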
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
        X = linear_kernel(X, X)
X = X
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
if name == 'RandomizedLogisticRegression':
estimator.sample_fraction = 1
if name == 'RANSACRegressor':
estimator.residual_threshold = 0.5
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
set_random_state(estimator, 1)
msgs = ['1 feature\\(s\\)', 'n_features = 1', 'n_features=1']
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
|
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
        X = linear_kernel(X, X)
X = X
</DeepExtract>
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
if name == 'RandomizedLogisticRegression':
estimator.sample_fraction = 1
if name == 'RANSACRegressor':
estimator.residual_threshold = 0.5
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
set_random_state(estimator, 1)
msgs = ['1 feature\\(s\\)', 'n_features = 1', 'n_features=1']
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
|
def check_sample_weights_not_overwritten(name, estimator_orig):
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
X = np.array([[1, 3], [1, 3], [1, 3], [1, 3], [2, 1], [2, 1], [2, 1], [2, 1], [3, 3], [3, 3], [3, 3], [3, 3], [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
sample_weight_original = np.ones(y.shape[0])
sample_weight_original[0] = 10.0
sample_weight_fit = sample_weight_original.copy()
estimator.fit(X, y, sample_weight=sample_weight_fit)
err_msg = f'{name} overwrote the original `sample_weight` given during fit'
assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg)
|
def check_sample_weights_not_overwritten(name, estimator_orig):
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
X = np.array([[1, 3], [1, 3], [1, 3], [1, 3], [2, 1], [2, 1], [2, 1], [2, 1], [3, 3], [3, 3], [3, 3], [3, 3], [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
sample_weight_original = np.ones(y.shape[0])
sample_weight_original[0] = 10.0
sample_weight_fit = sample_weight_original.copy()
estimator.fit(X, y, sample_weight=sample_weight_fit)
err_msg = f'{name} overwrote the original `sample_weight` given during fit'
assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg)
|
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState instance
Random number generator to use.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Values of the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
p = expit(p, out=p)
return rng.uniform(size=p.shape) < p
|
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState instance
Random number generator to use.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Values of the hidden layer.
"""
<DeepExtract>
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
p = expit(p, out=p)
</DeepExtract>
return rng.uniform(size=p.shape) < p
|
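# Sketch of the hidden-unit sampling above: activation probabilities come from a
# logistic (expit) of the linear term, and each unit is drawn as an independent
# Bernoulli via a uniform comparison. The weights and biases below are made up.
import numpy as np
from scipy.special import expit

rng = np.random.RandomState(0)
v = rng.rand(4, 3)                 # 4 samples, 3 visible units
W = rng.randn(2, 3)                # stand-in for components_ (2 hidden units)
b = np.zeros(2)                    # stand-in for intercept_hidden_
p = expit(v @ W.T + b)             # P(h = 1 | v)
h = rng.uniform(size=p.shape) < p  # Bernoulli draw, shape (4, 2)
print(h.astype(int))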
def _staged_raw_predict(self, X):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
raw_predictions : generator of ndarray of shape (n_samples, n_trees_per_iteration)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
X = self._validate_data(X, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X.shape[1], self._n_features))
n_samples = X.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads()
for iteration in range(len(self._predictors)):
if not False:
(known_cat_bitsets, f_idx_map) = self._bin_mapper.make_known_categories_bitsets()
for predictors_of_ith_iteration in self._predictors[iteration:iteration + 1]:
for (k, predictor) in enumerate(predictors_of_ith_iteration):
if False:
predict = partial(predictor.predict_binned, missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_, n_threads=n_threads)
else:
predict = partial(predictor.predict, known_cat_bitsets=known_cat_bitsets, f_idx_map=f_idx_map, n_threads=n_threads)
raw_predictions[:, k] += predict(X)
yield raw_predictions.copy()
|
def _staged_raw_predict(self, X):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
raw_predictions : generator of ndarray of shape (n_samples, n_trees_per_iteration)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
X = self._validate_data(X, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X.shape[1], self._n_features))
n_samples = X.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads()
for iteration in range(len(self._predictors)):
<DeepExtract>
if not False:
(known_cat_bitsets, f_idx_map) = self._bin_mapper.make_known_categories_bitsets()
for predictors_of_ith_iteration in self._predictors[iteration:iteration + 1]:
for (k, predictor) in enumerate(predictors_of_ith_iteration):
if False:
predict = partial(predictor.predict_binned, missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_, n_threads=n_threads)
else:
predict = partial(predictor.predict, known_cat_bitsets=known_cat_bitsets, f_idx_map=f_idx_map, n_threads=n_threads)
raw_predictions[:, k] += predict(X)
</DeepExtract>
yield raw_predictions.copy()
|
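# Sketch of the staged-prediction idea above: predictions start at a baseline and each
# boosting iteration adds the output of that iteration's predictor(s), yielding a copy
# after every stage. The "predictors" here are stand-in callables, not real trees.
import numpy as np

baseline = np.zeros((5, 1))
stages = [lambda X: np.full((5, 1), 0.1), lambda X: np.full((5, 1), 0.05)]

def staged(X, baseline, stages):
    raw = baseline.copy()
    for predict in stages:
        raw += predict(X)       # accumulate this stage's contribution
        yield raw.copy()        # snapshot after each stage

for step, raw in enumerate(staged(None, baseline, stages), start=1):
    print(step, raw[0])         # ~0.1 after stage 1, ~0.15 after stage 2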
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
X_dense[3, :] = 0.0
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
X_sparse_pruned = sparse.csr_matrix(X_dense)
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
if hasattr(X_norm1, 'toarray'):
X_norm1 = X_norm1.toarray()
X_norm1 = X_norm1
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
if hasattr(X_norm2, 'toarray'):
X_norm2 = X_norm2.toarray()
X_norm2 = X_norm2
for X_norm in (X_norm1, X_norm2):
row_maxs = abs(X_norm).max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
if hasattr(X_norm, 'toarray'):
X_norm = X_norm.toarray()
X_norm = X_norm
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
|
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
X_dense[3, :] = 0.0
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
X_sparse_pruned = sparse.csr_matrix(X_dense)
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
<DeepExtract>
if hasattr(X_norm1, 'toarray'):
X_norm1 = X_norm1.toarray()
X_norm1 = X_norm1
</DeepExtract>
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
<DeepExtract>
if hasattr(X_norm2, 'toarray'):
X_norm2 = X_norm2.toarray()
X_norm2 = X_norm2
</DeepExtract>
for X_norm in (X_norm1, X_norm2):
row_maxs = abs(X_norm).max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
<DeepExtract>
if hasattr(X_norm, 'toarray'):
X_norm = X_norm.toarray()
X_norm = X_norm
</DeepExtract>
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
|
def test_validate_params():
"""Check that validate_params works no matter how the arguments are passed"""
with pytest.raises(InvalidParameterError, match="The 'a' parameter of _func must be"):
with pytest.raises(InvalidParameterError, match="The 'b' parameter of _func must be"):
with pytest.raises(InvalidParameterError, match="The 'c' parameter of _func must be"):
with pytest.raises(InvalidParameterError, match="The 'd' parameter of _func must be"):
with pytest.raises(InvalidParameterError, match="The 'b' parameter of _func must be"):
with pytest.raises(InvalidParameterError, match="The 'c' parameter of _func must be"):
|
def test_validate_params():
"""Check that validate_params works no matter how the arguments are passed"""
with pytest.raises(InvalidParameterError, match="The 'a' parameter of _func must be"):
<DeepExtract>
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'b' parameter of _func must be"):
<DeepExtract>
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'c' parameter of _func must be"):
<DeepExtract>
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'd' parameter of _func must be"):
<DeepExtract>
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'b' parameter of _func must be"):
<DeepExtract>
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'c' parameter of _func must be"):
<DeepExtract>
</DeepExtract>
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
has_centers = hasattr(self, 'cluster_centers_')
if not has_centers:
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', accept_large_sparse=False, reset=not has_centers)
self._random_state = getattr(self, '_random_state', check_random_state(self.random_state))
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.n_steps_ = getattr(self, 'n_steps_', 0)
x_squared_norms = row_norms(X, squared=True)
if not has_centers:
if X.shape[0] < self.n_clusters:
raise ValueError(f'n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}.')
self._tol = _tolerance(X, self.tol)
        self._n_init = self.n_init
        default_n_init = 3  # assumed MiniBatchKMeans default passed to _check_params_vs_input
if self._n_init == 'warn':
warnings.warn(f"The default value of `n_init` will change from {default_n_init} to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning", FutureWarning)
self._n_init = default_n_init
if self._n_init == 'auto':
if self.init == 'k-means++':
self._n_init = 1
else:
self._n_init = default_n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(f'Explicit initial center position passed: performing only one init in {self.__class__.__name__} instead of n_init={self._n_init}.', RuntimeWarning, stacklevel=2)
self._n_init = 1
self._n_threads = _openmp_effective_n_threads()
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order='C')
if init.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of clusters {self.n_clusters}.')
if init.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of features of the data {X.shape[1]}.')
if sp.issparse(X):
return
n_active_threads = int(np.ceil(X.shape[0] / CHUNK_SIZE))
if n_active_threads < self._n_threads:
modules = threadpool_info()
has_vcomp = 'vcomp' in [module['prefix'] for module in modules]
has_mkl = ('mkl', 'intel') in [(module['internal_api'], module.get('threading_layer', None)) for module in modules]
if has_vcomp and has_mkl:
self._warn_mkl_vcomp(n_active_threads)
n_samples = X.shape[0]
        n_clusters = self.n_clusters
if self._init_size is not None and self._init_size < n_samples:
init_indices = self._random_state.randint(0, n_samples, self._init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
(centers, _) = _kmeans_plusplus(X, n_clusters, random_state=self._random_state, x_squared_norms=x_squared_norms, sample_weight=sample_weight)
elif isinstance(init, str) and init == 'random':
seeds = self._random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=self._random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
self._validate_center_shape(X, centers)
if sp.issparse(centers):
centers = centers.toarray()
self.cluster_centers_ = centers
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
self._n_since_last_reassign = 0
with threadpool_limits(limits=1, user_api='blas'):
(labels, inertia) = _labels_inertia(X, sample_weight, self.cluster_centers_, n_threads=self._n_threads)
if sp.issparse(X):
_minibatch_update_sparse(X, sample_weight, self.cluster_centers_, self.cluster_centers_, self._counts, labels, self._n_threads)
else:
_minibatch_update_dense(X, sample_weight, self.cluster_centers_, self.cluster_centers_, self._counts, labels, self._n_threads)
if self._random_reassign() and self.reassignment_ratio > 0:
to_reassign = self._counts < self.reassignment_ratio * self._counts.max()
if to_reassign.sum() > 0.5 * X.shape[0]:
indices_dont_reassign = np.argsort(self._counts)[int(0.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
new_centers = self._random_state.choice(X.shape[0], replace=False, size=n_reassigns)
if self.verbose:
print(f'[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.')
if sp.issparse(X):
assign_rows_csr(X, new_centers.astype(np.intp, copy=False), np.where(to_reassign)[0].astype(np.intp, copy=False), self.cluster_centers_)
else:
self.cluster_centers_[to_reassign] = X[new_centers]
self._counts[to_reassign] = np.min(self._counts[~to_reassign])
return inertia
if self.compute_labels:
with threadpool_limits(limits=1, user_api='blas'):
            result = _labels_inertia(X, sample_weight, self.cluster_centers_, self._n_threads)
(self.labels_, self.inertia_) = result
self.n_steps_ += 1
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
has_centers = hasattr(self, 'cluster_centers_')
if not has_centers:
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', accept_large_sparse=False, reset=not has_centers)
self._random_state = getattr(self, '_random_state', check_random_state(self.random_state))
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.n_steps_ = getattr(self, 'n_steps_', 0)
x_squared_norms = row_norms(X, squared=True)
if not has_centers:
<DeepExtract>
if X.shape[0] < self.n_clusters:
raise ValueError(f'n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}.')
self._tol = _tolerance(X, self.tol)
        self._n_init = self.n_init
        default_n_init = 3  # assumed MiniBatchKMeans default passed to _check_params_vs_input
if self._n_init == 'warn':
warnings.warn(f"The default value of `n_init` will change from {default_n_init} to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning", FutureWarning)
self._n_init = default_n_init
if self._n_init == 'auto':
if self.init == 'k-means++':
self._n_init = 1
else:
self._n_init = default_n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(f'Explicit initial center position passed: performing only one init in {self.__class__.__name__} instead of n_init={self._n_init}.', RuntimeWarning, stacklevel=2)
self._n_init = 1
</DeepExtract>
self._n_threads = _openmp_effective_n_threads()
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order='C')
<DeepExtract>
if init.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of clusters {self.n_clusters}.')
if init.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of features of the data {X.shape[1]}.')
</DeepExtract>
<DeepExtract>
if sp.issparse(X):
return
n_active_threads = int(np.ceil(X.shape[0] / CHUNK_SIZE))
if n_active_threads < self._n_threads:
modules = threadpool_info()
has_vcomp = 'vcomp' in [module['prefix'] for module in modules]
has_mkl = ('mkl', 'intel') in [(module['internal_api'], module.get('threading_layer', None)) for module in modules]
if has_vcomp and has_mkl:
self._warn_mkl_vcomp(n_active_threads)
</DeepExtract>
<DeepExtract>
n_samples = X.shape[0]
        n_clusters = self.n_clusters
if self._init_size is not None and self._init_size < n_samples:
init_indices = self._random_state.randint(0, n_samples, self._init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
(centers, _) = _kmeans_plusplus(X, n_clusters, random_state=self._random_state, x_squared_norms=x_squared_norms, sample_weight=sample_weight)
elif isinstance(init, str) and init == 'random':
seeds = self._random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=self._random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
self._validate_center_shape(X, centers)
if sp.issparse(centers):
centers = centers.toarray()
self.cluster_centers_ = centers
</DeepExtract>
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
self._n_since_last_reassign = 0
with threadpool_limits(limits=1, user_api='blas'):
<DeepExtract>
(labels, inertia) = _labels_inertia(X, sample_weight, self.cluster_centers_, n_threads=self._n_threads)
if sp.issparse(X):
_minibatch_update_sparse(X, sample_weight, self.cluster_centers_, self.cluster_centers_, self._counts, labels, self._n_threads)
else:
_minibatch_update_dense(X, sample_weight, self.cluster_centers_, self.cluster_centers_, self._counts, labels, self._n_threads)
if self._random_reassign() and self.reassignment_ratio > 0:
to_reassign = self._counts < self.reassignment_ratio * self._counts.max()
if to_reassign.sum() > 0.5 * X.shape[0]:
indices_dont_reassign = np.argsort(self._counts)[int(0.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
new_centers = self._random_state.choice(X.shape[0], replace=False, size=n_reassigns)
if self.verbose:
print(f'[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.')
if sp.issparse(X):
assign_rows_csr(X, new_centers.astype(np.intp, copy=False), np.where(to_reassign)[0].astype(np.intp, copy=False), self.cluster_centers_)
else:
self.cluster_centers_[to_reassign] = X[new_centers]
self._counts[to_reassign] = np.min(self._counts[~to_reassign])
return inertia
</DeepExtract>
if self.compute_labels:
<DeepExtract>
with threadpool_limits(limits=1, user_api='blas'):
            result = _labels_inertia(X, sample_weight, self.cluster_centers_, self._n_threads)
(self.labels_, self.inertia_) = result
</DeepExtract>
self.n_steps_ += 1
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
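# Hedged usage sketch for the partial_fit above: mini-batches are fed one at a time,
# with the first call performing center initialization and later calls updating the
# centers incrementally. The data and parameter values are illustrative.
import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
mbk = MiniBatchKMeans(n_clusters=3, n_init=3, random_state=0)
for _ in range(10):
    batch = rng.normal(size=(50, 2))  # one mini-batch of samples
    mbk.partial_fit(batch)            # update cluster centers incrementally
print(mbk.cluster_centers_.shape)     # (3, 2)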
def fit(self, X, y=None):
"""Fit the Oracle Approximating Shrinkage covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
    X_centered = X - self.location_
    if len(X_centered.shape) == 2 and X_centered.shape[1] == 1:
        (covariance, shrinkage) = (np.atleast_2d((X_centered ** 2).mean()), 0.0)
    else:
        (n_samples, n_features) = X_centered.shape
        emp_cov = empirical_covariance(X_centered, assume_centered=True)
        alpha = np.mean(emp_cov ** 2)
        mu = np.trace(emp_cov) / n_features
        mu_squared = mu ** 2
        num = alpha + mu_squared
        den = (n_samples + 1) * (alpha - mu_squared / n_features)
        shrinkage = 1.0 if den == 0 else min(num / den, 1.0)
        shrunk_cov = (1.0 - shrinkage) * emp_cov
        shrunk_cov.flat[::n_features + 1] += shrinkage * mu
        (covariance, shrinkage) = (shrunk_cov, shrinkage)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
|
def fit(self, X, y=None):
"""Fit the Oracle Approximating Shrinkage covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
<DeepExtract>
    X_centered = X - self.location_
    if len(X_centered.shape) == 2 and X_centered.shape[1] == 1:
        (covariance, shrinkage) = (np.atleast_2d((X_centered ** 2).mean()), 0.0)
    else:
        (n_samples, n_features) = X_centered.shape
        emp_cov = empirical_covariance(X_centered, assume_centered=True)
        alpha = np.mean(emp_cov ** 2)
        mu = np.trace(emp_cov) / n_features
        mu_squared = mu ** 2
        num = alpha + mu_squared
        den = (n_samples + 1) * (alpha - mu_squared / n_features)
        shrinkage = 1.0 if den == 0 else min(num / den, 1.0)
        shrunk_cov = (1.0 - shrinkage) * emp_cov
        shrunk_cov.flat[::n_features + 1] += shrinkage * mu
        (covariance, shrinkage) = (shrunk_cov, shrinkage)
</DeepExtract>
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
|
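# Minimal sketch of the OAS shrinkage computed in fit above: the shrinkage coefficient
# blends the empirical covariance with a scaled identity, mirroring the formulas in the
# inlined block. The toy data below is random and only for illustration.
import numpy as np
from sklearn.covariance import empirical_covariance

rng = np.random.RandomState(0)
X = rng.normal(size=(30, 4))
X_centered = X - X.mean(axis=0)
n_samples, n_features = X_centered.shape
emp_cov = empirical_covariance(X_centered, assume_centered=True)
alpha = np.mean(emp_cov ** 2)
mu = np.trace(emp_cov) / n_features
num = alpha + mu ** 2
den = (n_samples + 1) * (alpha - mu ** 2 / n_features)
shrinkage = 1.0 if den == 0 else min(num / den, 1.0)
oas_cov = (1.0 - shrinkage) * emp_cov
oas_cov.flat[::n_features + 1] += shrinkage * mu  # add shrinkage * mu on the diagonal
print(round(shrinkage, 4), oas_cov.shape)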
def test_base_chain_crossval_fit_and_predict():
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
for chain in [ClassifierChain(LogisticRegression()), RegressorChain(Ridge())]:
chain.fit(X, Y)
chain_cv = clone(chain).set_params(cv=3)
chain_cv.fit(X, Y)
Y_pred_cv = chain_cv.predict(X)
Y_pred = chain.predict(X)
assert Y_pred_cv.shape == Y_pred.shape
assert not np.all(Y_pred == Y_pred_cv)
if isinstance(chain, ClassifierChain):
assert jaccard_score(Y, Y_pred_cv, average='samples') > 0.4
else:
assert mean_squared_error(Y, Y_pred_cv) < 0.25
|
def test_base_chain_crossval_fit_and_predict():
<DeepExtract>
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
</DeepExtract>
for chain in [ClassifierChain(LogisticRegression()), RegressorChain(Ridge())]:
chain.fit(X, Y)
chain_cv = clone(chain).set_params(cv=3)
chain_cv.fit(X, Y)
Y_pred_cv = chain_cv.predict(X)
Y_pred = chain.predict(X)
assert Y_pred_cv.shape == Y_pred.shape
assert not np.all(Y_pred == Y_pred_cv)
if isinstance(chain, ClassifierChain):
assert jaccard_score(Y, Y_pred_cv, average='samples') > 0.4
else:
assert mean_squared_error(Y, Y_pred_cv) < 0.25
|
@pytest.mark.parametrize('array_constr', [np.array, sp.csr_matrix], ids=['dense', 'sparse'])
@pytest.mark.parametrize('algo', ['lloyd', 'elkan'])
def test_k_means_1_iteration(array_constr, algo, global_random_seed):
X = np.random.RandomState(global_random_seed).uniform(size=(100, 5))
init_centers = X[:5]
X = array_constr(X)
def py_kmeans(X, init):
new_centers = init.copy()
labels = pairwise_distances_argmin(X, init)
for label in range(init.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
labels = pairwise_distances_argmin(X, new_centers)
return (labels, new_centers)
new_centers = init_centers.copy()
labels = pairwise_distances_argmin(X, init_centers)
for label in range(init_centers.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
labels = pairwise_distances_argmin(X, new_centers)
(py_labels, py_centers) = (labels, new_centers)
cy_kmeans = KMeans(n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1).fit(X)
cy_labels = cy_kmeans.labels_
cy_centers = cy_kmeans.cluster_centers_
assert_array_equal(py_labels, cy_labels)
assert_allclose(py_centers, cy_centers)
|
@pytest.mark.parametrize('array_constr', [np.array, sp.csr_matrix], ids=['dense', 'sparse'])
@pytest.mark.parametrize('algo', ['lloyd', 'elkan'])
def test_k_means_1_iteration(array_constr, algo, global_random_seed):
X = np.random.RandomState(global_random_seed).uniform(size=(100, 5))
init_centers = X[:5]
X = array_constr(X)
def py_kmeans(X, init):
new_centers = init.copy()
labels = pairwise_distances_argmin(X, init)
for label in range(init.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
labels = pairwise_distances_argmin(X, new_centers)
return (labels, new_centers)
<DeepExtract>
new_centers = init_centers.copy()
labels = pairwise_distances_argmin(X, init_centers)
for label in range(init_centers.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
labels = pairwise_distances_argmin(X, new_centers)
(py_labels, py_centers) = (labels, new_centers)
</DeepExtract>
cy_kmeans = KMeans(n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1).fit(X)
cy_labels = cy_kmeans.labels_
cy_centers = cy_kmeans.cluster_centers_
assert_array_equal(py_labels, cy_labels)
assert_allclose(py_centers, cy_centers)
|
def linkage_tree(X, connectivity=None, n_clusters=None, linkage='complete', affinity='euclidean', return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
        Default is `None`, i.e., the hierarchical clustering algorithm is unstructured.
n_clusters : int, default=None
Stop early the construction of the tree at `n_clusters`. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete", "single"}, default="complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observation.
- "average" uses the average of the distances of each observation of
the two sets.
- "complete" or maximum linkage uses the maximum distances between
all observations of the two sets.
- "single" uses the minimum of the distances between all
observations of the two sets.
affinity : str or callable, default='euclidean'
Which metric to use. Can be 'euclidean', 'manhattan', or any
distance known to paired distance (see metric.pairwise).
return_distance : bool, default=False
Whether or not to return the distances between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Returned when `return_distance` is set to `True`.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See Also
--------
ward_tree : Hierarchical clustering with ward linkage.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
(n_samples, n_features) = X.shape
linkage_choices = {'complete': _hierarchical.max_merge, 'average': _hierarchical.average_merge, 'single': None}
try:
join_func = linkage_choices[linkage]
except KeyError as e:
raise ValueError('Unknown linkage option, linkage should be one of %s, but %s was given' % (linkage_choices.keys(), linkage)) from e
if affinity == 'cosine' and np.any(~np.any(X, axis=1)):
raise ValueError('Cosine affinity cannot be used when X contains zero vectors')
if connectivity is None:
from scipy.cluster import hierarchy
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented only for structured clustering (i.e. with explicit connectivity). The algorithm will build the full tree and only retain the lower branches required for the specified number of clusters', stacklevel=2)
if affinity == 'precomputed':
if X.shape[0] != X.shape[1]:
raise ValueError(f'Distance matrix should be square, got matrix of shape {X.shape}')
(i, j) = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
(i, j) = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
if linkage == 'single' and affinity != 'precomputed' and (not callable(affinity)) and (affinity in METRIC_MAPPING):
dist_metric = DistanceMetric.get_metric(affinity)
X = np.ascontiguousarray(X, dtype=np.double)
mst = _hierarchical.mst_linkage_core(X, dist_metric)
mst = mst[np.argsort(mst.T[2], kind='mergesort'), :]
out = _hierarchical.single_linkage_label(mst)
else:
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(int, copy=False)
if return_distance:
distances = out[:, 2]
return (children_, 1, n_samples, None, distances)
return (children_, 1, n_samples, None)
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError('Wrong shape for connectivity matrix: %s when X is %s' % (connectivity.shape, X.shape))
connectivity = connectivity + connectivity.T
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
(n_connected_components, labels) = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn('the number of connected components of the connectivity matrix is %d > 1. Completing it to avoid stopping the tree early.' % n_connected_components, stacklevel=2)
connectivity = _fix_connected_components(X=X, graph=connectivity, n_connected_components=n_connected_components, component_labels=labels, metric=affinity, mode='connectivity')
(connectivity, n_connected_components) = (connectivity, n_connected_components)
connectivity = connectivity.tocoo()
diag_mask = connectivity.row != connectivity.col
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == 'precomputed':
distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
else:
distances = paired_distances(X[connectivity.row], X[connectivity.col], metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if linkage == 'single':
return _single_linkage_tree(connectivity, n_samples, n_nodes, n_clusters, n_connected_components, return_distance)
if return_distance:
distances = np.empty(n_nodes - n_samples)
A = np.empty(n_nodes, dtype=object)
inertia = list()
connectivity = connectivity.tolil()
for (ind, (data, row)) in enumerate(zip(connectivity.data, connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64))
inertia.extend((_hierarchical.WeightedEdge(d, ind, r) for (r, d) in zip(row, data) if r < ind))
del connectivity
heapify(inertia)
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
for k in range(n_samples, n_nodes):
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for (col, d) in coord_col:
A[col].append(k, d)
heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
A[k] = coord_col
A[i] = A[j] = 0
n_leaves = n_samples
children = np.array(children)[:, ::-1]
if return_distance:
return (children, n_connected_components, n_leaves, parent, distances)
return (children, n_connected_components, n_leaves, parent)
|
def linkage_tree(X, connectivity=None, n_clusters=None, linkage='complete', affinity='euclidean', return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
        Default is `None`, i.e., the hierarchical clustering algorithm is unstructured.
n_clusters : int, default=None
Stop early the construction of the tree at `n_clusters`. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete", "single"}, default="complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observation.
- "average" uses the average of the distances of each observation of
the two sets.
- "complete" or maximum linkage uses the maximum distances between
all observations of the two sets.
- "single" uses the minimum of the distances between all
observations of the two sets.
affinity : str or callable, default='euclidean'
Which metric to use. Can be 'euclidean', 'manhattan', or any
distance known to paired distance (see metric.pairwise).
return_distance : bool, default=False
Whether or not to return the distances between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Returned when `return_distance` is set to `True`.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See Also
--------
ward_tree : Hierarchical clustering with ward linkage.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
(n_samples, n_features) = X.shape
linkage_choices = {'complete': _hierarchical.max_merge, 'average': _hierarchical.average_merge, 'single': None}
try:
join_func = linkage_choices[linkage]
except KeyError as e:
raise ValueError('Unknown linkage option, linkage should be one of %s, but %s was given' % (linkage_choices.keys(), linkage)) from e
if affinity == 'cosine' and np.any(~np.any(X, axis=1)):
raise ValueError('Cosine affinity cannot be used when X contains zero vectors')
if connectivity is None:
from scipy.cluster import hierarchy
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented only for structured clustering (i.e. with explicit connectivity). The algorithm will build the full tree and only retain the lower branches required for the specified number of clusters', stacklevel=2)
if affinity == 'precomputed':
if X.shape[0] != X.shape[1]:
raise ValueError(f'Distance matrix should be square, got matrix of shape {X.shape}')
(i, j) = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
(i, j) = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
if linkage == 'single' and affinity != 'precomputed' and (not callable(affinity)) and (affinity in METRIC_MAPPING):
dist_metric = DistanceMetric.get_metric(affinity)
X = np.ascontiguousarray(X, dtype=np.double)
mst = _hierarchical.mst_linkage_core(X, dist_metric)
mst = mst[np.argsort(mst.T[2], kind='mergesort'), :]
out = _hierarchical.single_linkage_label(mst)
else:
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(int, copy=False)
if return_distance:
distances = out[:, 2]
return (children_, 1, n_samples, None, distances)
return (children_, 1, n_samples, None)
<DeepExtract>
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError('Wrong shape for connectivity matrix: %s when X is %s' % (connectivity.shape, X.shape))
connectivity = connectivity + connectivity.T
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
(n_connected_components, labels) = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn('the number of connected components of the connectivity matrix is %d > 1. Completing it to avoid stopping the tree early.' % n_connected_components, stacklevel=2)
connectivity = _fix_connected_components(X=X, graph=connectivity, n_connected_components=n_connected_components, component_labels=labels, metric=affinity, mode='connectivity')
(connectivity, n_connected_components) = (connectivity, n_connected_components)
</DeepExtract>
connectivity = connectivity.tocoo()
diag_mask = connectivity.row != connectivity.col
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == 'precomputed':
distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
else:
distances = paired_distances(X[connectivity.row], X[connectivity.col], metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if linkage == 'single':
return _single_linkage_tree(connectivity, n_samples, n_nodes, n_clusters, n_connected_components, return_distance)
if return_distance:
distances = np.empty(n_nodes - n_samples)
A = np.empty(n_nodes, dtype=object)
inertia = list()
connectivity = connectivity.tolil()
for (ind, (data, row)) in enumerate(zip(connectivity.data, connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64))
inertia.extend((_hierarchical.WeightedEdge(d, ind, r) for (r, d) in zip(row, data) if r < ind))
del connectivity
heapify(inertia)
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
for k in range(n_samples, n_nodes):
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for (col, d) in coord_col:
A[col].append(k, d)
heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
A[k] = coord_col
A[i] = A[j] = 0
n_leaves = n_samples
children = np.array(children)[:, ::-1]
if return_distance:
return (children, n_connected_components, n_leaves, parent, distances)
return (children, n_connected_components, n_leaves, parent)
|
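# Hedged usage sketch for linkage_tree above: an unstructured call (connectivity=None)
# falls back to scipy's hierarchy.linkage on a tiny toy dataset. The import path is an
# assumption (linkage_tree is private and its module location may vary across versions).
import numpy as np
from sklearn.cluster._agglomerative import linkage_tree

X = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0], [5.0, 6.0]])
children, n_components, n_leaves, parents = linkage_tree(X, linkage='complete')
print(children)                 # merge order; rows index the merged nodes
print(n_components, n_leaves)   # 1, 4 (single connected component, 4 leaves)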
def check_requires_y_none(name, estimator_orig):
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
        X = linear_kernel(X, X)
X = X
expected_err_msgs = ('requires y to be passed, but the target y is None', 'Expected array-like (array or non-string sequence), got None', 'y should be a 1d array')
try:
estimator.fit(X, None)
except ValueError as ve:
if not any((msg in str(ve) for msg in expected_err_msgs)):
raise ve
|
def check_requires_y_none(name, estimator_orig):
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
<DeepExtract>
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
        X = linear_kernel(X, X)
X = X
</DeepExtract>
expected_err_msgs = ('requires y to be passed, but the target y is None', 'Expected array-like (array or non-string sequence), got None', 'y should be a 1d array')
try:
estimator.fit(X, None)
except ValueError as ve:
if not any((msg in str(ve) for msg in expected_err_msgs)):
raise ve
|
def calinski_harabasz_score(X, labels):
"""Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio of the sum of between-cluster dispersion and
of within-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
(X, labels) = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
(n_samples, _) = X.shape
n_labels = len(le.classes_)
if not 1 < n_labels < n_samples:
raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % n_labels)
(extra_disp, intra_disp) = (0.0, 0.0)
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return 1.0 if intra_disp == 0.0 else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0))
|
def calinski_harabasz_score(X, labels):
"""Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio of the sum of between-cluster dispersion and
of within-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
(X, labels) = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
(n_samples, _) = X.shape
n_labels = len(le.classes_)
<DeepExtract>
if not 1 < n_labels < n_samples:
raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % n_labels)
</DeepExtract>
(extra_disp, intra_disp) = (0.0, 0.0)
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return 1.0 if intra_disp == 0.0 else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0))
|
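A short usage sketch for `calinski_harabasz_score` as defined above; higher values indicate denser, better-separated clusters (only scikit-learn's bundled estimators and datasets are assumed):

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import calinski_harabasz_score

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
labels = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(X)
# Ratio of between-cluster to within-cluster dispersion, scaled by the
# degrees of freedom, exactly as computed in the function above.
print(calinski_harabasz_score(X, labels))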
def test_subpopulation():
random_state = np.random.RandomState(0)
n_samples = 10000
X = random_state.normal(size=(n_samples, 4))
w = np.array([5.0, 10.0, 42.0, 7.0])
c = 1.0
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
(X, y, w, c) = (X, y, w, c)
theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
|
def test_subpopulation():
<DeepExtract>
random_state = np.random.RandomState(0)
n_samples = 10000
X = random_state.normal(size=(n_samples, 4))
w = np.array([5.0, 10.0, 42.0, 7.0])
c = 1.0
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
(X, y, w, c) = (X, y, w, c)
</DeepExtract>
theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
|
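The test above exercises `TheilSenRegressor` with `max_subpopulation=250`, which caps how many coefficient subsets are sampled when the exact number of combinations is intractable. A hedged, smaller-scale sketch of the same setup (sizes chosen arbitrarily for illustration):

import numpy as np
from sklearn.linear_model import TheilSenRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 4))
w, c = np.array([5.0, 10.0, 42.0, 7.0]), 1.0
y = X @ w + c + 0.1 * rng.normal(size=1000)
y[rng.randint(0, 1000, size=100)] = 50 * rng.normal(size=100)  # ~10% gross outliers
reg = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y)
# The robust estimate should land near w and c despite the corrupted targets.
print(reg.coef_.round(1), round(reg.intercept_, 1))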
def fit(self, X, y, sample_weight=None):
"""Fit linear model with coordinate descent.
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights used for fitting and evaluation of the weighted
mean squared error of each cv-fold. Note that the cross validated
MSE that is finally used to find the best model is the unweighted
mean over the (weighted) MSEs of each test fold.
Returns
-------
self : object
Returns an instance of fitted model.
"""
self._validate_params()
copy_X = self.copy_X and self.fit_intercept
check_y_params = dict(copy=False, dtype=[np.float64, np.float32], ensure_2d=False)
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
reference_to_old_X = X
check_X_params = dict(accept_sparse='csc', dtype=[np.float64, np.float32], copy=False)
(X, y) = self._validate_data(X, y, validate_separately=(check_X_params, check_y_params))
if sparse.isspmatrix(X):
if hasattr(reference_to_old_X, 'data') and (not np.may_share_memory(reference_to_old_X.data, X.data)):
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
copy_X = False
del reference_to_old_X
else:
check_X_params = dict(accept_sparse='csc', dtype=[np.float64, np.float32], order='F', copy=copy_X)
(X, y) = self._validate_data(X, y, validate_separately=(check_X_params, check_y_params))
copy_X = False
check_consistent_length(X, y)
if not self._is_multitask():
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError('For multi-task outputs, use MultiTask%s' % self.__class__.__name__)
y = column_or_1d(y, warn=True)
elif sparse.isspmatrix(X):
raise TypeError('X should be dense but a sparse matrix was passed')
elif y.ndim == 1:
raise ValueError('For mono-task outputs, use %sCV' % self.__class__.__name__[9:])
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
path_params = self.get_params()
path_params.pop('fit_intercept', None)
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
check_scalar_alpha = partial(check_scalar, target_type=Real, min_val=0.0, include_boundaries='left')
if alphas is None:
alphas = [_alpha_grid(X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, copy_X=self.copy_X) for l1_ratio in l1_ratios]
else:
for (index, alpha) in enumerate(alphas):
check_scalar_alpha(alpha, f'alphas[{index}]')
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
if effective_n_jobs(self.n_jobs) > 1:
path_params['copy_X'] = False
cv = check_cv(self.cv)
folds = list(cv.split(X, y))
best_mse = np.inf
jobs = (delayed(_path_residuals)(X, y, sample_weight, train, test, self.fit_intercept, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=X.dtype.type) for (this_l1_ratio, this_alphas) in zip(l1_ratios, alphas) for (train, test) in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='threads')(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.moveaxis(mse_paths, 2, 1))
for (l1_ratio, l1_alphas, mse_alphas) in zip(l1_ratios, alphas, mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
else:
self.alphas_ = np.asarray(alphas[0])
common_params = {name: value for (name, value) in self.get_params().items() if name in model.get_params()}
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
precompute = getattr(self, 'precompute', None)
if isinstance(precompute, str) and precompute == 'auto':
model.precompute = False
if sample_weight is None:
model.fit(X, y)
else:
model.fit(X, y, sample_weight=sample_weight)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit linear model with coordinate descent.
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights used for fitting and evaluation of the weighted
mean squared error of each cv-fold. Note that the cross validated
MSE that is finally used to find the best model is the unweighted
mean over the (weighted) MSEs of each test fold.
Returns
-------
self : object
Returns an instance of fitted model.
"""
self._validate_params()
copy_X = self.copy_X and self.fit_intercept
check_y_params = dict(copy=False, dtype=[np.float64, np.float32], ensure_2d=False)
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
reference_to_old_X = X
check_X_params = dict(accept_sparse='csc', dtype=[np.float64, np.float32], copy=False)
(X, y) = self._validate_data(X, y, validate_separately=(check_X_params, check_y_params))
if sparse.isspmatrix(X):
if hasattr(reference_to_old_X, 'data') and (not np.may_share_memory(reference_to_old_X.data, X.data)):
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
copy_X = False
del reference_to_old_X
else:
check_X_params = dict(accept_sparse='csc', dtype=[np.float64, np.float32], order='F', copy=copy_X)
(X, y) = self._validate_data(X, y, validate_separately=(check_X_params, check_y_params))
copy_X = False
check_consistent_length(X, y)
if not self._is_multitask():
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError('For multi-task outputs, use MultiTask%s' % self.__class__.__name__)
y = column_or_1d(y, warn=True)
elif sparse.isspmatrix(X):
raise TypeError('X should be dense but a sparse matrix was passed')
elif y.ndim == 1:
raise ValueError('For mono-task outputs, use %sCV' % self.__class__.__name__[9:])
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
<DeepExtract>
</DeepExtract>
path_params = self.get_params()
path_params.pop('fit_intercept', None)
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
check_scalar_alpha = partial(check_scalar, target_type=Real, min_val=0.0, include_boundaries='left')
if alphas is None:
alphas = [_alpha_grid(X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, copy_X=self.copy_X) for l1_ratio in l1_ratios]
else:
for (index, alpha) in enumerate(alphas):
check_scalar_alpha(alpha, f'alphas[{index}]')
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
if effective_n_jobs(self.n_jobs) > 1:
path_params['copy_X'] = False
cv = check_cv(self.cv)
folds = list(cv.split(X, y))
best_mse = np.inf
jobs = (delayed(_path_residuals)(X, y, sample_weight, train, test, self.fit_intercept, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=X.dtype.type) for (this_l1_ratio, this_alphas) in zip(l1_ratios, alphas) for (train, test) in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='threads')(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.moveaxis(mse_paths, 2, 1))
for (l1_ratio, l1_alphas, mse_alphas) in zip(l1_ratios, alphas, mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
else:
self.alphas_ = np.asarray(alphas[0])
common_params = {name: value for (name, value) in self.get_params().items() if name in model.get_params()}
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
precompute = getattr(self, 'precompute', None)
if isinstance(precompute, str) and precompute == 'auto':
model.precompute = False
if sample_weight is None:
model.fit(X, y)
else:
model.fit(X, y, sample_weight=sample_weight)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
|
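A minimal sketch of the cross-validated path implemented by the `fit` method above, using `ElasticNetCV`: each (l1_ratio, alpha) pair is scored by per-fold MSE, and the best pair is refit on the full data and exposed as `alpha_` and `l1_ratio_` (synthetic data chosen only for illustration):

import numpy as np
from sklearn.linear_model import ElasticNetCV

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 10))
y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.normal(size=200)
model = ElasticNetCV(l1_ratio=[0.2, 0.5, 0.9], cv=5, random_state=0).fit(X, y)
# Selected regularization strength and mixing parameter, plus the MSE grid shape.
print(model.alpha_, model.l1_ratio_, model.mse_path_.shape)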
def canParseNext(self, instring, loc):
try:
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
except (ParseException, IndexError):
return False
else:
return True
|
def canParseNext(self, instring, loc):
try:
<DeepExtract>
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
</DeepExtract>
except (ParseException, IndexError):
return False
else:
return True
|
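`canParseNext` probes whether the expression matches at a given location without raising, by wrapping the parse attempt in the try/except shown above. A small hedged sketch, assuming the `pyparsing` package is installed:

import pyparsing as pp

integer = pp.Word(pp.nums)
text = "abc 123"
# No integer starts at offset 0, so the probe reports False rather than raising.
print(integer.canParseNext(text, 0))
# "123" parses at offset 4, so the probe reports True.
print(integer.canParseNext(text, 4))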
def test_fetch_openml_requires_pandas_in_future(monkeypatch):
"""Check that we raise a warning that pandas will be required in the future."""
params = {'as_frame': False, 'parser': 'auto'}
data_id = 1119
try:
check_pandas_support('test_fetch_openml_requires_pandas')
except ImportError:
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
warn_msg = "From version 1.4, `parser='auto'` with `as_frame=False` will use pandas"
with pytest.warns(FutureWarning, match=warn_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest('This test requires pandas to not be installed.')
|
def test_fetch_openml_requires_pandas_in_future(monkeypatch):
"""Check that we raise a warning that pandas will be required in the future."""
params = {'as_frame': False, 'parser': 'auto'}
data_id = 1119
try:
check_pandas_support('test_fetch_openml_requires_pandas')
except ImportError:
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
warn_msg = "From version 1.4, `parser='auto'` with `as_frame=False` will use pandas"
with pytest.warns(FutureWarning, match=warn_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest('This test requires pandas to not be installed.')
|
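The test above monkeypatches `urlopen` so `fetch_openml` is served gzipped local fixtures instead of hitting the network, then asserts the FutureWarning about pandas becoming required for `as_frame=False`. For reference, a hedged sketch of the un-mocked call the warning concerns (requires network access; `data_id=61` is assumed here purely as a small example dataset):

from sklearn.datasets import fetch_openml

# Requesting plain NumPy output with an explicit parser avoids the FutureWarning.
bunch = fetch_openml(data_id=61, as_frame=False, parser="liac-arff")
print(bunch.data.shape, bunch.target[:5])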
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'labels': ['array-like', None], 'target_names': ['array-like', None], 'sample_weight': ['array-like', None], 'digits': [Interval(Integral, 0, None, closed='left')], 'output_dict': ['boolean'], 'zero_division': [Options(Real, {0.0, 1.0, np.nan}), StrOptions({'warn'})]})
def classification_report(y_true, y_pred, *, labels=None, target_names=None, sample_weight=None, digits=2, output_dict=False, zero_division='warn'):
"""Build a text report showing the main classification metrics.
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like of shape (n_labels,), default=None
Optional list of label indices to include in the report.
target_names : array-like of shape (n_labels,), default=None
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int, default=2
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool, default=False
If True, return output as dict.
.. versionadded:: 0.20
zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
.. versionadded:: 1.3
`np.nan` option was added.
Returns
-------
report : str or dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), and sample average (only for multilabel classification).
Micro average (averaging the total true positives, false negatives and
false positives) is only shown for multi-label or multi-class
with a subset of classes, because it corresponds to accuracy
otherwise and would be the same for all metrics.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See Also
--------
precision_recall_fscore_support: Compute precision, recall, F-measure and
support for each class.
confusion_matrix: Compute confusion matrix to evaluate the accuracy of a
classification.
multilabel_confusion_matrix: Compute a confusion matrix for each class or sample.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true, input_name='y_true')
type_pred = type_of_target(y_pred, input_name='y_pred')
y_type = {type_true, type_pred}
if y_type == {'binary', 'multiclass'}:
y_type = {'multiclass'}
if len(y_type) > 1:
raise ValueError("Classification metrics can't handle a mix of {0} and {1} targets".format(type_true, type_pred))
y_type = y_type.pop()
if y_type not in ['binary', 'multiclass', 'multilabel-indicator']:
raise ValueError('{0} is not supported'.format(y_type))
if y_type in ['binary', 'multiclass']:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == 'binary':
try:
unique_values = np.union1d(y_true, y_pred)
except TypeError as e:
raise TypeError(f'Labels in y_true and y_pred should be of the same type. Got y_true={np.unique(y_true)} and y_pred={np.unique(y_pred)}. Make sure that the predictions provided by the classifier coincides with the true labels.') from e
if len(unique_values) > 2:
y_type = 'multiclass'
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
(y_type, y_true, y_pred) = (y_type, y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
labels_given = True
micro_is_accuracy = (y_type == 'multiclass' or y_type == 'binary') and (not labels_given or set(labels) == set(unique_labels(y_true, y_pred)))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn('labels size, {0}, does not match size of target_names, {1}'.format(len(labels), len(target_names)))
else:
raise ValueError('Number of classes, {0}, does not match size of target_names, {1}. Try specifying the labels parameter'.format(len(labels), len(target_names)))
if target_names is None:
target_names = ['%s' % l for l in labels]
headers = ['precision', 'recall', 'f1-score', 'support']
zero_division_value = _check_zero_division(zero_division)
labels = _check_set_wise_labels(y_true, y_pred, None, labels, pos_label)
samplewise = None == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if None == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
beta2 = beta ** 2
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', None, warn_for, zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', None, warn_for, zero_division)
if zero_division == 'warn' and ('f-score',) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(None, 'true nor predicted', 'F-score is', len(true_sum))
if np.isposinf(beta):
f_score = recall
elif beta == 0:
f_score = precision
else:
denom = beta2 * precision + recall
mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0)
denom[mask] = 1
f_score = (1 + beta2) * precision * recall / denom
f_score[mask] = zero_division_value
if None == 'weighted':
weights = true_sum
elif None == 'samples':
weights = sample_weight
else:
weights = None
if None is not None:
assert None != 'binary' or len(precision) == 1
precision = _nanaverage(precision, weights=weights)
recall = _nanaverage(recall, weights=weights)
f_score = _nanaverage(f_score, weights=weights)
true_sum = None
(p, r, f1, s) = (precision, recall, f_score, true_sum)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for (label, scores) in report_dict.items():
report_dict[label] = dict(zip(headers, [float(i) for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max((len(cn) for cn in target_names))
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
zero_division_value = _check_zero_division(zero_division)
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
beta2 = beta ** 2
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, warn_for, zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, warn_for, zero_division)
if zero_division == 'warn' and ('f-score',) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, 'true nor predicted', 'F-score is', len(true_sum))
if np.isposinf(beta):
f_score = recall
elif beta == 0:
f_score = precision
else:
denom = beta2 * precision + recall
mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0)
denom[mask] = 1
f_score = (1 + beta2) * precision * recall / denom
f_score[mask] = zero_division_value
if average == 'weighted':
weights = true_sum
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = _nanaverage(precision, weights=weights)
recall = _nanaverage(recall, weights=weights)
f_score = _nanaverage(f_score, weights=weights)
true_sum = None
(avg_p, avg_r, avg_f1, _) = (precision, recall, f_score, true_sum)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(zip(headers, [float(i) for i in avg]))
elif line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + ' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + ' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '', *avg[2:], width=width, digits=digits)
else:
report += row_fmt.format(line_heading, *avg, width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
|
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'labels': ['array-like', None], 'target_names': ['array-like', None], 'sample_weight': ['array-like', None], 'digits': [Interval(Integral, 0, None, closed='left')], 'output_dict': ['boolean'], 'zero_division': [Options(Real, {0.0, 1.0, np.nan}), StrOptions({'warn'})]})
def classification_report(y_true, y_pred, *, labels=None, target_names=None, sample_weight=None, digits=2, output_dict=False, zero_division='warn'):
"""Build a text report showing the main classification metrics.
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like of shape (n_labels,), default=None
Optional list of label indices to include in the report.
target_names : array-like of shape (n_labels,), default=None
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int, default=2
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool, default=False
If True, return output as dict.
.. versionadded:: 0.20
zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
.. versionadded:: 1.3
`np.nan` option was added.
Returns
-------
report : str or dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), and sample average (only for multilabel classification).
Micro average (averaging the total true positives, false negatives and
false positives) is only shown for multi-label or multi-class
with a subset of classes, because it corresponds to accuracy
otherwise and would be the same for all metrics.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See Also
--------
precision_recall_fscore_support: Compute precision, recall, F-measure and
support for each class.
confusion_matrix: Compute confusion matrix to evaluate the accuracy of a
classification.
multilabel_confusion_matrix: Compute a confusion matrix for each class or sample.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
<DeepExtract>
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true, input_name='y_true')
type_pred = type_of_target(y_pred, input_name='y_pred')
y_type = {type_true, type_pred}
if y_type == {'binary', 'multiclass'}:
y_type = {'multiclass'}
if len(y_type) > 1:
raise ValueError("Classification metrics can't handle a mix of {0} and {1} targets".format(type_true, type_pred))
y_type = y_type.pop()
if y_type not in ['binary', 'multiclass', 'multilabel-indicator']:
raise ValueError('{0} is not supported'.format(y_type))
if y_type in ['binary', 'multiclass']:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == 'binary':
try:
unique_values = np.union1d(y_true, y_pred)
except TypeError as e:
raise TypeError(f'Labels in y_true and y_pred should be of the same type. Got y_true={np.unique(y_true)} and y_pred={np.unique(y_pred)}. Make sure that the predictions provided by the classifier coincides with the true labels.') from e
if len(unique_values) > 2:
y_type = 'multiclass'
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
(y_type, y_true, y_pred) = (y_type, y_true, y_pred)
</DeepExtract>
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
labels_given = True
micro_is_accuracy = (y_type == 'multiclass' or y_type == 'binary') and (not labels_given or set(labels) == set(unique_labels(y_true, y_pred)))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn('labels size, {0}, does not match size of target_names, {1}'.format(len(labels), len(target_names)))
else:
raise ValueError('Number of classes, {0}, does not match size of target_names, {1}. Try specifying the labels parameter'.format(len(labels), len(target_names)))
if target_names is None:
target_names = ['%s' % l for l in labels]
headers = ['precision', 'recall', 'f1-score', 'support']
<DeepExtract>
zero_division_value = _check_zero_division(zero_division)
labels = _check_set_wise_labels(y_true, y_pred, None, labels, pos_label)
samplewise = None == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if None == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
beta2 = beta ** 2
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', None, warn_for, zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', None, warn_for, zero_division)
if zero_division == 'warn' and ('f-score',) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(None, 'true nor predicted', 'F-score is', len(true_sum))
if np.isposinf(beta):
f_score = recall
elif beta == 0:
f_score = precision
else:
denom = beta2 * precision + recall
mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0)
denom[mask] = 1
f_score = (1 + beta2) * precision * recall / denom
f_score[mask] = zero_division_value
if None == 'weighted':
weights = true_sum
elif None == 'samples':
weights = sample_weight
else:
weights = None
if None is not None:
assert None != 'binary' or len(precision) == 1
precision = _nanaverage(precision, weights=weights)
recall = _nanaverage(recall, weights=weights)
f_score = _nanaverage(f_score, weights=weights)
true_sum = None
(p, r, f1, s) = (precision, recall, f_score, true_sum)
</DeepExtract>
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for (label, scores) in report_dict.items():
report_dict[label] = dict(zip(headers, [float(i) for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max((len(cn) for cn in target_names))
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
<DeepExtract>
zero_division_value = _check_zero_division(zero_division)
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
beta2 = beta ** 2
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, warn_for, zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, warn_for, zero_division)
if zero_division == 'warn' and ('f-score',) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, 'true nor predicted', 'F-score is', len(true_sum))
if np.isposinf(beta):
f_score = recall
elif beta == 0:
f_score = precision
else:
denom = beta2 * precision + recall
mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0)
denom[mask] = 1
f_score = (1 + beta2) * precision * recall / denom
f_score[mask] = zero_division_value
if average == 'weighted':
weights = true_sum
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = _nanaverage(precision, weights=weights)
recall = _nanaverage(recall, weights=weights)
f_score = _nanaverage(f_score, weights=weights)
true_sum = None
(avg_p, avg_r, avg_f1, _) = (precision, recall, f_score, true_sum)
</DeepExtract>
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(zip(headers, [float(i) for i in avg]))
elif line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + ' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + ' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '', *avg[2:], width=width, digits=digits)
else:
report += row_fmt.format(line_heading, *avg, width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
|
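A small sketch of the `output_dict=True` branch shown above: the same per-class rows and averages come back as a nested dict, and the "accuracy" entry collapses to a single float, which is convenient for logging or DataFrame conversion:

from sklearn.metrics import classification_report

y_true = [0, 1, 2, 2, 2]
y_pred = [0, 0, 2, 2, 1]
report = classification_report(y_true, y_pred, output_dict=True, zero_division=0)
# Per-class entries keep precision/recall/f1-score/support; averages are rows too.
print(report["macro avg"]["f1-score"], report["accuracy"])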
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), 'eigen')
(X, y) = self._validate_data(X, y, accept_sparse=accept_sparse, multi_output=True, y_numeric=False)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.class_weight:
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
(X, y, sample_weight, Y) = (X, y, sample_weight, Y)
target = Y if self.cv is None else y
super().fit(X, target, sample_weight=sample_weight)
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
<DeepExtract>
accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), 'eigen')
(X, y) = self._validate_data(X, y, accept_sparse=accept_sparse, multi_output=True, y_numeric=False)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.class_weight:
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
(X, y, sample_weight, Y) = (X, y, sample_weight, Y)
</DeepExtract>
target = Y if self.cv is None else y
super().fit(X, target, sample_weight=sample_weight)
return self
|
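A brief `RidgeClassifierCV` sketch matching the `fit` method above: targets are binarized to {-1, +1} internally and the penalty strength is chosen by cross-validation over `alphas`:

from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import RidgeClassifierCV

X, y = load_breast_cancer(return_X_y=True)
clf = RidgeClassifierCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
# The selected alpha and the (optimistic) training accuracy.
print(clf.alpha_, clf.score(X, y))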
@skip_if_32bit
def test_incremental_variance_numerical_stability():
def np_var(A):
return A.var(axis=0)
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean) ** 2, axis=0)
def naive_mean_variance_update(x, last_mean, last_variance, last_sample_count):
updated_sample_count = last_sample_count + 1
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + (x - last_mean) * (x - updated_mean) / updated_sample_count
return (updated_mean, updated_variance, updated_sample_count)
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(100000000.0, dtype=np.float64)
x2 = np.log(1e-05, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
(mean, var, n) = (A0[0, :], np.zeros(n_features), n_samples // 2)
for i in range(A1.shape[0]):
updated_sample_count = n + 1
samples_ratio = n / float(updated_sample_count)
updated_mean = A1[i, :] / updated_sample_count + mean * samples_ratio
updated_variance = var * samples_ratio + (A1[i, :] - mean) * (A1[i, :] - updated_mean) / updated_sample_count
(mean, var, n) = (updated_mean, updated_variance, updated_sample_count)
assert n == A.shape[0]
assert np.abs(A.mean(axis=0) - mean).max() > 1e-06
assert np.abs(np_var(A) - var).max() > tol
(mean, var) = (A0[0, :], np.zeros(n_features))
n = np.full(n_features, n_samples // 2, dtype=np.int32)
for i in range(A1.shape[0]):
(mean, var, n) = _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])), mean, var, n)
assert_array_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert tol > np.abs(np_var(A) - var).max()
|
@skip_if_32bit
def test_incremental_variance_numerical_stability():
def np_var(A):
return A.var(axis=0)
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean) ** 2, axis=0)
def naive_mean_variance_update(x, last_mean, last_variance, last_sample_count):
updated_sample_count = last_sample_count + 1
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + (x - last_mean) * (x - updated_mean) / updated_sample_count
return (updated_mean, updated_variance, updated_sample_count)
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(100000000.0, dtype=np.float64)
x2 = np.log(1e-05, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
(mean, var, n) = (A0[0, :], np.zeros(n_features), n_samples // 2)
for i in range(A1.shape[0]):
<DeepExtract>
updated_sample_count = n + 1
samples_ratio = n / float(updated_sample_count)
updated_mean = A1[i, :] / updated_sample_count + mean * samples_ratio
updated_variance = var * samples_ratio + (A1[i, :] - mean) * (A1[i, :] - updated_mean) / updated_sample_count
(mean, var, n) = (updated_mean, updated_variance, updated_sample_count)
</DeepExtract>
assert n == A.shape[0]
assert np.abs(A.mean(axis=0) - mean).max() > 1e-06
assert np.abs(np_var(A) - var).max() > tol
(mean, var) = (A0[0, :], np.zeros(n_features))
n = np.full(n_features, n_samples // 2, dtype=np.int32)
for i in range(A1.shape[0]):
(mean, var, n) = _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])), mean, var, n)
assert_array_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert tol > np.abs(np_var(A) - var).max()
|
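The test contrasts a naive single-sample update, which loses precision when the mean is much larger than the variance, with scikit-learn's batched helper. A hedged sketch of that helper (`_incremental_mean_and_var` is private API and may change between versions):

import numpy as np
from sklearn.utils.extmath import _incremental_mean_and_var

rng = np.random.RandomState(0)
X = rng.normal(loc=1e8, scale=1.0, size=(1000, 2))  # large mean, small variance
mean, var, count = np.zeros(2), np.zeros(2), np.zeros(2)
for batch in np.array_split(X, 10):
    # Merge each batch into the running mean/variance/sample-count triplet.
    mean, var, count = _incremental_mean_and_var(batch, mean, var, count)
print(np.allclose(mean, X.mean(axis=0)), np.allclose(var, X.var(axis=0)))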
def test_classifier_chain_fit_and_predict_with_linear_svc():
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
classifier_chain = ClassifierChain(LinearSVC())
classifier_chain.fit(X, Y)
Y_pred = classifier_chain.predict(X)
assert Y_pred.shape == Y.shape
Y_decision = classifier_chain.decision_function(X)
Y_binary = Y_decision >= 0
assert_array_equal(Y_binary, Y_pred)
assert not hasattr(classifier_chain, 'predict_proba')
|
def test_classifier_chain_fit_and_predict_with_linear_svc():
<DeepExtract>
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
</DeepExtract>
classifier_chain = ClassifierChain(LinearSVC())
classifier_chain.fit(X, Y)
Y_pred = classifier_chain.predict(X)
assert Y_pred.shape == Y.shape
Y_decision = classifier_chain.decision_function(X)
Y_binary = Y_decision >= 0
assert_array_equal(Y_binary, Y_pred)
assert not hasattr(classifier_chain, 'predict_proba')
|
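A compact `ClassifierChain` sketch echoing the test: each link sees the original features plus the preceding labels, and because `LinearSVC` has no `predict_proba`, the chain exposes only `decision_function` (dataset and sizes chosen arbitrarily for illustration):

from sklearn.datasets import make_multilabel_classification
from sklearn.multioutput import ClassifierChain
from sklearn.svm import LinearSVC

X, Y = make_multilabel_classification(n_samples=200, n_classes=4, random_state=0)
chain = ClassifierChain(LinearSVC()).fit(X, Y)
# Predictions keep the multilabel shape; predict_proba is unavailable here.
print(chain.predict(X).shape, hasattr(chain, "predict_proba"))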
def fit(self, X, y=None, *, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the `fit` method of the estimator.
If a fit parameter is an array-like whose length is equal to
`num_samples` then it will be split across CV groups along with `X`
and `y`. For example, the :term:`sample_weight` parameter is split
because `len(sample_weights) = len(X)`.
Returns
-------
self : object
Instance of fitted estimator.
"""
self._validate_params()
estimator = self.estimator
refit_metric = 'score'
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
multimetric_refit_msg = f'For multi-metric scoring, the parameter refit must be set to a scorer key or a callable to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. {self.refit!r} was passed.'
valid_refit_dict = isinstance(self.refit, str) and self.refit in scorers
if self.refit is not False and (not valid_refit_dict) and (not callable(self.refit)):
raise ValueError(multimetric_refit_msg)
refit_metric = self.refit
(X, y, groups) = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers, fit_params=fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=False, error_score=self.error_score, verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None, more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print('Fitting {0} folds for each of {1} candidates, totalling {2} fits'.format(n_splits, n_candidates, n_candidates * n_splits))
out = parallel((delayed(_fit_and_score)(clone(base_estimator), X, y, train=train, test=test, parameters=parameters, split_progress=(split_idx, n_splits), candidate_progress=(cand_idx, n_candidates), **fit_and_score_kwargs) for ((cand_idx, parameters), (split_idx, (train, test))) in product(enumerate(candidate_params), enumerate(cv.split(X, y, groups)))))
if len(out) < 1:
raise ValueError('No fits were performed. Was the CV iterator empty? Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned inconsistent results. Expected {} splits, got {}'.format(n_splits, len(out) // n_candidates))
_warn_or_raise_about_fit_failures(out, self.error_score)
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for (key, value) in more_results.items():
all_more_results[key].extend(value)
nonlocal results
n_candidates = len(all_candidate_params)
all_out = _aggregate_score_dicts(all_out)
results = dict(all_more_results or {})
for (key, val) in results.items():
results[key] = np.asarray(val)
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_idx in range(n_splits):
results['split%d_%s' % (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
if key_name.startswith(('train_', 'test_')) and np.any(~np.isfinite(array_means)):
warnings.warn(f"One or more of the {key_name.split('_')[0]} scores are non-finite: {array_means}", category=UserWarning)
array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
if np.isnan(array_means).all():
rank_result = np.ones_like(array_means, dtype=np.int32)
else:
min_array_means = np.nanmin(array_means) - 1
array_means = np.nan_to_num(array_means, nan=min_array_means)
rank_result = rankdata(-array_means, method='min').astype(np.int32, copy=False)
results['rank_%s' % key_name] = rank_result
_store('fit_time', all_out['fit_time'])
_store('score_time', all_out['score_time'])
param_results = defaultdict(partial(MaskedArray, np.empty(n_candidates), mask=True, dtype=object))
for (cand_idx, params) in enumerate(all_candidate_params):
for (name, value) in params.items():
param_results['param_%s' % name][cand_idx] = value
results.update(param_results)
results['params'] = all_candidate_params
test_scores_dict = _normalize_score_results(all_out['test_scores'])
if self.return_train_score:
train_scores_dict = _normalize_score_results(all_out['train_scores'])
for scorer_name in test_scores_dict:
_store('test_%s' % scorer_name, test_scores_dict[scorer_name], splits=True, rank=True, weights=None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores_dict[scorer_name], splits=True)
results = results
return results
raise NotImplementedError('_run_search not implemented.')
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
if callable(self.scoring) and self.multimetric_:
multimetric_refit_msg = f'For multi-metric scoring, the parameter refit must be set to a scorer key or a callable to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. {self.refit!r} was passed.'
valid_refit_dict = isinstance(self.refit, str) and self.refit in first_test_score
if self.refit is not False and (not valid_refit_dict) and (not callable(self.refit)):
raise ValueError(multimetric_refit_msg)
refit_metric = self.refit
if self.refit or not self.multimetric_:
if callable(self.refit):
best_index = self.refit(results)
if not isinstance(best_index, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if best_index < 0 or best_index >= len(results['params']):
raise IndexError('best_index_ index out of range')
else:
best_index = results[f'rank_test_{refit_metric}'].argmin()
self.best_index_ = best_index
if not callable(self.refit):
self.best_score_ = results[f'mean_test_{refit_metric}'][self.best_index_]
self.best_params_ = results['params'][self.best_index_]
if self.refit:
self.best_estimator_ = clone(clone(base_estimator).set_params(**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
if hasattr(self.best_estimator_, 'feature_names_in_'):
self.feature_names_in_ = self.best_estimator_.feature_names_in_
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
|
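A short `GridSearchCV` sketch of the multi-metric path validated at the top of `fit`: with a dict of scorers, `refit` must name one of them (or be a callable, or False) so that the `best_*` attributes are well defined for that metric:

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
search = GridSearchCV(
    SVC(),
    param_grid={"C": [0.1, 1, 10]},
    scoring={"acc": "accuracy", "f1": "f1_macro"},
    refit="acc",  # best_index_ picked by mean_test_acc; other metrics stay in cv_results_
    cv=5,
).fit(X, y)
print(search.best_params_, search.cv_results_["mean_test_f1"][search.best_index_])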
def fit(self, X, y=None, *, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the `fit` method of the estimator.
If a fit parameter is an array-like whose length is equal to
`num_samples` then it will be split across CV groups along with `X`
and `y`. For example, the :term:`sample_weight` parameter is split
because `len(sample_weights) = len(X)`.
Returns
-------
self : object
Instance of fitted estimator.
"""
self._validate_params()
estimator = self.estimator
refit_metric = 'score'
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
<DeepExtract>
multimetric_refit_msg = f'For multi-metric scoring, the parameter refit must be set to a scorer key or a callable to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. {self.refit!r} was passed.'
valid_refit_dict = isinstance(self.refit, str) and self.refit in scorers
if self.refit is not False and (not valid_refit_dict) and (not callable(self.refit)):
raise ValueError(multimetric_refit_msg)
</DeepExtract>
refit_metric = self.refit
(X, y, groups) = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers, fit_params=fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=False, error_score=self.error_score, verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None, more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print('Fitting {0} folds for each of {1} candidates, totalling {2} fits'.format(n_splits, n_candidates, n_candidates * n_splits))
out = parallel((delayed(_fit_and_score)(clone(base_estimator), X, y, train=train, test=test, parameters=parameters, split_progress=(split_idx, n_splits), candidate_progress=(cand_idx, n_candidates), **fit_and_score_kwargs) for ((cand_idx, parameters), (split_idx, (train, test))) in product(enumerate(candidate_params), enumerate(cv.split(X, y, groups)))))
if len(out) < 1:
raise ValueError('No fits were performed. Was the CV iterator empty? Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned inconsistent results. Expected {} splits, got {}'.format(n_splits, len(out) // n_candidates))
_warn_or_raise_about_fit_failures(out, self.error_score)
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for (key, value) in more_results.items():
all_more_results[key].extend(value)
nonlocal results
<DeepExtract>
n_candidates = len(all_candidate_params)
all_out = _aggregate_score_dicts(all_out)
results = dict(all_more_results or {})
for (key, val) in results.items():
results[key] = np.asarray(val)
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_idx in range(n_splits):
results['split%d_%s' % (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
if key_name.startswith(('train_', 'test_')) and np.any(~np.isfinite(array_means)):
warnings.warn(f"One or more of the {key_name.split('_')[0]} scores are non-finite: {array_means}", category=UserWarning)
array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
if np.isnan(array_means).all():
rank_result = np.ones_like(array_means, dtype=np.int32)
else:
min_array_means = np.nanmin(array_means) - 1
array_means = np.nan_to_num(array_means, nan=min_array_means)
rank_result = rankdata(-array_means, method='min').astype(np.int32, copy=False)
results['rank_%s' % key_name] = rank_result
_store('fit_time', all_out['fit_time'])
_store('score_time', all_out['score_time'])
param_results = defaultdict(partial(MaskedArray, np.empty(n_candidates), mask=True, dtype=object))
for (cand_idx, params) in enumerate(all_candidate_params):
for (name, value) in params.items():
param_results['param_%s' % name][cand_idx] = value
results.update(param_results)
results['params'] = all_candidate_params
test_scores_dict = _normalize_score_results(all_out['test_scores'])
if self.return_train_score:
train_scores_dict = _normalize_score_results(all_out['train_scores'])
for scorer_name in test_scores_dict:
_store('test_%s' % scorer_name, test_scores_dict[scorer_name], splits=True, rank=True, weights=None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores_dict[scorer_name], splits=True)
results = results
</DeepExtract>
return results
<DeepExtract>
raise NotImplementedError('_run_search not implemented.')
</DeepExtract>
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
if callable(self.scoring) and self.multimetric_:
<DeepExtract>
multimetric_refit_msg = f'For multi-metric scoring, the parameter refit must be set to a scorer key or a callable to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. {self.refit!r} was passed.'
valid_refit_dict = isinstance(self.refit, str) and self.refit in first_test_score
if self.refit is not False and (not valid_refit_dict) and (not callable(self.refit)):
raise ValueError(multimetric_refit_msg)
</DeepExtract>
refit_metric = self.refit
if self.refit or not self.multimetric_:
<DeepExtract>
if callable(self.refit):
best_index = self.refit(results)
if not isinstance(best_index, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if best_index < 0 or best_index >= len(results['params']):
raise IndexError('best_index_ index out of range')
else:
best_index = results[f'rank_test_{refit_metric}'].argmin()
self.best_index_ = best_index
</DeepExtract>
if not callable(self.refit):
self.best_score_ = results[f'mean_test_{refit_metric}'][self.best_index_]
self.best_params_ = results['params'][self.best_index_]
if self.refit:
self.best_estimator_ = clone(clone(base_estimator).set_params(**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
if hasattr(self.best_estimator_, 'feature_names_in_'):
self.feature_names_in_ = self.best_estimator_.feature_names_in_
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
|
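# Standalone sketch of the ranking used above when building the
# `rank_test_*` columns: NaN means (failed fits) are pushed below every real
# score before `scipy.stats.rankdata` is applied to the negated means.
# `rank_means` is an illustrative helper name, not part of the library.
import numpy as np
from scipy.stats import rankdata

def rank_means(mean_scores):
    means = np.asarray(mean_scores, dtype=np.float64)
    if np.isnan(means).all():
        # every candidate failed: everyone gets rank 1, as in the helper above
        return np.ones_like(means, dtype=np.int32)
    sentinel = np.nanmin(means) - 1
    means = np.nan_to_num(means, nan=sentinel)
    return rankdata(-means, method='min').astype(np.int32)

print(rank_means([0.8, np.nan, 0.9, 0.8]))  # [2 4 1 2]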
def _run_search(self, evaluate_candidates):
    candidate_params = self._generate_candidate_params()  # abstract hook implemented by the concrete halving searches
if self.resource != 'n_samples' and any((self.resource in candidate for candidate in candidate_params)):
raise ValueError(f'Cannot use parameter {self.resource} as the resource since it is part of the searched parameters.')
n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))
if self.min_resources == 'exhaust':
last_iteration = n_required_iterations - 1
self.min_resources_ = max(self.min_resources_, self.max_resources_ // self.factor ** last_iteration)
n_possible_iterations = 1 + floor(log(self.max_resources_ // self.min_resources_, self.factor))
if self.aggressive_elimination:
n_iterations = n_required_iterations
else:
n_iterations = min(n_possible_iterations, n_required_iterations)
if self.verbose:
print(f'n_iterations: {n_iterations}')
print(f'n_required_iterations: {n_required_iterations}')
print(f'n_possible_iterations: {n_possible_iterations}')
print(f'min_resources_: {self.min_resources_}')
print(f'max_resources_: {self.max_resources_}')
print(f'aggressive_elimination: {self.aggressive_elimination}')
print(f'factor: {self.factor}')
self.n_resources_ = []
self.n_candidates_ = []
for itr in range(n_iterations):
power = itr
if self.aggressive_elimination:
power = max(0, itr - n_required_iterations + n_possible_iterations)
n_resources = int(self.factor ** power * self.min_resources_)
n_resources = min(n_resources, self.max_resources_)
self.n_resources_.append(n_resources)
n_candidates = len(candidate_params)
self.n_candidates_.append(n_candidates)
if self.verbose:
print('-' * 10)
print(f'iter: {itr}')
print(f'n_candidates: {n_candidates}')
print(f'n_resources: {n_resources}')
if self.resource == 'n_samples':
cv = _SubsampleMetaSplitter(base_cv=self._checked_cv_orig, fraction=n_resources / self._n_samples_orig, subsample_test=True, random_state=self.random_state)
else:
candidate_params = [c.copy() for c in candidate_params]
for candidate in candidate_params:
candidate[self.resource] = n_resources
cv = self._checked_cv_orig
more_results = {'iter': [itr] * n_candidates, 'n_resources': [n_resources] * n_candidates}
results = evaluate_candidates(candidate_params, cv, more_results=more_results)
n_candidates_to_keep = ceil(n_candidates / self.factor)
(iteration, mean_test_score, params) = (np.asarray(a) for a in (results['iter'], results['mean_test_score'], results['params']))
iter_indices = np.flatnonzero(iteration == itr)
scores = mean_test_score[iter_indices]
sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores)))
candidate_params = np.array(params[iter_indices][sorted_indices[-n_candidates_to_keep:]])
self.n_remaining_candidates_ = len(candidate_params)
self.n_required_iterations_ = n_required_iterations
self.n_possible_iterations_ = n_possible_iterations
self.n_iterations_ = n_iterations
|
def _run_search(self, evaluate_candidates):
<DeepExtract>
    candidate_params = self._generate_candidate_params()  # abstract hook implemented by the concrete halving searches
</DeepExtract>
if self.resource != 'n_samples' and any((self.resource in candidate for candidate in candidate_params)):
raise ValueError(f'Cannot use parameter {self.resource} as the resource since it is part of the searched parameters.')
n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))
if self.min_resources == 'exhaust':
last_iteration = n_required_iterations - 1
self.min_resources_ = max(self.min_resources_, self.max_resources_ // self.factor ** last_iteration)
n_possible_iterations = 1 + floor(log(self.max_resources_ // self.min_resources_, self.factor))
if self.aggressive_elimination:
n_iterations = n_required_iterations
else:
n_iterations = min(n_possible_iterations, n_required_iterations)
if self.verbose:
print(f'n_iterations: {n_iterations}')
print(f'n_required_iterations: {n_required_iterations}')
print(f'n_possible_iterations: {n_possible_iterations}')
print(f'min_resources_: {self.min_resources_}')
print(f'max_resources_: {self.max_resources_}')
print(f'aggressive_elimination: {self.aggressive_elimination}')
print(f'factor: {self.factor}')
self.n_resources_ = []
self.n_candidates_ = []
for itr in range(n_iterations):
power = itr
if self.aggressive_elimination:
power = max(0, itr - n_required_iterations + n_possible_iterations)
n_resources = int(self.factor ** power * self.min_resources_)
n_resources = min(n_resources, self.max_resources_)
self.n_resources_.append(n_resources)
n_candidates = len(candidate_params)
self.n_candidates_.append(n_candidates)
if self.verbose:
print('-' * 10)
print(f'iter: {itr}')
print(f'n_candidates: {n_candidates}')
print(f'n_resources: {n_resources}')
if self.resource == 'n_samples':
cv = _SubsampleMetaSplitter(base_cv=self._checked_cv_orig, fraction=n_resources / self._n_samples_orig, subsample_test=True, random_state=self.random_state)
else:
candidate_params = [c.copy() for c in candidate_params]
for candidate in candidate_params:
candidate[self.resource] = n_resources
cv = self._checked_cv_orig
more_results = {'iter': [itr] * n_candidates, 'n_resources': [n_resources] * n_candidates}
results = evaluate_candidates(candidate_params, cv, more_results=more_results)
n_candidates_to_keep = ceil(n_candidates / self.factor)
<DeepExtract>
(iteration, mean_test_score, params) = (np.asarray(a) for a in (results['iter'], results['mean_test_score'], results['params']))
iter_indices = np.flatnonzero(iteration == itr)
scores = mean_test_score[iter_indices]
sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores)))
candidate_params = np.array(params[iter_indices][sorted_indices[-n_candidates_to_keep:]])
</DeepExtract>
self.n_remaining_candidates_ = len(candidate_params)
self.n_required_iterations_ = n_required_iterations
self.n_possible_iterations_ = n_possible_iterations
self.n_iterations_ = n_iterations
|
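# Rough sketch of the successive-halving schedule computed above. The helper
# name and the numbers are illustrative; `factor`, `min_resources` and
# `max_resources` are assumptions for the example, not values read from a
# fitted search object.
from math import ceil, floor, log

def halving_schedule(n_candidates, min_resources, max_resources, factor=3):
    n_required = 1 + floor(log(n_candidates, factor))
    n_possible = 1 + floor(log(max_resources // min_resources, factor))
    schedule = []
    for itr in range(min(n_required, n_possible)):
        n_resources = min(factor ** itr * min_resources, max_resources)
        schedule.append((n_candidates, n_resources))
        n_candidates = ceil(n_candidates / factor)
    return schedule

print(halving_schedule(20, 20, 1000))  # [(20, 20), (7, 60), (3, 180)]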
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
nk = np.exp(log_resp).sum(axis=0) + 10 * np.finfo(np.exp(log_resp).dtype).eps
means = np.dot(np.exp(log_resp).T, X) / nk[:, np.newaxis]
covariances = {'full': _estimate_gaussian_covariances_full, 'tied': _estimate_gaussian_covariances_tied, 'diag': _estimate_gaussian_covariances_diag, 'spherical': _estimate_gaussian_covariances_spherical}[self.covariance_type](np.exp(log_resp), X, nk, means, self.reg_covar)
(self.weights_, self.means_, self.covariances_) = (nk, means, covariances)
self.weights_ /= self.weights_.sum()
estimate_precision_error_message = 'Fitting the mixture model failed because some components have ill-defined empirical covariance (for instance caused by singleton or collapsed samples). Try to decrease the number of components, or increase reg_covar.'
if self.covariance_type == 'full':
(n_components, n_features, _) = self.covariances_.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for (k, covariance) in enumerate(self.covariances_):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
elif self.covariance_type == 'tied':
(_, n_features) = self.covariances_.shape
try:
cov_chol = linalg.cholesky(self.covariances_, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
else:
if np.any(np.less_equal(self.covariances_, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(self.covariances_)
self.precisions_cholesky_ = precisions_chol
|
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
<DeepExtract>
nk = np.exp(log_resp).sum(axis=0) + 10 * np.finfo(np.exp(log_resp).dtype).eps
means = np.dot(np.exp(log_resp).T, X) / nk[:, np.newaxis]
covariances = {'full': _estimate_gaussian_covariances_full, 'tied': _estimate_gaussian_covariances_tied, 'diag': _estimate_gaussian_covariances_diag, 'spherical': _estimate_gaussian_covariances_spherical}[self.covariance_type](np.exp(log_resp), X, nk, means, self.reg_covar)
(self.weights_, self.means_, self.covariances_) = (nk, means, covariances)
</DeepExtract>
self.weights_ /= self.weights_.sum()
<DeepExtract>
estimate_precision_error_message = 'Fitting the mixture model failed because some components have ill-defined empirical covariance (for instance caused by singleton or collapsed samples). Try to decrease the number of components, or increase reg_covar.'
if self.covariance_type == 'full':
(n_components, n_features, _) = self.covariances_.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for (k, covariance) in enumerate(self.covariances_):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
elif self.covariance_type == 'tied':
(_, n_features) = self.covariances_.shape
try:
cov_chol = linalg.cholesky(self.covariances_, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
else:
if np.any(np.less_equal(self.covariances_, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(self.covariances_)
self.precisions_cholesky_ = precisions_chol
</DeepExtract>
|
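# Small numeric check of the 'full' covariance branch above: the precision
# Cholesky factor L satisfies L @ L.T == inv(covariance). The 2x2 covariance
# below is a toy example, not data from a fitted mixture.
import numpy as np
from scipy import linalg

covariance = np.array([[2.0, 0.3],
                       [0.3, 1.0]])
cov_chol = linalg.cholesky(covariance, lower=True)
prec_chol = linalg.solve_triangular(cov_chol, np.eye(2), lower=True).T
assert np.allclose(prec_chol @ prec_chol.T, linalg.inv(covariance))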
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
eta = 0.001
alpha = 0.01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
clf = klass(loss='squared_error', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, max_iter=1, average=True, shuffle=False)
clf.fit(X, y)
    # the inlined ASGD reference helper is used with its defaults here:
    # weight_init=None and intercept_init=0.0
    weights = np.zeros(X.shape[1])
    average_weights = np.zeros(X.shape[1])
    intercept = 0.0
average_intercept = 0.0
decay = 1.0
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = 0.01
for (i, entry) in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - eta * alpha
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
(average_weights, average_intercept) = (average_weights, average_intercept)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
|
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
eta = 0.001
alpha = 0.01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
clf = klass(loss='squared_error', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, max_iter=1, average=True, shuffle=False)
clf.fit(X, y)
<DeepExtract>
    # the inlined ASGD reference helper is used with its defaults here:
    # weight_init=None and intercept_init=0.0
    weights = np.zeros(X.shape[1])
    average_weights = np.zeros(X.shape[1])
    intercept = 0.0
average_intercept = 0.0
decay = 1.0
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = 0.01
for (i, entry) in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - eta * alpha
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
(average_weights, average_intercept) = (average_weights, average_intercept)
</DeepExtract>
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
|
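# The reference loop above maintains the running mean
# avg_{i+1} = (i * avg_i + w_{i+1}) / (i + 1); a tiny scalar check of that
# identity, independent of SGD itself.
import numpy as np

values = np.array([4.0, 2.0, 6.0, 8.0])
avg = 0.0
for i, w in enumerate(values):
    avg = (i * avg + w) / (i + 1.0)
assert np.isclose(avg, values.mean())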
def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
target : ndarray of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
feature_names : list
The names of the dataset columns
    target_names : list
The names of the target columns
DESCR : str
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
dir_suffix = '-py3'
if percent10:
kddcup_dir = join(data_home, 'kddcup99_10' + dir_suffix)
archive = ARCHIVE_10_PERCENT
else:
kddcup_dir = join(data_home, 'kddcup99' + dir_suffix)
archive = ARCHIVE
samples_path = join(kddcup_dir, 'samples')
targets_path = join(kddcup_dir, 'targets')
available = exists(samples_path)
dt = [('duration', int), ('protocol_type', 'S4'), ('service', 'S11'), ('flag', 'S6'), ('src_bytes', int), ('dst_bytes', int), ('land', int), ('wrong_fragment', int), ('urgent', int), ('hot', int), ('num_failed_logins', int), ('logged_in', int), ('num_compromised', int), ('root_shell', int), ('su_attempted', int), ('num_root', int), ('num_file_creations', int), ('num_shells', int), ('num_access_files', int), ('num_outbound_cmds', int), ('is_host_login', int), ('is_guest_login', int), ('count', int), ('srv_count', int), ('serror_rate', float), ('srv_serror_rate', float), ('rerror_rate', float), ('srv_rerror_rate', float), ('same_srv_rate', float), ('diff_srv_rate', float), ('srv_diff_host_rate', float), ('dst_host_count', int), ('dst_host_srv_count', int), ('dst_host_same_srv_rate', float), ('dst_host_diff_srv_rate', float), ('dst_host_same_src_port_rate', float), ('dst_host_srv_diff_host_rate', float), ('dst_host_serror_rate', float), ('dst_host_srv_serror_rate', float), ('dst_host_rerror_rate', float), ('dst_host_srv_rerror_rate', float), ('labels', 'S16')]
column_names = [c[0] for c in dt]
target_names = column_names[-1]
feature_names = column_names[:-1]
if available:
try:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
except Exception as e:
raise IOError(f'The cache for fetch_kddcup99 is invalid, please delete {str(kddcup_dir)} and run the fetch_kddcup99 again') from e
elif download_if_missing:
try:
os.makedirs(kddcup_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
logger.info('Downloading %s' % archive.url)
_fetch_remote(archive, dirname=kddcup_dir)
DT = np.dtype(dt)
logger.debug('extracting archive')
archive_path = join(kddcup_dir, archive.filename)
file_ = GzipFile(filename=archive_path, mode='r')
Xy = []
for line in file_.readlines():
line = line.decode()
Xy.append(line.replace('\n', '').split(','))
file_.close()
logger.debug('extraction done')
os.remove(archive_path)
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
else:
raise IOError('Data not found and `download_if_missing` is False')
return Bunch(data=X, target=y, feature_names=feature_names, target_names=[target_names])
|
def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
target : ndarray of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
feature_names : list
The names of the dataset columns
    target_names : list
The names of the target columns
DESCR : str
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
dir_suffix = '-py3'
if percent10:
kddcup_dir = join(data_home, 'kddcup99_10' + dir_suffix)
archive = ARCHIVE_10_PERCENT
else:
kddcup_dir = join(data_home, 'kddcup99' + dir_suffix)
archive = ARCHIVE
samples_path = join(kddcup_dir, 'samples')
targets_path = join(kddcup_dir, 'targets')
available = exists(samples_path)
dt = [('duration', int), ('protocol_type', 'S4'), ('service', 'S11'), ('flag', 'S6'), ('src_bytes', int), ('dst_bytes', int), ('land', int), ('wrong_fragment', int), ('urgent', int), ('hot', int), ('num_failed_logins', int), ('logged_in', int), ('num_compromised', int), ('root_shell', int), ('su_attempted', int), ('num_root', int), ('num_file_creations', int), ('num_shells', int), ('num_access_files', int), ('num_outbound_cmds', int), ('is_host_login', int), ('is_guest_login', int), ('count', int), ('srv_count', int), ('serror_rate', float), ('srv_serror_rate', float), ('rerror_rate', float), ('srv_rerror_rate', float), ('same_srv_rate', float), ('diff_srv_rate', float), ('srv_diff_host_rate', float), ('dst_host_count', int), ('dst_host_srv_count', int), ('dst_host_same_srv_rate', float), ('dst_host_diff_srv_rate', float), ('dst_host_same_src_port_rate', float), ('dst_host_srv_diff_host_rate', float), ('dst_host_serror_rate', float), ('dst_host_srv_serror_rate', float), ('dst_host_rerror_rate', float), ('dst_host_srv_rerror_rate', float), ('labels', 'S16')]
column_names = [c[0] for c in dt]
target_names = column_names[-1]
feature_names = column_names[:-1]
if available:
try:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
except Exception as e:
raise IOError(f'The cache for fetch_kddcup99 is invalid, please delete {str(kddcup_dir)} and run the fetch_kddcup99 again') from e
elif download_if_missing:
<DeepExtract>
try:
os.makedirs(kddcup_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
</DeepExtract>
logger.info('Downloading %s' % archive.url)
_fetch_remote(archive, dirname=kddcup_dir)
DT = np.dtype(dt)
logger.debug('extracting archive')
archive_path = join(kddcup_dir, archive.filename)
file_ = GzipFile(filename=archive_path, mode='r')
Xy = []
for line in file_.readlines():
line = line.decode()
Xy.append(line.replace('\n', '').split(','))
file_.close()
logger.debug('extraction done')
os.remove(archive_path)
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
else:
raise IOError('Data not found and `download_if_missing` is False')
return Bunch(data=X, target=y, feature_names=feature_names, target_names=[target_names])
|
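# Minimal illustration of the per-column cast used above: raw CSV fields are
# collected as objects and then converted column by column through a
# structured dtype. The tiny dtype and rows are made up, not the real
# KDD Cup 99 schema.
import numpy as np

dt = np.dtype([('duration', int), ('protocol_type', 'S4'), ('src_bytes', int)])
rows = [['0', 'tcp', '181'], ['2', 'udp', '239']]
Xy = np.asarray(rows, dtype=object)
for j in range(len(dt)):
    Xy[:, j] = Xy[:, j].astype(dt[j])
print(Xy)  # columns now hold ints and byte strings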
def _ridge_regression(X, y, alpha, sample_weight=None, solver='auto', max_iter=None, tol=0.0001, verbose=0, positive=False, random_state=None, return_n_iter=False, return_intercept=False, X_scale=None, X_offset=None, check_input=True, fit_intercept=False):
has_sw = sample_weight is not None
if solver == 'auto':
if positive:
solver = 'lbfgs'
elif return_intercept:
solver = 'sag'
elif not sparse.issparse(X):
solver = 'cholesky'
else:
solver = 'sparse_cg'
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga', 'lbfgs'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd' 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver)
if positive and solver != 'lbfgs':
raise ValueError(f"When positive=True, only 'lbfgs' solver can be used. Please change solver {solver} to 'lbfgs' or set positive=False.")
if solver == 'lbfgs' and (not positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if return_intercept and solver != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the intercept. Please change solver to 'sag' or set return_intercept=False.")
if check_input:
_dtype = [np.float64, np.float32]
if sparse.issparse(X) and solver in ['auto', 'sag', 'saga']:
_accept_sparse = 'csr'
else:
_accept_sparse = ['csr', 'csc', 'coo']
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order='C')
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
(n_samples, n_features) = X.shape
if y.ndim > 2:
raise ValueError('Target y has the wrong shape %s' % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
(n_samples_, n_targets) = y.shape
if n_samples != n_samples_:
raise ValueError('Number of samples in X and y does not correspond: %d != %d' % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ['sag', 'saga']:
(X, y, sample_weight_sqrt) = _rescale_data(X, y, sample_weight)
if alpha is not None and (not isinstance(alpha, np.ndarray)):
alpha = check_scalar(alpha, 'alpha', target_type=numbers.Real, min_val=0.0, include_boundaries='left')
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError('Number of targets and number of penalties do not correspond: %d != %d' % (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == 'sparse_cg':
        sqrt_sw = sample_weight_sqrt if has_sw else None
        if sqrt_sw is None:
            sqrt_sw = np.ones(X.shape[0], dtype=X.dtype)
        (n_samples, n_features) = X.shape
        if X_offset is None or X_scale is None:
            X1 = sp_linalg.aslinearoperator(X)
        else:
            X_offset_scale = X_offset / X_scale
            X1 = _get_rescaled_operator(X, X_offset_scale, sqrt_sw)
        coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
        if n_features > n_samples:
            def create_mv(curr_alpha):
                def _mv(x):
                    return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
                return _mv
        else:
            def create_mv(curr_alpha):
                def _mv(x):
                    return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
                return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
C = sp_linalg.LinearOperator((n_samples, n_samples), matvec=mv, dtype=X.dtype)
try:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator((n_features, n_features), matvec=mv, dtype=X.dtype)
try:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol, atol='legacy')
except TypeError:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError('Failed with error code %d' % info)
if max_iter is None and info > 0 and verbose:
warnings.warn('sparse_cg did not converge after %d iterations.' % info, ConvergenceWarning)
coef = coefs
elif solver == 'lsqr':
        sqrt_sw = sample_weight_sqrt if has_sw else None
        if sqrt_sw is None:
            sqrt_sw = np.ones(X.shape[0], dtype=X.dtype)
        if sparse.issparse(X) and fit_intercept:
            X_offset_scale = X_offset / X_scale
            X1 = _get_rescaled_operator(X, X_offset_scale, sqrt_sw)
else:
X1 = X
(n_samples, n_features) = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
(coef, n_iter) = (coefs, n_iter)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
n_samples = K.shape[0]
n_targets = y.shape[1]
                copy = False  # the inlined kernel solver's `copy` argument defaults to False
                if copy:
                    K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
if has_sw:
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
K.flat[::n_samples + 1] += alpha[0]
try:
dual_coef = linalg.solve(K, y, assume_a='pos', overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn('Singular matrix in solving dual problem. Using least-squares solution instead.')
dual_coef = linalg.lstsq(K, y)[0]
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
dual_coef = dual_coef
else:
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for (dual_coef, target, current_alpha) in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, assume_a='pos', overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
dual_coef = dual_coefs.T
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
solver = 'svd'
else:
try:
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
coef = linalg.solve(A, Xy, assume_a='pos', overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for (coef, target, current_alpha) in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, assume_a='pos', overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
coef = coefs
except linalg.LinAlgError:
solver = 'svd'
elif solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for (i, (alpha_i, target)) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)}
(coef_, n_iter_, _) = sag_solver(X, target.ravel(), sample_weight, 'squared', alpha_i, 0, max_iter, tol, verbose, random_state, False, max_squared_sum, init, is_saga=solver == 'saga')
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == 'lbfgs':
(n_samples, n_features) = X.shape
options = {}
if max_iter is not None:
options['maxiter'] = max_iter
config = {'method': 'L-BFGS-B', 'tol': tol, 'jac': True, 'options': options}
if positive:
config['bounds'] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
        sqrt_sw = sample_weight_sqrt if has_sw else None
        if sqrt_sw is None:
            sqrt_sw = np.ones(X.shape[0], dtype=X.dtype)
        coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
        for i in range(y.shape[1]):
            x0 = np.zeros((n_features,))
            y_column = y[:, i]
            def func(w):
                residual = X.dot(w) - y_column
                if X_offset_scale is not None:
                    residual -= sqrt_sw * w.dot(X_offset_scale)
                f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
                grad = X.T @ residual + alpha[i] * w
                if X_offset_scale is not None:
                    grad -= X_offset_scale * residual.dot(sqrt_sw)
                return f, grad
result = optimize.minimize(func, x0, **config)
if not result['success']:
warnings.warn(f'The lbfgs solver did not converge. Try increasing max_iter or tol. Currently: max_iter={max_iter} and tol={tol}', ConvergenceWarning)
coefs[i] = result['x']
coef = coefs
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse inputs currently')
(U, s, Vt) = linalg.svd(X, full_matrices=False)
idx = s > 1e-15
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
coef = np.dot(Vt.T, d_UT_y).T
if ravel:
coef = coef.ravel()
if return_n_iter and return_intercept:
return (coef, n_iter, intercept)
elif return_intercept:
return (coef, intercept)
elif return_n_iter:
return (coef, n_iter)
else:
return coef
|
def _ridge_regression(X, y, alpha, sample_weight=None, solver='auto', max_iter=None, tol=0.0001, verbose=0, positive=False, random_state=None, return_n_iter=False, return_intercept=False, X_scale=None, X_offset=None, check_input=True, fit_intercept=False):
has_sw = sample_weight is not None
if solver == 'auto':
if positive:
solver = 'lbfgs'
elif return_intercept:
solver = 'sag'
elif not sparse.issparse(X):
solver = 'cholesky'
else:
solver = 'sparse_cg'
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga', 'lbfgs'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd' 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver)
if positive and solver != 'lbfgs':
raise ValueError(f"When positive=True, only 'lbfgs' solver can be used. Please change solver {solver} to 'lbfgs' or set positive=False.")
if solver == 'lbfgs' and (not positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if return_intercept and solver != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the intercept. Please change solver to 'sag' or set return_intercept=False.")
if check_input:
_dtype = [np.float64, np.float32]
<DeepExtract>
if sparse.issparse(X) and solver in ['auto', 'sag', 'saga']:
_accept_sparse = 'csr'
else:
_accept_sparse = ['csr', 'csc', 'coo']
</DeepExtract>
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order='C')
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
(n_samples, n_features) = X.shape
if y.ndim > 2:
raise ValueError('Target y has the wrong shape %s' % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
(n_samples_, n_targets) = y.shape
if n_samples != n_samples_:
raise ValueError('Number of samples in X and y does not correspond: %d != %d' % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ['sag', 'saga']:
(X, y, sample_weight_sqrt) = _rescale_data(X, y, sample_weight)
if alpha is not None and (not isinstance(alpha, np.ndarray)):
alpha = check_scalar(alpha, 'alpha', target_type=numbers.Real, min_val=0.0, include_boundaries='left')
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError('Number of targets and number of penalties do not correspond: %d != %d' % (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == 'sparse_cg':
<DeepExtract>
        sqrt_sw = sample_weight_sqrt if has_sw else None
        if sqrt_sw is None:
            sqrt_sw = np.ones(X.shape[0], dtype=X.dtype)
        (n_samples, n_features) = X.shape
        if X_offset is None or X_scale is None:
            X1 = sp_linalg.aslinearoperator(X)
        else:
            X_offset_scale = X_offset / X_scale
            X1 = _get_rescaled_operator(X, X_offset_scale, sqrt_sw)
        coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
        if n_features > n_samples:
            def create_mv(curr_alpha):
                def _mv(x):
                    return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
                return _mv
        else:
            def create_mv(curr_alpha):
                def _mv(x):
                    return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
                return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
C = sp_linalg.LinearOperator((n_samples, n_samples), matvec=mv, dtype=X.dtype)
try:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator((n_features, n_features), matvec=mv, dtype=X.dtype)
try:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol, atol='legacy')
except TypeError:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError('Failed with error code %d' % info)
if max_iter is None and info > 0 and verbose:
warnings.warn('sparse_cg did not converge after %d iterations.' % info, ConvergenceWarning)
coef = coefs
</DeepExtract>
elif solver == 'lsqr':
<DeepExtract>
        sqrt_sw = sample_weight_sqrt if has_sw else None
        if sqrt_sw is None:
            sqrt_sw = np.ones(X.shape[0], dtype=X.dtype)
        if sparse.issparse(X) and fit_intercept:
            X_offset_scale = X_offset / X_scale
            X1 = _get_rescaled_operator(X, X_offset_scale, sqrt_sw)
else:
X1 = X
(n_samples, n_features) = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
(coef, n_iter) = (coefs, n_iter)
</DeepExtract>
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
<DeepExtract>
n_samples = K.shape[0]
n_targets = y.shape[1]
                copy = False  # the inlined kernel solver's `copy` argument defaults to False
                if copy:
                    K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
if has_sw:
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
K.flat[::n_samples + 1] += alpha[0]
try:
dual_coef = linalg.solve(K, y, assume_a='pos', overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn('Singular matrix in solving dual problem. Using least-squares solution instead.')
dual_coef = linalg.lstsq(K, y)[0]
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
dual_coef = dual_coef
else:
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for (dual_coef, target, current_alpha) in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, assume_a='pos', overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
dual_coef = dual_coefs.T
</DeepExtract>
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
solver = 'svd'
else:
try:
<DeepExtract>
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
coef = linalg.solve(A, Xy, assume_a='pos', overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for (coef, target, current_alpha) in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, assume_a='pos', overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
coef = coefs
</DeepExtract>
except linalg.LinAlgError:
solver = 'svd'
elif solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for (i, (alpha_i, target)) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)}
(coef_, n_iter_, _) = sag_solver(X, target.ravel(), sample_weight, 'squared', alpha_i, 0, max_iter, tol, verbose, random_state, False, max_squared_sum, init, is_saga=solver == 'saga')
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == 'lbfgs':
<DeepExtract>
(n_samples, n_features) = X.shape
options = {}
if max_iter is not None:
options['maxiter'] = max_iter
config = {'method': 'L-BFGS-B', 'tol': tol, 'jac': True, 'options': options}
if positive:
config['bounds'] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
        sqrt_sw = sample_weight_sqrt if has_sw else None
        if sqrt_sw is None:
            sqrt_sw = np.ones(X.shape[0], dtype=X.dtype)
        coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
        for i in range(y.shape[1]):
            x0 = np.zeros((n_features,))
            y_column = y[:, i]
            def func(w):
                residual = X.dot(w) - y_column
                if X_offset_scale is not None:
                    residual -= sqrt_sw * w.dot(X_offset_scale)
                f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
                grad = X.T @ residual + alpha[i] * w
                if X_offset_scale is not None:
                    grad -= X_offset_scale * residual.dot(sqrt_sw)
                return f, grad
result = optimize.minimize(func, x0, **config)
if not result['success']:
warnings.warn(f'The lbfgs solver did not converge. Try increasing max_iter or tol. Currently: max_iter={max_iter} and tol={tol}', ConvergenceWarning)
coefs[i] = result['x']
coef = coefs
</DeepExtract>
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse inputs currently')
<DeepExtract>
(U, s, Vt) = linalg.svd(X, full_matrices=False)
idx = s > 1e-15
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
coef = np.dot(Vt.T, d_UT_y).T
</DeepExtract>
if ravel:
coef = coef.ravel()
if return_n_iter and return_intercept:
return (coef, n_iter, intercept)
elif return_intercept:
return (coef, intercept)
elif return_n_iter:
return (coef, n_iter)
else:
return coef
|
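# Sanity sketch for the 'cholesky' branch above: with a single alpha the
# primal normal equations (X^T X + alpha * I) w = X^T y give the ridge
# coefficients, and they agree with the SVD fallback formula
# w = V diag(s / (s^2 + alpha)) U^T y. Random toy data, not the library path.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 5))
y = X @ rng.normal(size=5) + 0.1 * rng.normal(size=50)
alpha = 1.0

A = X.T @ X
A.flat[::A.shape[0] + 1] += alpha  # add alpha on the diagonal in place
coef = linalg.solve(A, X.T @ y, assume_a='pos')

U, s, Vt = linalg.svd(X, full_matrices=False)
coef_svd = Vt.T @ ((s / (s ** 2 + alpha)) * (U.T @ y))
assert np.allclose(coef, coef_svd)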
def bench_b(power_list):
(n_samples, n_features) = (1000, 10000)
data_params = {'n_samples': n_samples, 'n_features': n_features, 'tail_strength': 0.7, 'random_state': random_state}
dataset_name = 'low rank matrix %d x %d' % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
if False:
print('... computing %s norm ...' % 2)
if 2 == 2:
v0 = _init_arpack_v0(min(X.shape), 0)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord=2)
else:
value = sp.linalg.norm(X, ord=2)
X_spectral_norm = value
if False:
print('... computing %s norm ...' % 'fro')
if 'fro' == 2:
v0 = _init_arpack_v0(min(X.shape), random_state)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord='fro')
else:
value = sp.linalg.norm(X, ord='fro')
X_fro_norm = value
for n_comp in [int(rank / 2), rank, rank * 2]:
label = 'rank=%d, n_comp=%d' % (rank, n_comp)
print(label)
for pi in power_list:
print('... running SVD ...')
if method != 'fbpca':
gc.collect()
t0 = time()
(U, mu, V) = randomized_svd(X, n_comp, n_oversamples=2, n_iter=pi, power_iteration_normalizer='LU', random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
(U, mu, V) = fbpca.pca(X, n_comp, raw=True, n_iter=pi, l=2 + n_comp)
call_time = time() - t0
(U, s, V, _) = (U, mu, V, call_time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm)
if not sp.sparse.issparse(X) or X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
f = norm_diff(A, norm='fro')
print('... computing fro norm by batches...')
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = 0.0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
f = np.sqrt(cum_norm)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = '%s: spectral norm diff vs n power iteration' % dataset_name
plt.figure()
for l in sorted(all_spectral.keys()):
            plt.plot(power_list, all_spectral[l], label=l, marker='o')
plt.legend(loc='lower right', prop={'size': 10})
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('n_iter')
title = '%s: Frobenius norm diff vs n power iteration' % dataset_name
plt.figure()
for l in sorted(all_frobenius.keys()):
        plt.plot(power_list, all_frobenius[l], label=l, marker='o')
plt.legend(loc='lower right', prop={'size': 10})
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('n_iter')
|
def bench_b(power_list):
(n_samples, n_features) = (1000, 10000)
data_params = {'n_samples': n_samples, 'n_features': n_features, 'tail_strength': 0.7, 'random_state': random_state}
dataset_name = 'low rank matrix %d x %d' % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
<DeepExtract>
if False:
print('... computing %s norm ...' % 2)
if 2 == 2:
v0 = _init_arpack_v0(min(X.shape), 0)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord=2)
else:
value = sp.linalg.norm(X, ord=2)
X_spectral_norm = value
</DeepExtract>
<DeepExtract>
if False:
print('... computing %s norm ...' % 'fro')
if 'fro' == 2:
v0 = _init_arpack_v0(min(X.shape), random_state)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord='fro')
else:
value = sp.linalg.norm(X, ord='fro')
X_fro_norm = value
</DeepExtract>
for n_comp in [int(rank / 2), rank, rank * 2]:
label = 'rank=%d, n_comp=%d' % (rank, n_comp)
print(label)
for pi in power_list:
<DeepExtract>
print('... running SVD ...')
if method != 'fbpca':
gc.collect()
t0 = time()
(U, mu, V) = randomized_svd(X, n_comp, n_oversamples=2, n_iter=pi, power_iteration_normalizer='LU', random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
(U, mu, V) = fbpca.pca(X, n_comp, raw=True, n_iter=pi, l=2 + n_comp)
call_time = time() - t0
(U, s, V, _) = (U, mu, V, call_time)
</DeepExtract>
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm)
<DeepExtract>
if not sp.sparse.issparse(X) or X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
f = norm_diff(A, norm='fro')
print('... computing fro norm by batches...')
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = 0.0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
f = np.sqrt(cum_norm)
</DeepExtract>
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = '%s: spectral norm diff vs n power iteration' % dataset_name
<DeepExtract>
plt.figure()
for l in sorted(all_spectral.keys()):
            plt.plot(power_list, all_spectral[l], label=l, marker='o')
plt.legend(loc='lower right', prop={'size': 10})
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('n_iter')
</DeepExtract>
title = '%s: Frobenius norm diff vs n power iteration' % dataset_name
<DeepExtract>
plt.figure()
for l in sorted(all_frobenius.keys()):
        plt.plot(power_list, all_frobenius[l], label=l, marker='o')
plt.legend(loc='lower right', prop={'size': 10})
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('n_iter')
</DeepExtract>
|
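# Sketch of the error measure tracked by the benchmark above: relative
# Frobenius norm of X - U diag(s) V for a few power-iteration counts. Sizes
# are kept small here so the snippet runs quickly; they are not the
# benchmark's configuration.
import numpy as np
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.extmath import randomized_svd

X = make_low_rank_matrix(n_samples=200, n_features=400,
                         effective_rank=10, random_state=0)
X_fro = np.linalg.norm(X, 'fro')
for n_iter in (0, 2, 5):
    U, s, V = randomized_svd(X, n_components=10, n_iter=n_iter, random_state=0)
    err = np.linalg.norm(X - U @ np.diag(s) @ V, 'fro') / X_fro
    print('n_iter=%d: relative Frobenius error %.3e' % (n_iter, err))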
def _fit_transform(self, X, compute_sources=False):
"""Fit the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
compute_sources : bool, default=False
        If False, sources are not computed, only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
S : ndarray of shape (n_samples, n_components) or None
Sources matrix. `None` if `compute_sources` is `False`.
"""
self._whiten = self.whiten
if self._whiten == 'warn':
warnings.warn("Starting in v1.3, whiten='unit-variance' will be used by default.", FutureWarning)
self._whiten = 'arbitrary-variance'
if self._whiten is True:
warnings.warn("Starting in v1.3, whiten=True should be specified as whiten='arbitrary-variance' (its current behaviour). This behavior is deprecated in 1.1 and will raise ValueError in 1.3.", FutureWarning, stacklevel=2)
self._whiten = 'arbitrary-variance'
XT = self._validate_data(X, copy=self._whiten, dtype=[np.float64, np.float32], ensure_min_samples=2).T
fun_args = {} if self.fun_args is None else self.fun_args
random_state = check_random_state(self.random_state)
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if self.fun == 'logcosh':
g = _logcosh
elif self.fun == 'exp':
g = _exp
elif self.fun == 'cube':
g = _cube
elif callable(self.fun):
def g(x, fun_args):
return self.fun(x, **fun_args)
(n_features, n_samples) = XT.shape
n_components = self.n_components
if not self._whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n_samples, n_features)
if n_components > min(n_samples, n_features):
n_components = min(n_samples, n_features)
warnings.warn('n_components is too large: it will be set to %s' % n_components)
if self._whiten:
X_mean = XT.mean(axis=-1)
XT -= X_mean[:, np.newaxis]
if self.whiten_solver == 'eigh':
(d, u) = linalg.eigh(XT.dot(X))
sort_indices = np.argsort(d)[::-1]
eps = np.finfo(d.dtype).eps
degenerate_idx = d < eps
if np.any(degenerate_idx):
warnings.warn("There are some small singular values, using whiten_solver = 'svd' might lead to more accurate results.")
d[degenerate_idx] = eps
np.sqrt(d, out=d)
(d, u) = (d[sort_indices], u[:, sort_indices])
elif self.whiten_solver == 'svd':
(u, d) = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
u *= np.sign(u[0])
K = (u / d).T[:n_components]
del u, d
X1 = np.dot(K, XT)
X1 *= np.sqrt(n_samples)
else:
X1 = as_float_array(XT, copy=False)
w_init = self.w_init
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components, n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s' % {'shape': (n_components, n_components)})
kwargs = {'tol': self.tol, 'g': g, 'fun_args': fun_args, 'max_iter': self.max_iter, 'w_init': w_init}
if self.algorithm == 'parallel':
        max_iter, tol = self.max_iter, self.tol  # same values packed into kwargs above
        W = _sym_decorrelation(w_init)
del w_init
p_ = float(X1.shape[1])
for ii in range(max_iter):
(gwtx, g_wtx) = g(np.dot(W, X1), fun_args)
W1 = _sym_decorrelation(np.dot(gwtx, X1.T) / p_ - g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
lim = max(abs(abs(np.einsum('ij,ij->i', W1, W)) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.', ConvergenceWarning)
(W, n_iter) = (W, ii + 1)
elif self.algorithm == 'deflation':
        max_iter, tol = self.max_iter, self.tol  # same values packed into kwargs above
        n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X1.dtype)
n_iter = []
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in range(max_iter):
(gwtx, g_wtx) = g(np.dot(w.T, X1), fun_args)
w1 = (X1 * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
(W, n_iter) = (W, max(n_iter))
del X1
self.n_iter_ = n_iter
if compute_sources:
if self._whiten:
S = np.linalg.multi_dot([W, K, XT]).T
else:
S = np.dot(W, XT).T
else:
S = None
if self._whiten:
if self._whiten == 'unit-variance':
if not compute_sources:
S = np.linalg.multi_dot([W, K, XT]).T
S_std = np.std(S, axis=0, keepdims=True)
S /= S_std
W /= S_std.T
self.components_ = np.dot(W, K)
self.mean_ = X_mean
self.whitening_ = K
else:
self.components_ = W
self.mixing_ = linalg.pinv(self.components_, check_finite=False)
self._unmixing = W
return S
|
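# Sketch of the 'eigh' whitening branch above: project the centered
# (n_features, n_samples) data onto eigenvectors of X X^T scaled by the
# inverse square-root eigenvalues, so the whitened signals have identity
# covariance. Toy data; variable names mirror the code but are local here.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
XT = rng.normal(size=(3, 1000))
XT -= XT.mean(axis=1, keepdims=True)

d, u = linalg.eigh(XT @ XT.T)
sort_indices = np.argsort(d)[::-1]
d, u = np.sqrt(d[sort_indices]), u[:, sort_indices]
K = (u / d).T                       # whitening matrix
X1 = np.sqrt(XT.shape[1]) * (K @ XT)
assert np.allclose(X1 @ X1.T / XT.shape[1], np.eye(3), atol=1e-8)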
def _fit_transform(self, X, compute_sources=False):
"""Fit the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
compute_sources : bool, default=False
        If False, sources are not computed, only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
S : ndarray of shape (n_samples, n_components) or None
Sources matrix. `None` if `compute_sources` is `False`.
"""
self._whiten = self.whiten
if self._whiten == 'warn':
warnings.warn("Starting in v1.3, whiten='unit-variance' will be used by default.", FutureWarning)
self._whiten = 'arbitrary-variance'
if self._whiten is True:
warnings.warn("Starting in v1.3, whiten=True should be specified as whiten='arbitrary-variance' (its current behaviour). This behavior is deprecated in 1.1 and will raise ValueError in 1.3.", FutureWarning, stacklevel=2)
self._whiten = 'arbitrary-variance'
XT = self._validate_data(X, copy=self._whiten, dtype=[np.float64, np.float32], ensure_min_samples=2).T
fun_args = {} if self.fun_args is None else self.fun_args
random_state = check_random_state(self.random_state)
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if self.fun == 'logcosh':
g = _logcosh
elif self.fun == 'exp':
g = _exp
elif self.fun == 'cube':
g = _cube
elif callable(self.fun):
def g(x, fun_args):
return self.fun(x, **fun_args)
(n_features, n_samples) = XT.shape
n_components = self.n_components
if not self._whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n_samples, n_features)
if n_components > min(n_samples, n_features):
n_components = min(n_samples, n_features)
warnings.warn('n_components is too large: it will be set to %s' % n_components)
if self._whiten:
X_mean = XT.mean(axis=-1)
XT -= X_mean[:, np.newaxis]
if self.whiten_solver == 'eigh':
(d, u) = linalg.eigh(XT.dot(X))
sort_indices = np.argsort(d)[::-1]
eps = np.finfo(d.dtype).eps
degenerate_idx = d < eps
if np.any(degenerate_idx):
warnings.warn("There are some small singular values, using whiten_solver = 'svd' might lead to more accurate results.")
d[degenerate_idx] = eps
np.sqrt(d, out=d)
(d, u) = (d[sort_indices], u[:, sort_indices])
elif self.whiten_solver == 'svd':
(u, d) = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
u *= np.sign(u[0])
K = (u / d).T[:n_components]
del u, d
X1 = np.dot(K, XT)
X1 *= np.sqrt(n_samples)
else:
X1 = as_float_array(XT, copy=False)
w_init = self.w_init
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components, n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s' % {'shape': (n_components, n_components)})
kwargs = {'tol': self.tol, 'g': g, 'fun_args': fun_args, 'max_iter': self.max_iter, 'w_init': w_init}
if self.algorithm == 'parallel':
<DeepExtract>
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X1.shape[1])
for ii in range(max_iter):
(gwtx, g_wtx) = g(np.dot(W, X1), fun_args)
W1 = _sym_decorrelation(np.dot(gwtx, X1.T) / p_ - g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
lim = max(abs(abs(np.einsum('ij,ij->i', W1, W)) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.', ConvergenceWarning)
(W, n_iter) = (W, ii + 1)
</DeepExtract>
elif self.algorithm == 'deflation':
<DeepExtract>
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X1.dtype)
n_iter = []
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in range(max_iter):
(gwtx, g_wtx) = g(np.dot(w.T, X1), fun_args)
w1 = (X1 * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
(W, n_iter) = (W, max(n_iter))
</DeepExtract>
del X1
self.n_iter_ = n_iter
if compute_sources:
if self._whiten:
S = np.linalg.multi_dot([W, K, XT]).T
else:
S = np.dot(W, XT).T
else:
S = None
if self._whiten:
if self._whiten == 'unit-variance':
if not compute_sources:
S = np.linalg.multi_dot([W, K, XT]).T
S_std = np.std(S, axis=0, keepdims=True)
S /= S_std
W /= S_std.T
self.components_ = np.dot(W, K)
self.mean_ = X_mean
self.whitening_ = K
else:
self.components_ = W
self.mixing_ = linalg.pinv(self.components_, check_finite=False)
self._unmixing = W
return S
|
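Both versions of the parallel FastICA loop above rely on _sym_decorrelation, which enforces W W^T = I via W <- (W W^T)^{-1/2} W. A short stand-alone sketch of that operation (an assumed re-implementation, not the library's private helper):

import numpy as np
from scipy import linalg

def sym_decorrelation(W):
    """Return (W W^T)^{-1/2} W so that the result has orthonormal rows."""
    s, u = linalg.eigh(np.dot(W, W.T))
    return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])

rng = np.random.RandomState(0)
W = sym_decorrelation(rng.randn(3, 3))
print(np.round(W.dot(W.T), 6))   # approximately the identity: rows are orthonormal
|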
def export(self, decision_tree):
if self.feature_names is not None:
if len(self.feature_names) != decision_tree.n_features_in_:
raise ValueError('Length of feature_names, %d does not match number of features, %d' % (len(self.feature_names), decision_tree.n_features_in_))
self.out_file.write('digraph Tree {\n')
self.out_file.write('node [shape=box')
rounded_filled = []
if self.filled:
rounded_filled.append('filled')
if self.rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
self.out_file.write(', style="%s", color="black"' % ', '.join(rounded_filled))
self.out_file.write(', fontname="%s"' % self.fontname)
self.out_file.write('] ;\n')
if self.leaves_parallel:
self.out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
self.out_file.write('edge [fontname="%s"] ;\n' % self.fontname)
if self.rotate:
self.out_file.write('rankdir=LR ;\n')
if isinstance(decision_tree, _tree.Tree):
if 0 == _tree.TREE_LEAF:
raise ValueError('Invalid node_id %s' % _tree.TREE_LEAF)
left_child = decision_tree.children_left[0]
right_child = decision_tree.children_right[0]
if self.max_depth is None or depth <= self.max_depth:
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(0))
elif str(depth) not in self.ranks:
self.ranks[str(depth)] = [str(0)]
else:
self.ranks[str(depth)].append(str(0))
self.out_file.write('%d [label=%s' % (0, self.node_to_str(decision_tree, 0, 'impurity')))
if self.filled:
self.out_file.write(', fillcolor="%s"' % self.get_fill_color(decision_tree, 0))
self.out_file.write('] ;\n')
if parent is not None:
self.out_file.write('%d -> %d' % (parent, 0))
if parent == 0:
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if 0 == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(decision_tree, left_child, criterion='impurity', parent=0, depth=depth + 1)
self.recurse(decision_tree, right_child, criterion='impurity', parent=0, depth=depth + 1)
else:
self.ranks['leaves'].append(str(0))
self.out_file.write('%d [label="(...)"' % 0)
if self.filled:
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n' % 0)
if parent is not None:
self.out_file.write('%d -> %d ;\n' % (parent, 0))
else:
if 0 == _tree.TREE_LEAF:
raise ValueError('Invalid node_id %s' % _tree.TREE_LEAF)
left_child = decision_tree.tree_.children_left[0]
right_child = decision_tree.tree_.children_right[0]
if self.max_depth is None or depth <= self.max_depth:
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(0))
elif str(depth) not in self.ranks:
self.ranks[str(depth)] = [str(0)]
else:
self.ranks[str(depth)].append(str(0))
self.out_file.write('%d [label=%s' % (0, self.node_to_str(decision_tree.tree_, 0, decision_tree.criterion)))
if self.filled:
self.out_file.write(', fillcolor="%s"' % self.get_fill_color(decision_tree.tree_, 0))
self.out_file.write('] ;\n')
if parent is not None:
self.out_file.write('%d -> %d' % (parent, 0))
if parent == 0:
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if 0 == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(decision_tree.tree_, left_child, criterion=decision_tree.criterion, parent=0, depth=depth + 1)
self.recurse(decision_tree.tree_, right_child, criterion=decision_tree.criterion, parent=0, depth=depth + 1)
else:
self.ranks['leaves'].append(str(0))
self.out_file.write('%d [label="(...)"' % 0)
if self.filled:
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n' % 0)
if parent is not None:
self.out_file.write('%d -> %d ;\n' % (parent, 0))
if self.leaves_parallel:
for rank in sorted(self.ranks):
self.out_file.write('{rank=same ; ' + '; '.join((r for r in self.ranks[rank])) + '} ;\n')
self.out_file.write('}')
|
def export(self, decision_tree):
if self.feature_names is not None:
if len(self.feature_names) != decision_tree.n_features_in_:
raise ValueError('Length of feature_names, %d does not match number of features, %d' % (len(self.feature_names), decision_tree.n_features_in_))
<DeepExtract>
self.out_file.write('digraph Tree {\n')
self.out_file.write('node [shape=box')
rounded_filled = []
if self.filled:
rounded_filled.append('filled')
if self.rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
self.out_file.write(', style="%s", color="black"' % ', '.join(rounded_filled))
self.out_file.write(', fontname="%s"' % self.fontname)
self.out_file.write('] ;\n')
if self.leaves_parallel:
self.out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
self.out_file.write('edge [fontname="%s"] ;\n' % self.fontname)
if self.rotate:
self.out_file.write('rankdir=LR ;\n')
</DeepExtract>
if isinstance(decision_tree, _tree.Tree):
<DeepExtract>
if 0 == _tree.TREE_LEAF:
raise ValueError('Invalid node_id %s' % _tree.TREE_LEAF)
left_child = decision_tree.children_left[0]
right_child = decision_tree.children_right[0]
if self.max_depth is None or depth <= self.max_depth:
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(0))
elif str(depth) not in self.ranks:
self.ranks[str(depth)] = [str(0)]
else:
self.ranks[str(depth)].append(str(0))
self.out_file.write('%d [label=%s' % (0, self.node_to_str(decision_tree, 0, 'impurity')))
if self.filled:
self.out_file.write(', fillcolor="%s"' % self.get_fill_color(decision_tree, 0))
self.out_file.write('] ;\n')
if parent is not None:
self.out_file.write('%d -> %d' % (parent, 0))
if parent == 0:
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if 0 == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(decision_tree, left_child, criterion='impurity', parent=0, depth=depth + 1)
self.recurse(decision_tree, right_child, criterion='impurity', parent=0, depth=depth + 1)
else:
self.ranks['leaves'].append(str(0))
self.out_file.write('%d [label="(...)"' % 0)
if self.filled:
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n' % 0)
if parent is not None:
self.out_file.write('%d -> %d ;\n' % (parent, 0))
</DeepExtract>
else:
<DeepExtract>
if 0 == _tree.TREE_LEAF:
raise ValueError('Invalid node_id %s' % _tree.TREE_LEAF)
left_child = decision_tree.tree_.children_left[0]
right_child = decision_tree.tree_.children_right[0]
if self.max_depth is None or depth <= self.max_depth:
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(0))
elif str(depth) not in self.ranks:
self.ranks[str(depth)] = [str(0)]
else:
self.ranks[str(depth)].append(str(0))
self.out_file.write('%d [label=%s' % (0, self.node_to_str(decision_tree.tree_, 0, decision_tree.criterion)))
if self.filled:
self.out_file.write(', fillcolor="%s"' % self.get_fill_color(decision_tree.tree_, 0))
self.out_file.write('] ;\n')
if parent is not None:
self.out_file.write('%d -> %d' % (parent, 0))
if parent == 0:
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if 0 == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(decision_tree.tree_, left_child, criterion=decision_tree.criterion, parent=0, depth=depth + 1)
self.recurse(decision_tree.tree_, right_child, criterion=decision_tree.criterion, parent=0, depth=depth + 1)
else:
self.ranks['leaves'].append(str(0))
self.out_file.write('%d [label="(...)"' % 0)
if self.filled:
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n' % 0)
if parent is not None:
self.out_file.write('%d -> %d ;\n' % (parent, 0))
</DeepExtract>
<DeepExtract>
if self.leaves_parallel:
for rank in sorted(self.ranks):
self.out_file.write('{rank=same ; ' + '; '.join((r for r in self.ranks[rank])) + '} ;\n')
self.out_file.write('}')
</DeepExtract>
|
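The exporter pair above writes Graphviz DOT output node by node; the public entry point for this machinery is sklearn.tree.export_graphviz. A small, illustrative usage example:

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_graphviz

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

# With out_file=None the DOT source is returned as a string.
dot_source = export_graphviz(clf, out_file=None, filled=True, rounded=True)
print(dot_source.splitlines()[0])   # 'digraph Tree {'
|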
def _validate_input(self, X, in_fit):
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not in_fit and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=in_fit, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if in_fit:
self._fit_dtype = X.dtype
if _is_pandas_na(self.missing_values):
return
if X.dtype.kind in ('f', 'i', 'u') and (not isinstance(self.missing_values, numbers.Real)):
raise ValueError("'X' and 'missing_values' types are expected to be both numerical. Got X.dtype={} and type(missing_values)={}.".format(X.dtype, type(self.missing_values)))
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
return X
|
def _validate_input(self, X, in_fit):
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not in_fit and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=in_fit, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if in_fit:
self._fit_dtype = X.dtype
<DeepExtract>
if _is_pandas_na(self.missing_values):
return
if X.dtype.kind in ('f', 'i', 'u') and (not isinstance(self.missing_values, numbers.Real)):
raise ValueError("'X' and 'missing_values' types are expected to be both numerical. Got X.dtype={} and type(missing_values)={}.".format(X.dtype, type(self.missing_values)))
</DeepExtract>
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
return X
|
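The _validate_input pair above switches to an object dtype for the 'most_frequent' and 'constant' strategies so that string-valued columns survive validation, and only allows NaN-like missing markers for numeric input. An illustrative use of the public estimator:

import numpy as np
from sklearn.impute import SimpleImputer

# Numeric input: mean imputation over NaNs.
X_num = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, np.nan]])
print(SimpleImputer(strategy="mean").fit_transform(X_num))

# Object input: only 'most_frequent' / 'constant' accept non-numeric values.
X_cat = np.array([["a", "x"], [np.nan, "y"], ["a", np.nan]], dtype=object)
print(SimpleImputer(strategy="most_frequent").fit_transform(X_cat))
|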
@pytest.mark.filterwarnings('ignore:WMinkowskiDistance:FutureWarning:sklearn')
@pytest.mark.parametrize('metric', neighbors.VALID_METRICS['brute'])
def test_valid_brute_metric_for_auto_algorithm(global_dtype, metric, n_samples=20, n_features=12):
X = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
Xcsr = csr_matrix(X)
rng = np.random.RandomState(1)
weights = rng.random_sample(n_features)
if metric == 'minkowski':
minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)]
if sp_version >= parse_version('1.8.0.dev0'):
minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = minkowski_kwargs
if metric == 'wminkowski':
weights /= weights.sum()
wminkowski_kwargs = [dict(p=1.5, w=weights)]
if sp_version < parse_version('1.8.0.dev0'):
wminkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = wminkowski_kwargs
if metric == 'seuclidean':
metric_params_list = [dict(V=rng.rand(n_features))]
if metric == 'mahalanobis':
A = rng.rand(n_features, n_features)
VI = A + A.T + 3 * np.eye(n_features)
metric_params_list = [dict(VI=VI)]
metric_params_list = [{}]
if metric == 'precomputed':
X_precomputed = rng.random_sample((10, 4))
Y_precomputed = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X_precomputed, metric='euclidean')
DYX = metrics.pairwise_distances(Y_precomputed, X_precomputed, metric='euclidean')
nb_p = neighbors.NearestNeighbors(n_neighbors=3, metric='precomputed')
nb_p.fit(DXX)
nb_p.kneighbors(DYX)
else:
for metric_params in metric_params_list:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto', metric=metric, metric_params=metric_params)
if metric == 'haversine':
feature_sl = slice(None, 2)
X = np.ascontiguousarray(X[:, feature_sl])
nn.fit(X)
nn.kneighbors(X)
if metric in VALID_METRICS_SPARSE['brute']:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto', metric=metric).fit(Xcsr)
nn.kneighbors(Xcsr)
|
@pytest.mark.filterwarnings('ignore:WMinkowskiDistance:FutureWarning:sklearn')
@pytest.mark.parametrize('metric', neighbors.VALID_METRICS['brute'])
def test_valid_brute_metric_for_auto_algorithm(global_dtype, metric, n_samples=20, n_features=12):
X = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
Xcsr = csr_matrix(X)
<DeepExtract>
rng = np.random.RandomState(1)
weights = rng.random_sample(n_features)
if metric == 'minkowski':
minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)]
if sp_version >= parse_version('1.8.0.dev0'):
minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = minkowski_kwargs
if metric == 'wminkowski':
weights /= weights.sum()
wminkowski_kwargs = [dict(p=1.5, w=weights)]
if sp_version < parse_version('1.8.0.dev0'):
wminkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = wminkowski_kwargs
if metric == 'seuclidean':
metric_params_list = [dict(V=rng.rand(n_features))]
if metric == 'mahalanobis':
A = rng.rand(n_features, n_features)
VI = A + A.T + 3 * np.eye(n_features)
metric_params_list = [dict(VI=VI)]
metric_params_list = [{}]
</DeepExtract>
if metric == 'precomputed':
X_precomputed = rng.random_sample((10, 4))
Y_precomputed = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X_precomputed, metric='euclidean')
DYX = metrics.pairwise_distances(Y_precomputed, X_precomputed, metric='euclidean')
nb_p = neighbors.NearestNeighbors(n_neighbors=3, metric='precomputed')
nb_p.fit(DXX)
nb_p.kneighbors(DYX)
else:
for metric_params in metric_params_list:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto', metric=metric, metric_params=metric_params)
if metric == 'haversine':
feature_sl = slice(None, 2)
X = np.ascontiguousarray(X[:, feature_sl])
nn.fit(X)
nn.kneighbors(X)
if metric in VALID_METRICS_SPARSE['brute']:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto', metric=metric).fit(Xcsr)
nn.kneighbors(Xcsr)
|
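The test above builds metric-specific metric_params (for example V for 'seuclidean' and VI for 'mahalanobis') before fitting. A compact illustration of passing such parameters to NearestNeighbors, with assumed toy data:

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X = rng.rand(20, 4)

# Standardised Euclidean distance needs the per-feature variances V.
nn = NearestNeighbors(n_neighbors=3, algorithm="auto", metric="seuclidean",
                      metric_params={"V": X.var(axis=0)})
nn.fit(X)
dist, ind = nn.kneighbors(X[:2])
print(dist.shape, ind.shape)   # (2, 3) (2, 3)
|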
@ignore_warnings(category=FutureWarning)
def check_regressors_train(name, regressor_orig, readonly_memmap=False, X_dtype=np.float64):
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
(X, y) = make_regression(n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = (X, y)
(X, y) = REGRESSION_DATASET
X = X.astype(X_dtype)
y = scale(y)
regressor = clone(regressor_orig)
if '1darray' in _safe_tags(regressor, key='X_types'):
X = X[:, 0]
if _safe_tags(regressor, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(regressor, key='X_types'):
X = (X - X.min()).astype(np.int32)
if regressor.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(regressor):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(regressor, key='pairwise'):
X = kernel(X, X)
X = X
if _safe_tags(regressor, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(regressor, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(regressor, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if name in CROSS_DECOMPOSITION:
rnd = np.random.RandomState(0)
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
if readonly_memmap:
(X, y, y_) = create_memmap_backed_data([X, y, y_])
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
with raises(ValueError, err_msg=f'The classifier {name} does not raise an error when incorrect/malformed input data for fit is passed. The number of training examples is not the same as the number of labels. Perhaps use check_X_y in fit.'):
regressor.fit(X, y[:-1])
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert y_pred.shape == y_.shape
if not _safe_tags(regressor, key='poor_score'):
assert regressor.score(X, y_) > 0.5
|
@ignore_warnings(category=FutureWarning)
def check_regressors_train(name, regressor_orig, readonly_memmap=False, X_dtype=np.float64):
<DeepExtract>
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
(X, y) = make_regression(n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = (X, y)
(X, y) = REGRESSION_DATASET
</DeepExtract>
X = X.astype(X_dtype)
y = scale(y)
regressor = clone(regressor_orig)
<DeepExtract>
if '1darray' in _safe_tags(regressor, key='X_types'):
X = X[:, 0]
if _safe_tags(regressor, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(regressor, key='X_types'):
X = (X - X.min()).astype(np.int32)
if regressor.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(regressor):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(regressor, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
<DeepExtract>
if _safe_tags(regressor, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(regressor, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(regressor, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if name in CROSS_DECOMPOSITION:
rnd = np.random.RandomState(0)
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
if readonly_memmap:
(X, y, y_) = create_memmap_backed_data([X, y, y_])
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
with raises(ValueError, err_msg=f'The classifier {name} does not raise an error when incorrect/malformed input data for fit is passed. The number of training examples is not the same as the number of labels. Perhaps use check_X_y in fit.'):
regressor.fit(X, y[:-1])
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert y_pred.shape == y_.shape
if not _safe_tags(regressor, key='poor_score'):
assert regressor.score(X, y_) > 0.5
|
def _minibatch_step(self, X, dictionary, random_state, step):
"""Perform the update on the dictionary for one minibatch."""
batch_size = X.shape[0]
(n_samples, n_features) = X.shape
n_components = dictionary.shape[0]
if self._fit_algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = self.alpha
if regularization is None:
regularization = 1.0
if gram is None and self._fit_algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and self._fit_algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if effective_n_jobs(self.n_jobs) == 1 or self._fit_algorithm == 'threshold':
code = _sparse_encode_precomputed(X, dictionary, gram=gram, cov=cov, algorithm=self._fit_algorithm, regularization=regularization, copy_cov=copy_cov, init=init, max_iter=self.transform_max_iter, verbose=self.verbose, positive=self.positive_code)
code = code
n_samples = X.shape[0]
n_components = dictionary.shape[0]
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(self.n_jobs)))
code_views = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)((delayed(_sparse_encode_precomputed)(X[this_slice], dictionary, gram=gram, cov=cov[:, this_slice] if cov is not None else None, algorithm=self._fit_algorithm, regularization=regularization, copy_cov=copy_cov, init=init[this_slice] if init is not None else None, max_iter=self.transform_max_iter, verbose=self.verbose, positive=self.positive_code) for this_slice in slices))
for (this_slice, this_view) in zip(slices, code_views):
code[this_slice] = this_view
code = code
batch_cost = (0.5 * ((X - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
if step < batch_size - 1:
theta = (step + 1) * batch_size
else:
theta = batch_size ** 2 + step + 1 - batch_size
beta = (theta + 1 - batch_size) / (theta + 1)
self._A *= beta
self._A += code.T @ code / batch_size
self._B *= beta
self._B += X.T @ code / batch_size
(n_samples, n_components) = code.shape
random_state = check_random_state(random_state)
if self._A is None:
self._A = code.T @ code
if self._B is None:
self._B = X.T @ code
n_unused = 0
for k in range(n_components):
if self._A[k, k] > 1e-06:
dictionary[k] += (self._B[:, k] - self._A[k] @ dictionary) / self._A[k, k]
else:
newd = X[random_state.choice(n_samples)]
noise_level = 0.01 * (newd.std() or 1)
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if self.positive_dict:
np.clip(dictionary[k], 0, None, out=dictionary[k])
dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
if self.verbose and n_unused > 0:
print(f'{n_unused} unused atoms resampled.')
return batch_cost
|
def _minibatch_step(self, X, dictionary, random_state, step):
"""Perform the update on the dictionary for one minibatch."""
batch_size = X.shape[0]
<DeepExtract>
(n_samples, n_features) = X.shape
n_components = dictionary.shape[0]
if self._fit_algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = self.alpha
if regularization is None:
regularization = 1.0
if gram is None and self._fit_algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and self._fit_algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if effective_n_jobs(self.n_jobs) == 1 or self._fit_algorithm == 'threshold':
code = _sparse_encode_precomputed(X, dictionary, gram=gram, cov=cov, algorithm=self._fit_algorithm, regularization=regularization, copy_cov=copy_cov, init=init, max_iter=self.transform_max_iter, verbose=self.verbose, positive=self.positive_code)
code = code
n_samples = X.shape[0]
n_components = dictionary.shape[0]
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(self.n_jobs)))
code_views = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)((delayed(_sparse_encode_precomputed)(X[this_slice], dictionary, gram=gram, cov=cov[:, this_slice] if cov is not None else None, algorithm=self._fit_algorithm, regularization=regularization, copy_cov=copy_cov, init=init[this_slice] if init is not None else None, max_iter=self.transform_max_iter, verbose=self.verbose, positive=self.positive_code) for this_slice in slices))
for (this_slice, this_view) in zip(slices, code_views):
code[this_slice] = this_view
code = code
</DeepExtract>
batch_cost = (0.5 * ((X - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
<DeepExtract>
if step < batch_size - 1:
theta = (step + 1) * batch_size
else:
theta = batch_size ** 2 + step + 1 - batch_size
beta = (theta + 1 - batch_size) / (theta + 1)
self._A *= beta
self._A += code.T @ code / batch_size
self._B *= beta
self._B += X.T @ code / batch_size
</DeepExtract>
<DeepExtract>
(n_samples, n_components) = code.shape
random_state = check_random_state(random_state)
if self._A is None:
self._A = code.T @ code
if self._B is None:
self._B = X.T @ code
n_unused = 0
for k in range(n_components):
if self._A[k, k] > 1e-06:
dictionary[k] += (self._B[:, k] - self._A[k] @ dictionary) / self._A[k, k]
else:
newd = X[random_state.choice(n_samples)]
noise_level = 0.01 * (newd.std() or 1)
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if self.positive_dict:
np.clip(dictionary[k], 0, None, out=dictionary[k])
dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
if self.verbose and n_unused > 0:
print(f'{n_unused} unused atoms resampled.')
</DeepExtract>
return batch_cost
|
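The last inlined block of _minibatch_step updates each dictionary atom by block coordinate descent on the running statistics A (code Gram matrix) and B (data/code product): d_k <- d_k + (B[:, k] - A[k] @ D) / A[k, k], followed by projection onto the unit ball. A stripped-down sketch of that update (assumed variable names, without the resampling of unused atoms):

import numpy as np

def update_dict(D, A, B):
    """One pass of block coordinate descent over the dictionary atoms.

    D : (n_components, n_features) dictionary, updated in place
    A : (n_components, n_components) accumulated code Gram matrix
    B : (n_features, n_components) accumulated data/code product
    """
    for k in range(D.shape[0]):
        if A[k, k] > 1e-6:
            D[k] += (B[:, k] - A[k] @ D) / A[k, k]
        D[k] /= max(np.linalg.norm(D[k]), 1)   # keep atoms inside the unit ball
    return D

rng = np.random.RandomState(0)
X = rng.randn(32, 8)                     # minibatch
D = rng.randn(4, 8)                      # dictionary
C = X @ np.linalg.pinv(D)                # stand-in for the sparse codes
D = update_dict(D, C.T @ C, X.T @ C)
print(np.linalg.norm(D, axis=1))         # all atom norms <= 1
|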
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
Due to ties in distances between points in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` due to numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because bad
initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
n_iter = 500
for seed in seeds:
tsne = TSNE(n_components=2, init='random', random_state=seed, perplexity=50, n_iter=n_iter, method=method, learning_rate='auto')
Y = tsne.fit_transform(X_2d_grid)
try_name = '{}_{}'.format(method, seed)
try:
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > 0.5, try_name
assert largest_to_mean < 2, try_name
except AssertionError:
try_name += ':rerun'
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > 0.5, try_name
assert largest_to_mean < 2, try_name
|
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
Due to ties in distances between points in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` due to numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because bad
initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
n_iter = 500
for seed in seeds:
tsne = TSNE(n_components=2, init='random', random_state=seed, perplexity=50, n_iter=n_iter, method=method, learning_rate='auto')
Y = tsne.fit_transform(X_2d_grid)
try_name = '{}_{}'.format(method, seed)
try:
<DeepExtract>
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > 0.5, try_name
assert largest_to_mean < 2, try_name
</DeepExtract>
except AssertionError:
try_name += ':rerun'
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
<DeepExtract>
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > 0.5, try_name
assert largest_to_mean < 2, try_name
</DeepExtract>
|
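Both branches of test_uniform_grid repeat the same nearest-neighbour assertions on the embedding: every point must have a neighbour at a non-trivial distance, and the nearest-neighbour distances must be roughly uniform. Pulled out as a standalone helper (a sketch with an assumed name):

import numpy as np
from sklearn.neighbors import NearestNeighbors

def assert_uniform_grid(Y, try_name=""):
    """Check that nearest-neighbour distances in Y are non-degenerate and roughly uniform."""
    nn = NearestNeighbors(n_neighbors=1).fit(Y)
    dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
    assert dist_to_nn.min() > 0.1
    smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
    largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
    assert smallest_to_mean > 0.5, try_name
    assert largest_to_mean < 2, try_name

# A perfect unit grid passes trivially: all nearest-neighbour distances equal 1.
assert_uniform_grid(np.mgrid[0:10, 0:10].reshape(2, -1).T.astype(float))
|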
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
def _mock_urlopen_raise(request, *args, **kwargs):
raise ValueError('This mechanism intends to test correct cache handling. As such, urlopen should never be accessed. URL: %s' % request.get_full_url())
data_id = 61
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
(X_fetched, y_fetched) = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False, parser='liac-arff')
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen_raise)
(X_cached, y_cached) = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False, parser='liac-arff')
np.testing.assert_array_equal(X_fetched, X_cached)
np.testing.assert_array_equal(y_fetched, y_cached)
|
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
def _mock_urlopen_raise(request, *args, **kwargs):
raise ValueError('This mechanism intends to test correct cache handling. As such, urlopen should never be accessed. URL: %s' % request.get_full_url())
data_id = 61
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
(X_fetched, y_fetched) = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False, parser='liac-arff')
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen_raise)
(X_cached, y_cached) = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False, parser='liac-arff')
np.testing.assert_array_equal(X_fetched, X_cached)
np.testing.assert_array_equal(y_fetched, y_cached)
|
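Outside the mocked test, the caching behaviour exercised above amounts to: the first fetch_openml call downloads into data_home, and later identical calls with cache=True are served from disk. An illustrative call (network access is needed on the first run; the data_home path and parser choice are assumptions):

from sklearn.datasets import fetch_openml

# First call populates <data_home>/openml; repeated calls reuse the cached files.
X, y = fetch_openml(data_id=61, cache=True, data_home="/tmp/scikit_learn_data",
                    return_X_y=True, as_frame=False, parser="liac-arff")
print(X.shape, y.shape)   # (150, 4) (150,) for dataset 61 (iris)
|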
def test_tempmemmap(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, 'register', registration_counter)
input_array = np.ones(3)
with TempMemmap(input_array) as data:
assert isinstance(data, np.memmap)
writeable = mmap_mode != 'r'
assert data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, data)
temp_folder = os.path.dirname(data.filename)
if os.name != 'nt':
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 1
mmap_mode = 'r+'
with TempMemmap(input_array, mmap_mode=mmap_mode) as data:
assert isinstance(data, np.memmap)
writeable = mmap_mode != 'r'
assert data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, data)
temp_folder = os.path.dirname(data.filename)
if os.name != 'nt':
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 2
|
def test_tempmemmap(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, 'register', registration_counter)
input_array = np.ones(3)
with TempMemmap(input_array) as data:
<DeepExtract>
assert isinstance(data, np.memmap)
writeable = mmap_mode != 'r'
assert data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, data)
</DeepExtract>
temp_folder = os.path.dirname(data.filename)
if os.name != 'nt':
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 1
mmap_mode = 'r+'
with TempMemmap(input_array, mmap_mode=mmap_mode) as data:
<DeepExtract>
assert isinstance(data, np.memmap)
writeable = mmap_mode != 'r'
assert data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, data)
</DeepExtract>
temp_folder = os.path.dirname(data.filename)
if os.name != 'nt':
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 2
|
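TempMemmap in the test wraps the pattern of dumping an array into a temporary folder and re-loading it as a memory map, where mmap_mode='r' yields a read-only view and 'r+' a writeable one. A joblib-based sketch of that idea (illustrative, not the test utility itself):

import os
import tempfile

import joblib
import numpy as np

data = np.ones(3)
folder = tempfile.mkdtemp(prefix="sklearn_memmap_")
path = os.path.join(folder, "data.pkl")
joblib.dump(data, path)

readonly = joblib.load(path, mmap_mode="r")
writable = joblib.load(path, mmap_mode="r+")
print(isinstance(readonly, np.memmap), readonly.flags.writeable)   # True False
print(isinstance(writable, np.memmap), writable.flags.writeable)   # True True
|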
def fit(self, X, y=None, sample_weight=None):
"""Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', copy=self.copy_x, accept_large_sparse=False)
self._check_params_vs_input(X)
self._random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
if self.algorithm == 'lloyd' or self.n_clusters == 1:
self._kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
else:
self._kmeans_single = _kmeans_single_elkan
if not sp.issparse(X):
self._X_mean = X.mean(axis=0)
X -= self._X_mean
self._bisecting_tree = _BisectingTree(indices=np.arange(X.shape[0]), center=X.mean(axis=0), score=0)
x_squared_norms = row_norms(X, squared=True)
for _ in range(self.n_clusters - 1):
cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
X = X[cluster_to_bisect.indices]
x_squared_norms = x_squared_norms[cluster_to_bisect.indices]
sample_weight = sample_weight[cluster_to_bisect.indices]
best_inertia = None
for _ in range(self.n_init):
centers_init = self._init_centroids(X, x_squared_norms=x_squared_norms, init=self.init, random_state=self._random_state, n_centroids=2, sample_weight=sample_weight)
(labels, inertia, centers, _) = self._kmeans_single(X, sample_weight, centers_init, max_iter=self.max_iter, verbose=self.verbose, tol=self.tol, n_threads=self._n_threads)
if best_inertia is None or inertia < best_inertia * (1 - 1e-06):
best_labels = labels
best_centers = centers
best_inertia = inertia
if self.verbose:
print(f'New centroids from bisection: {best_centers}')
if self.bisecting_strategy == 'biggest_inertia':
scores = self._inertia_per_cluster(X, best_centers, best_labels, sample_weight)
else:
scores = np.bincount(best_labels, minlength=2)
cluster_to_bisect.split(best_labels, best_centers, scores)
self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
for (i, cluster_node) in enumerate(self._bisecting_tree.iter_leaves()):
self.labels_[cluster_node.indices] = i
self.cluster_centers_[i] = cluster_node.center
cluster_node.label = i
cluster_node.indices = None
if not sp.issparse(X):
X += self._X_mean
self.cluster_centers_ += self._X_mean
_inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
self.inertia_ = _inertia(X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads)
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
def fit(self, X, y=None, sample_weight=None):
"""Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', copy=self.copy_x, accept_large_sparse=False)
self._check_params_vs_input(X)
self._random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
if self.algorithm == 'lloyd' or self.n_clusters == 1:
self._kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
else:
self._kmeans_single = _kmeans_single_elkan
if not sp.issparse(X):
self._X_mean = X.mean(axis=0)
X -= self._X_mean
self._bisecting_tree = _BisectingTree(indices=np.arange(X.shape[0]), center=X.mean(axis=0), score=0)
x_squared_norms = row_norms(X, squared=True)
for _ in range(self.n_clusters - 1):
cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
<DeepExtract>
X = X[cluster_to_bisect.indices]
x_squared_norms = x_squared_norms[cluster_to_bisect.indices]
sample_weight = sample_weight[cluster_to_bisect.indices]
best_inertia = None
for _ in range(self.n_init):
centers_init = self._init_centroids(X, x_squared_norms=x_squared_norms, init=self.init, random_state=self._random_state, n_centroids=2, sample_weight=sample_weight)
(labels, inertia, centers, _) = self._kmeans_single(X, sample_weight, centers_init, max_iter=self.max_iter, verbose=self.verbose, tol=self.tol, n_threads=self._n_threads)
if best_inertia is None or inertia < best_inertia * (1 - 1e-06):
best_labels = labels
best_centers = centers
best_inertia = inertia
if self.verbose:
print(f'New centroids from bisection: {best_centers}')
if self.bisecting_strategy == 'biggest_inertia':
scores = self._inertia_per_cluster(X, best_centers, best_labels, sample_weight)
else:
scores = np.bincount(best_labels, minlength=2)
cluster_to_bisect.split(best_labels, best_centers, scores)
</DeepExtract>
self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
for (i, cluster_node) in enumerate(self._bisecting_tree.iter_leaves()):
self.labels_[cluster_node.indices] = i
self.cluster_centers_[i] = cluster_node.center
cluster_node.label = i
cluster_node.indices = None
if not sp.issparse(X):
X += self._X_mean
self.cluster_centers_ += self._X_mean
_inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
self.inertia_ = _inertia(X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads)
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
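An illustrative use of the estimator whose fit method is shown above, covering the bisecting_strategy option (assumed toy data):

import numpy as np
from sklearn.cluster import BisectingKMeans

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2) + offset for offset in ([0, 0], [6, 0], [0, 6])])

bkm = BisectingKMeans(n_clusters=3, bisecting_strategy="biggest_inertia",
                      random_state=0).fit(X)
print(bkm.cluster_centers_.shape)         # (3, 2)
print(np.bincount(bkm.labels_))           # roughly 50 points per cluster
|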
def test_enet_cv_positive_constraint():
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, X_test, y_test) = (X, y, X_test, y_test)
max_iter = 500
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=0.1, max_iter=max_iter, cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert min(enetcv_unconstrained.coef_) < 0
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=0.1, max_iter=max_iter, cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert min(enetcv_constrained.coef_) >= 0
|
def test_enet_cv_positive_constraint():
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, X_test, y_test) = (X, y, X_test, y_test)
</DeepExtract>
max_iter = 500
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=0.1, max_iter=max_iter, cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert min(enetcv_unconstrained.coef_) < 0
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=0.1, max_iter=max_iter, cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert min(enetcv_constrained.coef_) >= 0
|
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError('zero-size array to reduction operation')
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index = np.flatnonzero(np.diff(mat.indptr))
mat = type(mat)((mat.data, mat.indices, mat.indptr), shape=mat.shape)
value = min_or_max.reduceat(mat.data, mat.indptr[major_index])
(major_index, value) = (major_index, value)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
if axis == 0:
res = sp.coo_matrix((value, (np.zeros(len(value)), major_index)), dtype=X.dtype, shape=(1, M))
else:
res = sp.coo_matrix((value, (major_index, np.zeros(len(value)))), dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
|
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError('zero-size array to reduction operation')
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
<DeepExtract>
major_index = np.flatnonzero(np.diff(mat.indptr))
mat = type(mat)((mat.data, mat.indices, mat.indptr), shape=mat.shape)
value = min_or_max.reduceat(mat.data, mat.indptr[major_index])
(major_index, value) = (major_index, value)
</DeepExtract>
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
if axis == 0:
res = sp.coo_matrix((value, (np.zeros(len(value)), major_index)), dtype=X.dtype, shape=(1, M))
else:
res = sp.coo_matrix((value, (major_index, np.zeros(len(value)))), dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
|
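_min_or_max_axis computes per-row or per-column extrema of a sparse matrix by applying ufunc.reduceat to the stored values of each non-empty slice and then comparing against the implicit zeros. A compact sketch of the reduceat trick for column-wise maxima (assumed toy matrix):

import numpy as np
import scipy.sparse as sp

X = sp.csc_matrix(np.array([[0., -2., 3.],
                            [4.,  0., 0.],
                            [0., -1., 5.]]))
X.sum_duplicates()

nonempty = np.flatnonzero(np.diff(X.indptr))          # columns with stored values
col_max = np.zeros(X.shape[1])
col_max[nonempty] = np.maximum.reduceat(X.data, X.indptr[nonempty])

# Columns that also contain implicit zeros must be compared against 0.
not_full = np.diff(X.indptr)[nonempty] < X.shape[0]
col_max[nonempty[not_full]] = np.maximum(col_max[nonempty[not_full]], 0)
print(col_max)    # [4. 0. 5.]
|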
def test_multi_task_lasso_and_enet():
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, X_test, y_test) = (X, y, X_test, y_test)
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-08).fit(X, Y)
assert 0 < clf.dual_gap_ < 1e-05
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-08).fit(X, Y)
assert 0 < clf.dual_gap_ < 1e-05
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1.0, tol=1e-08, max_iter=1)
warning_message = 'Objective did not converge. You might want to increase the number of iterations.'
with pytest.warns(ConvergenceWarning, match=warning_message):
clf.fit(X, Y)
|
def test_multi_task_lasso_and_enet():
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, X_test, y_test) = (X, y, X_test, y_test)
</DeepExtract>
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-08).fit(X, Y)
assert 0 < clf.dual_gap_ < 1e-05
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-08).fit(X, Y)
assert 0 < clf.dual_gap_ < 1e-05
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1.0, tol=1e-08, max_iter=1)
warning_message = 'Objective did not converge. You might want to increase the number of iterations.'
with pytest.warns(ConvergenceWarning, match=warning_message):
clf.fit(X, Y)
|
def _parallel_func(X, y, fit_params, func):
"""Runs func in parallel on X and y"""
self.transformer_list = list(self.transformer_list)
(names, transformers) = zip(*self.transformer_list)
self._validate_names(names)
for t in transformers:
if t in ('drop', 'passthrough'):
continue
if not (hasattr(t, 'fit') or hasattr(t, 'fit_transform')) or not hasattr(t, 'transform'):
raise TypeError("All estimators should implement fit and transform. '%s' (type %s) doesn't" % (t, type(t)))
if not self.transformer_weights:
return
transformer_names = set((name for (name, _) in self.transformer_list))
for name in self.transformer_weights:
if name not in transformer_names:
raise ValueError(f'Attempting to weight transformer "{name}", but it is not present in transformer_list.')
transformers = list(self._iter())
return Parallel(n_jobs=self.n_jobs)((delayed(func)(transformer, X, y, weight, message_clsname='FeatureUnion', message=self._log_message(name, idx, len(transformers)), **fit_params) for (idx, (name, transformer, weight)) in enumerate(transformers, 1)))
|
def _parallel_func(X, y, fit_params, func):
"""Runs func in parallel on X and y"""
self.transformer_list = list(self.transformer_list)
<DeepExtract>
(names, transformers) = zip(*self.transformer_list)
self._validate_names(names)
for t in transformers:
if t in ('drop', 'passthrough'):
continue
if not (hasattr(t, 'fit') or hasattr(t, 'fit_transform')) or not hasattr(t, 'transform'):
raise TypeError("All estimators should implement fit and transform. '%s' (type %s) doesn't" % (t, type(t)))
</DeepExtract>
<DeepExtract>
if not self.transformer_weights:
return
transformer_names = set((name for (name, _) in self.transformer_list))
for name in self.transformer_weights:
if name not in transformer_names:
raise ValueError(f'Attempting to weight transformer "{name}", but it is not present in transformer_list.')
</DeepExtract>
transformers = list(self._iter())
return Parallel(n_jobs=self.n_jobs)((delayed(func)(transformer, X, y, weight, message_clsname='FeatureUnion', message=self._log_message(name, idx, len(transformers)), **fit_params) for (idx, (name, transformer, weight)) in enumerate(transformers, 1)))
|
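The validation helpers inlined above guard the public FeatureUnion API: every transformer must implement fit and transform, and every key of transformer_weights must name an entry of transformer_list. A small illustrative example:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(10, 4)

union = FeatureUnion(
    transformer_list=[("pca", PCA(n_components=2)), ("scale", StandardScaler())],
    transformer_weights={"pca": 2.0},   # must match a name in transformer_list
    n_jobs=1,
)
print(union.fit_transform(X).shape)     # (10, 6): 2 PCA components + 4 scaled features
|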
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Returns an instance of self.
"""
self._validate_params()
_normalize = _deprecate_normalize(self.normalize, estimator_name=self.__class__.__name__)
(X, y) = self._validate_data(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
(X, y, X_offset, y_offset, X_scale, Gram, Xy) = _pre_fit(X, y, None, self.precompute, _normalize, self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
X = check_array(X, order='F', copy=True)
True = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1:
True = True
if self.n_nonzero_coefs_ is None and self.tol is None:
self.n_nonzero_coefs_ = max(int(0.1 * X.shape[1]), 1)
if self.tol is None and self.n_nonzero_coefs_ > X.shape[1]:
raise ValueError('The number of atoms cannot be more than the number of features')
            precompute = False
            if precompute == 'auto':
                precompute = X.shape[0] > X.shape[1]
            if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if self.tol is not None:
norms_squared = np.sum(y ** 2, axis=0)
else:
norms_squared = None
(coef_, self.n_iter_) = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_squared, copy_Gram=True, copy_Xy=False, return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(X, y[:, k], self.n_nonzero_coefs_, self.tol, copy_X=True, return_path=return_path)
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if True:
(coef_, self.n_iter_) = (np.squeeze(coef), n_iters)
else:
(coef_, self.n_iter_) = np.squeeze(coef)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
Gram = check_array(Gram, order='F', copy=True)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
                copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if self.tol is not None:
norms_sq = [norms_sq]
if True or not Xy.flags.writeable:
Xy = Xy.copy()
if self.n_nonzero_coefs_ is None and self.tol is None:
self.n_nonzero_coefs_ = int(0.1 * len(Gram))
if self.tol is not None and norms_sq is None:
raise ValueError('Gram OMP needs the precomputed norms in order to evaluate the error sum of squares.')
if self.tol is not None and self.tol < 0:
raise ValueError('Epsilon cannot be negative')
if self.tol is None and self.n_nonzero_coefs_ <= 0:
raise ValueError('The number of atoms must be positive')
if self.tol is None and self.n_nonzero_coefs_ > len(Gram):
raise ValueError('The number of atoms cannot be more than the number of features')
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
else:
coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(Gram, Xy[:, k], self.n_nonzero_coefs_, norms_sq[k] if self.tol is not None else None, self.tol, copy_Gram=True, copy_Xy=False, return_path=return_path)
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if True:
(coef_, self.n_iter_) = (np.squeeze(coef), n_iters)
else:
(coef_, self.n_iter_) = np.squeeze(coef)
self.coef_ = coef_.T
self._set_intercept(X_offset, y_offset, X_scale)
return self
|
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Returns an instance of self.
"""
self._validate_params()
_normalize = _deprecate_normalize(self.normalize, estimator_name=self.__class__.__name__)
(X, y) = self._validate_data(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
(X, y, X_offset, y_offset, X_scale, Gram, Xy) = _pre_fit(X, y, None, self.precompute, _normalize, self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
<DeepExtract>
X = check_array(X, order='F', copy=True)
            copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1:
                copy_X = True
if self.n_nonzero_coefs_ is None and self.tol is None:
self.n_nonzero_coefs_ = max(int(0.1 * X.shape[1]), 1)
if self.tol is None and self.n_nonzero_coefs_ > X.shape[1]:
raise ValueError('The number of atoms cannot be more than the number of features')
            precompute = False
            if precompute == 'auto':
                precompute = X.shape[0] > X.shape[1]
            if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if self.tol is not None:
norms_squared = np.sum(y ** 2, axis=0)
else:
norms_squared = None
(coef_, self.n_iter_) = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_squared, copy_Gram=True, copy_Xy=False, return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(X, y[:, k], self.n_nonzero_coefs_, self.tol, copy_X=True, return_path=return_path)
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if True:
(coef_, self.n_iter_) = (np.squeeze(coef), n_iters)
else:
(coef_, self.n_iter_) = np.squeeze(coef)
</DeepExtract>
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
<DeepExtract>
Gram = check_array(Gram, order='F', copy=True)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
                copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if self.tol is not None:
norms_sq = [norms_sq]
if True or not Xy.flags.writeable:
Xy = Xy.copy()
if self.n_nonzero_coefs_ is None and self.tol is None:
self.n_nonzero_coefs_ = int(0.1 * len(Gram))
if self.tol is not None and norms_sq is None:
raise ValueError('Gram OMP needs the precomputed norms in order to evaluate the error sum of squares.')
if self.tol is not None and self.tol < 0:
raise ValueError('Epsilon cannot be negative')
if self.tol is None and self.n_nonzero_coefs_ <= 0:
raise ValueError('The number of atoms must be positive')
if self.tol is None and self.n_nonzero_coefs_ > len(Gram):
raise ValueError('The number of atoms cannot be more than the number of features')
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
else:
coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(Gram, Xy[:, k], self.n_nonzero_coefs_, norms_sq[k] if self.tol is not None else None, self.tol, copy_Gram=True, copy_Xy=False, return_path=return_path)
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if True:
(coef_, self.n_iter_) = (np.squeeze(coef), n_iters)
else:
(coef_, self.n_iter_) = np.squeeze(coef)
</DeepExtract>
self.coef_ = coef_.T
self._set_intercept(X_offset, y_offset, X_scale)
return self
|
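A hedged sketch of calling the public OrthogonalMatchingPursuit estimator whose fit is shown above; the toy data and coefficient values are assumptions, not part of the source.

import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.RandomState(0)
X = rng.randn(50, 20)
w = np.zeros(20)
w[:3] = [1.5, -2.0, 0.5]            # sparse ground-truth coefficients
y = X @ w + 0.01 * rng.randn(50)

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3)
omp.fit(X, y)
print(np.flatnonzero(omp.coef_))    # indices of the selected atoms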
def main(argv):
from optparse import OptionParser
op = OptionParser()
op.add_option('--output', action='store', type='str', dest='output', help='Path where to dump data.')
op = op
(opts, args) = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title('Scikit-learn Libsvm GUI')
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
|
def main(argv):
<DeepExtract>
from optparse import OptionParser
op = OptionParser()
op.add_option('--output', action='store', type='str', dest='output', help='Path where to dump data.')
op = op
</DeepExtract>
(opts, args) = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title('Scikit-learn Libsvm GUI')
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
|
def _load_arff_response(url: str, data_home: Optional[str], parser: str, output_type: str, openml_columns_info: dict, feature_names_to_select: List[str], target_names_to_select: List[str], shape: Optional[Tuple[int, int]], md5_checksum: str, n_retries: int=3, delay: float=1.0):
"""Load the ARFF data associated with the OpenML URL.
    In addition to loading the data, this function will also check the
integrity of the downloaded file from OpenML using MD5 checksum.
Parameters
----------
url : str
The URL of the ARFF file on OpenML.
data_home : str
The location where to cache the data.
parser : {"liac-arff", "pandas"}
The parser used to parse the ARFF file.
output_type : {"numpy", "pandas", "sparse"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
The list of the features to be selected.
target_names_to_select : list of str
The list of the target variables to be selected.
shape : tuple or None
With `parser="liac-arff"`, when using a generator to load the data,
one needs to provide the shape of the data beforehand.
md5_checksum : str
The MD5 checksum provided by OpenML to check the data integrity.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
def is_gzip_encoded(_fsrc):
gzip_file = _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + url)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
if is_gzip_encoded(fsrc):
gzip_file = gzip.GzipFile(fileobj=fsrc, mode='rb')
gzip_file = fsrc
local_path = _get_local_path(url, data_home)
(dir_name, file_name) = os.path.split(local_path)
if not os.path.exists(local_path):
os.makedirs(dir_name, exist_ok=True)
try:
with TemporaryDirectory(dir=dir_name) as tmpdir:
with closing(_retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(os.path.join(tmpdir, file_name), 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
shutil.move(fdst.name, local_path)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
gzip_file = gzip.GzipFile(local_path, 'rb')
with closing(gzip_file):
md5 = hashlib.md5()
for chunk in iter(lambda : gzip_file.read(4096), b''):
md5.update(chunk)
actual_md5_checksum = md5.hexdigest()
if actual_md5_checksum != md5_checksum:
raise ValueError(f'md5 checksum of local file for {url} does not match description: expected: {md5_checksum} but got {actual_md5_checksum}. Downloaded file could have been modified / corrupted, clean cache and retry...')
def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params):
def is_gzip_encoded(_fsrc):
gzip_file = _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + url)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
if is_gzip_encoded(fsrc):
gzip_file = gzip.GzipFile(fileobj=fsrc, mode='rb')
gzip_file = fsrc
local_path = _get_local_path(url, data_home)
(dir_name, file_name) = os.path.split(local_path)
if not os.path.exists(local_path):
os.makedirs(dir_name, exist_ok=True)
try:
with TemporaryDirectory(dir=dir_name) as tmpdir:
with closing(_retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(os.path.join(tmpdir, file_name), 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
shutil.move(fdst.name, local_path)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
gzip_file = gzip.GzipFile(local_path, 'rb')
with closing(gzip_file):
return load_arff_from_gzip_file(gzip_file, **arff_params)
arff_params = dict(parser=parser, output_type=output_type, openml_columns_info=openml_columns_info, feature_names_to_select=feature_names_to_select, target_names_to_select=target_names_to_select, shape=shape)
try:
gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
with closing(gzip_file):
(X, y, frame, categories) = load_arff_from_gzip_file(gzip_file, **arff_params)
except Exception as exc:
if parser == 'pandas':
from pandas.errors import ParserError
if isinstance(exc, ParserError):
arff_params['read_csv_kwargs'] = {'quotechar': "'"}
gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
with closing(gzip_file):
(X, y, frame, categories) = load_arff_from_gzip_file(gzip_file, **arff_params)
else:
raise
return (X, y, frame, categories)
|
def _load_arff_response(url: str, data_home: Optional[str], parser: str, output_type: str, openml_columns_info: dict, feature_names_to_select: List[str], target_names_to_select: List[str], shape: Optional[Tuple[int, int]], md5_checksum: str, n_retries: int=3, delay: float=1.0):
"""Load the ARFF data associated with the OpenML URL.
    In addition to loading the data, this function will also check the
integrity of the downloaded file from OpenML using MD5 checksum.
Parameters
----------
url : str
The URL of the ARFF file on OpenML.
data_home : str
The location where to cache the data.
parser : {"liac-arff", "pandas"}
The parser used to parse the ARFF file.
output_type : {"numpy", "pandas", "sparse"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
The list of the features to be selected.
target_names_to_select : list of str
The list of the target variables to be selected.
shape : tuple or None
With `parser="liac-arff"`, when using a generator to load the data,
one needs to provide the shape of the data beforehand.
md5_checksum : str
The MD5 checksum provided by OpenML to check the data integrity.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
<DeepExtract>
def is_gzip_encoded(_fsrc):
gzip_file = _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + url)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
if is_gzip_encoded(fsrc):
gzip_file = gzip.GzipFile(fileobj=fsrc, mode='rb')
gzip_file = fsrc
local_path = _get_local_path(url, data_home)
(dir_name, file_name) = os.path.split(local_path)
if not os.path.exists(local_path):
os.makedirs(dir_name, exist_ok=True)
try:
with TemporaryDirectory(dir=dir_name) as tmpdir:
with closing(_retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(os.path.join(tmpdir, file_name), 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
shutil.move(fdst.name, local_path)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
gzip_file = gzip.GzipFile(local_path, 'rb')
</DeepExtract>
with closing(gzip_file):
md5 = hashlib.md5()
for chunk in iter(lambda : gzip_file.read(4096), b''):
md5.update(chunk)
actual_md5_checksum = md5.hexdigest()
if actual_md5_checksum != md5_checksum:
raise ValueError(f'md5 checksum of local file for {url} does not match description: expected: {md5_checksum} but got {actual_md5_checksum}. Downloaded file could have been modified / corrupted, clean cache and retry...')
def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params):
<DeepExtract>
def is_gzip_encoded(_fsrc):
gzip_file = _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + url)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
if is_gzip_encoded(fsrc):
gzip_file = gzip.GzipFile(fileobj=fsrc, mode='rb')
gzip_file = fsrc
local_path = _get_local_path(url, data_home)
(dir_name, file_name) = os.path.split(local_path)
if not os.path.exists(local_path):
os.makedirs(dir_name, exist_ok=True)
try:
with TemporaryDirectory(dir=dir_name) as tmpdir:
with closing(_retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(os.path.join(tmpdir, file_name), 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
shutil.move(fdst.name, local_path)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
gzip_file = gzip.GzipFile(local_path, 'rb')
</DeepExtract>
with closing(gzip_file):
return load_arff_from_gzip_file(gzip_file, **arff_params)
arff_params = dict(parser=parser, output_type=output_type, openml_columns_info=openml_columns_info, feature_names_to_select=feature_names_to_select, target_names_to_select=target_names_to_select, shape=shape)
try:
<DeepExtract>
gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
with closing(gzip_file):
(X, y, frame, categories) = load_arff_from_gzip_file(gzip_file, **arff_params)
</DeepExtract>
except Exception as exc:
if parser == 'pandas':
from pandas.errors import ParserError
if isinstance(exc, ParserError):
arff_params['read_csv_kwargs'] = {'quotechar': "'"}
<DeepExtract>
gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
with closing(gzip_file):
(X, y, frame, categories) = load_arff_from_gzip_file(gzip_file, **arff_params)
</DeepExtract>
else:
raise
return (X, y, frame, categories)
|
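_load_arff_response sits behind the public fetch_openml loader; a minimal sketch of that public call follows (it requires network access, and the dataset name, version and parser choice are illustrative).

from sklearn.datasets import fetch_openml

iris = fetch_openml("iris", version=1, as_frame=True, parser="liac-arff")
print(iris.frame.shape)   # features and target combined in one DataFrame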
def fit(self, X, y=None):
"""Generate a sparse random projection matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
BaseRandomProjection class instance.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=['csr', 'csc'], dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if self.n_components == 'auto':
self.eps = np.asarray(self.eps)
n_samples = np.asarray(n_samples)
if np.any(self.eps <= 0.0) or np.any(self.eps >= 1):
raise ValueError('The JL bound is defined for eps in ]0, 1[, got %r' % self.eps)
if np.any(n_samples <= 0):
raise ValueError('The JL bound is defined for n_samples greater than zero, got %r' % n_samples)
denominator = self.eps ** 2 / 2 - self.eps ** 3 / 3
self.n_components_ = (4 * np.log(n_samples) / denominator).astype(np.int64)
if self.n_components_ <= 0:
raise ValueError('eps=%f and n_samples=%d lead to a target dimension of %d which is invalid' % (self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError('eps=%f and n_samples=%d lead to a target dimension of %d which is larger than the original space with n_features=%d' % (self.eps, n_samples, self.n_components_, n_features))
else:
if self.n_components > n_features:
                warnings.warn('The number of components is higher than the number of features: n_features < n_components (%s < %s). The dimensionality of the problem will not be reduced.' % (n_features, self.n_components), DataDimensionalityWarning)
self.n_components_ = self.n_components
self.components_ = self._make_random_matrix(self.n_components_, n_features).astype(X.dtype, copy=False)
if self.compute_inverse_components:
components = self.components_
if sp.issparse(components):
components = components.toarray()
self.inverse_components_ = linalg.pinv(components, check_finite=False)
self._n_features_out = self.n_components
return self
|
def fit(self, X, y=None):
"""Generate a sparse random projection matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
BaseRandomProjection class instance.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=['csr', 'csc'], dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if self.n_components == 'auto':
<DeepExtract>
self.eps = np.asarray(self.eps)
n_samples = np.asarray(n_samples)
if np.any(self.eps <= 0.0) or np.any(self.eps >= 1):
raise ValueError('The JL bound is defined for eps in ]0, 1[, got %r' % self.eps)
if np.any(n_samples <= 0):
raise ValueError('The JL bound is defined for n_samples greater than zero, got %r' % n_samples)
denominator = self.eps ** 2 / 2 - self.eps ** 3 / 3
self.n_components_ = (4 * np.log(n_samples) / denominator).astype(np.int64)
</DeepExtract>
if self.n_components_ <= 0:
raise ValueError('eps=%f and n_samples=%d lead to a target dimension of %d which is invalid' % (self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError('eps=%f and n_samples=%d lead to a target dimension of %d which is larger than the original space with n_features=%d' % (self.eps, n_samples, self.n_components_, n_features))
else:
if self.n_components > n_features:
                warnings.warn('The number of components is higher than the number of features: n_features < n_components (%s < %s). The dimensionality of the problem will not be reduced.' % (n_features, self.n_components), DataDimensionalityWarning)
self.n_components_ = self.n_components
self.components_ = self._make_random_matrix(self.n_components_, n_features).astype(X.dtype, copy=False)
if self.compute_inverse_components:
<DeepExtract>
components = self.components_
if sp.issparse(components):
components = components.toarray()
self.inverse_components_ = linalg.pinv(components, check_finite=False)
</DeepExtract>
self._n_features_out = self.n_components
return self
|
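A usage sketch of the public random-projection API that the fit above implements, including the Johnson-Lindenstrauss helper used by the 'auto' mode; the sizes and eps value are illustrative.

import numpy as np
from sklearn.random_projection import (
    GaussianRandomProjection,
    johnson_lindenstrauss_min_dim,
)

X = np.random.RandomState(42).rand(100, 10000)
# The 'auto' mode derives n_components from the Johnson-Lindenstrauss bound.
print(johnson_lindenstrauss_min_dim(n_samples=100, eps=0.5))
rp = GaussianRandomProjection(n_components="auto", eps=0.5, random_state=42)
X_new = rp.fit_transform(X)
print(X_new.shape)   # (100, n_components_), with n_components_ << 10000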
def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
dtype = [np.float64, np.float32] if False else self.components_.dtype
X = self._validate_data(X, reset=False, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.transform')
X = X
(doc_topic_distr, _) = self._e_step(X, cal_sstats=False, random_init=False)
doc_topic_distr = doc_topic_distr
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
|
def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
<DeepExtract>
dtype = [np.float64, np.float32] if False else self.components_.dtype
X = self._validate_data(X, reset=False, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.transform')
X = X
</DeepExtract>
<DeepExtract>
(doc_topic_distr, _) = self._e_step(X, cal_sstats=False, random_init=False)
doc_topic_distr = doc_topic_distr
</DeepExtract>
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
|
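A minimal sketch of the public transform shown above; the toy count matrix and hyper-parameters are assumptions.

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.RandomState(0)
X = rng.randint(0, 5, size=(20, 30))           # toy document-word count matrix
lda = LatentDirichletAllocation(n_components=4, random_state=0)
lda.fit(X)
doc_topic = lda.transform(X)
print(doc_topic.shape, doc_topic.sum(axis=1)[:3])  # each row is normalized to 1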
def get_impute_iterative(X_missing, y_missing):
imputer = IterativeImputer(missing_values=np.nan, add_indicator=True, random_state=0, n_nearest_features=3, max_iter=1, sample_posterior=True)
estimator = make_pipeline(imputer, regressor)
impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS)
iterative_impute_scores = impute_scores
return (iterative_impute_scores.mean(), iterative_impute_scores.std())
|
def get_impute_iterative(X_missing, y_missing):
imputer = IterativeImputer(missing_values=np.nan, add_indicator=True, random_state=0, n_nearest_features=3, max_iter=1, sample_posterior=True)
<DeepExtract>
estimator = make_pipeline(imputer, regressor)
impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS)
iterative_impute_scores = impute_scores
</DeepExtract>
return (iterative_impute_scores.mean(), iterative_impute_scores.std())
|
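The helper above closes over a regressor and N_SPLITS defined elsewhere in the original example; this standalone sketch substitutes illustrative choices for both.

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=200, n_features=8, random_state=0)
X[::10, 0] = np.nan                            # introduce some missing values
imputer = IterativeImputer(missing_values=np.nan, add_indicator=True,
                           random_state=0, n_nearest_features=3,
                           max_iter=1, sample_posterior=True)
pipe = make_pipeline(imputer, RandomForestRegressor(random_state=0))
scores = cross_val_score(pipe, X, y, scoring="neg_mean_squared_error", cv=4)
print(scores.mean(), scores.std())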
def test_logistic_regression_class_weights():
X_iris = scale(iris.data)
X = X_iris[45:, :]
y = iris.target[45:]
solvers = ('lbfgs', 'newton-cg')
classes = np.unique(y)
class_weight = compute_class_weight('balanced', classes=classes, y=y)
class_weight_dict = dict(zip(classes, class_weight))
class_weight_dict = class_weight_dict
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class='multinomial', class_weight='balanced')
clf2 = LogisticRegression(solver=solver, multi_class='multinomial', class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
X = X_iris[45:100, :]
y = iris.target[45:100]
classes = np.unique(y)
class_weight = compute_class_weight('balanced', classes=classes, y=y)
class_weight_dict = dict(zip(classes, class_weight))
class_weight_dict = class_weight_dict
for solver in set(SOLVERS) - set(('sag', 'saga')):
clf1 = LogisticRegression(solver=solver, multi_class='ovr', class_weight='balanced')
clf2 = LogisticRegression(solver=solver, multi_class='ovr', class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
|
def test_logistic_regression_class_weights():
X_iris = scale(iris.data)
X = X_iris[45:, :]
y = iris.target[45:]
solvers = ('lbfgs', 'newton-cg')
<DeepExtract>
classes = np.unique(y)
class_weight = compute_class_weight('balanced', classes=classes, y=y)
class_weight_dict = dict(zip(classes, class_weight))
class_weight_dict = class_weight_dict
</DeepExtract>
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class='multinomial', class_weight='balanced')
clf2 = LogisticRegression(solver=solver, multi_class='multinomial', class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
X = X_iris[45:100, :]
y = iris.target[45:100]
<DeepExtract>
classes = np.unique(y)
class_weight = compute_class_weight('balanced', classes=classes, y=y)
class_weight_dict = dict(zip(classes, class_weight))
class_weight_dict = class_weight_dict
</DeepExtract>
for solver in set(SOLVERS) - set(('sag', 'saga')):
clf1 = LogisticRegression(solver=solver, multi_class='ovr', class_weight='balanced')
clf2 = LogisticRegression(solver=solver, multi_class='ovr', class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
|
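What the inlined helper in the test computes: 'balanced' class weights, the same weights LogisticRegression(class_weight='balanced') applies internally; the label vector here is illustrative.

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
classes = np.unique(y)
weights = compute_class_weight("balanced", classes=classes, y=y)
# n_samples / (n_classes * bincount): {0: 1.5, 1: 0.75}
print(dict(zip(classes, weights)))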
def partial_fit(self, X, y=None):
"""Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Return the instance itself.
"""
has_components = hasattr(self, 'components_')
if not has_components:
self._validate_params()
X = self._validate_data(X, dtype=[np.float64, np.float32], order='C', reset=not has_components)
if not has_components:
self._n_components = self.n_components
if self._n_components is None:
self._n_components = X.shape[1]
_check_positive_coding(self.fit_algorithm, self.positive_code)
self._fit_algorithm = 'lasso_' + self.fit_algorithm
self._batch_size = min(self.batch_size, X.shape[0])
self._random_state = check_random_state(self.random_state)
if self.dict_init is not None:
dictionary = self.dict_init
else:
(_, S, dictionary) = randomized_svd(X, self._n_components, random_state=self._random_state)
dictionary = S[:, np.newaxis] * dictionary
if self._n_components <= len(dictionary):
dictionary = dictionary[:self._n_components, :]
else:
dictionary = np.concatenate((dictionary, np.zeros((self._n_components - len(dictionary), dictionary.shape[1]), dtype=dictionary.dtype)))
dictionary = check_array(dictionary, order='F', dtype=X.dtype, copy=False)
dictionary = np.require(dictionary, requirements='W')
dictionary = dictionary
self.n_steps_ = 0
self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
else:
dictionary = self.components_
batch_size = X.shape[0]
code = _sparse_encode(X, dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)
batch_cost = (0.5 * ((X - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
self._update_inner_stats(X, code, batch_size, self.n_steps_)
_update_dict(dictionary, X, code, self._A, self._B, verbose=self.verbose, random_state=self._random_state, positive=self.positive_dict)
return batch_cost
self.components_ = dictionary
self.n_steps_ += 1
return self
|
def partial_fit(self, X, y=None):
"""Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Return the instance itself.
"""
has_components = hasattr(self, 'components_')
if not has_components:
self._validate_params()
X = self._validate_data(X, dtype=[np.float64, np.float32], order='C', reset=not has_components)
if not has_components:
<DeepExtract>
self._n_components = self.n_components
if self._n_components is None:
self._n_components = X.shape[1]
_check_positive_coding(self.fit_algorithm, self.positive_code)
self._fit_algorithm = 'lasso_' + self.fit_algorithm
self._batch_size = min(self.batch_size, X.shape[0])
</DeepExtract>
self._random_state = check_random_state(self.random_state)
<DeepExtract>
if self.dict_init is not None:
dictionary = self.dict_init
else:
(_, S, dictionary) = randomized_svd(X, self._n_components, random_state=self._random_state)
dictionary = S[:, np.newaxis] * dictionary
if self._n_components <= len(dictionary):
dictionary = dictionary[:self._n_components, :]
else:
dictionary = np.concatenate((dictionary, np.zeros((self._n_components - len(dictionary), dictionary.shape[1]), dtype=dictionary.dtype)))
dictionary = check_array(dictionary, order='F', dtype=X.dtype, copy=False)
dictionary = np.require(dictionary, requirements='W')
dictionary = dictionary
</DeepExtract>
self.n_steps_ = 0
self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
else:
dictionary = self.components_
<DeepExtract>
batch_size = X.shape[0]
code = _sparse_encode(X, dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)
batch_cost = (0.5 * ((X - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
self._update_inner_stats(X, code, batch_size, self.n_steps_)
_update_dict(dictionary, X, code, self._A, self._B, verbose=self.verbose, random_state=self._random_state, positive=self.positive_dict)
return batch_cost
</DeepExtract>
self.components_ = dictionary
self.n_steps_ += 1
return self
|
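A sketch of driving the public partial_fit shown above in an online loop; the batch construction and hyper-parameters are illustrative.

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
X = rng.randn(200, 16)
dico = MiniBatchDictionaryLearning(n_components=8, batch_size=20,
                                   random_state=0)
for batch in np.array_split(X, 10):
    dico.partial_fit(batch)          # one online update of the dictionary per batch
print(dico.components_.shape)        # (8, 16)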
def test_enet_multitarget():
n_targets = 3
random_state = np.random.RandomState(seed)
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
(X, y) = (X, y)
estimator = ElasticNet(alpha=0.01, precompute=False)
estimator.fit(X, y)
(coef, intercept, dual_gap) = (estimator.coef_, estimator.intercept_, estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
|
def test_enet_multitarget():
n_targets = 3
<DeepExtract>
random_state = np.random.RandomState(seed)
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
(X, y) = (X, y)
</DeepExtract>
estimator = ElasticNet(alpha=0.01, precompute=False)
estimator.fit(X, y)
(coef, intercept, dual_gap) = (estimator.coef_, estimator.intercept_, estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
|
def test_tol_parameter():
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
max_iter = 42
_update_kwargs(kwargs)
model_0 = linear_model.SGDClassifier(**kwargs)
model_0.fit(X, y)
assert max_iter == model_0.n_iter_
max_iter = 2000
_update_kwargs(kwargs)
model_1 = linear_model.SGDClassifier(**kwargs)
model_1.fit(X, y)
assert max_iter > model_1.n_iter_
assert model_1.n_iter_ > 5
_update_kwargs(kwargs)
model_2 = linear_model.SGDClassifier(**kwargs)
model_2.fit(X, y)
assert model_1.n_iter_ > model_2.n_iter_
assert model_2.n_iter_ > 3
_update_kwargs(kwargs)
model_3 = linear_model.SGDClassifier(**kwargs)
warning_message = 'Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.'
with pytest.warns(ConvergenceWarning, match=warning_message):
model_3.fit(X, y)
assert model_3.n_iter_ == 3
|
def test_tol_parameter():
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
max_iter = 42
<DeepExtract>
_update_kwargs(kwargs)
model_0 = linear_model.SGDClassifier(**kwargs)
</DeepExtract>
model_0.fit(X, y)
assert max_iter == model_0.n_iter_
max_iter = 2000
<DeepExtract>
_update_kwargs(kwargs)
model_1 = linear_model.SGDClassifier(**kwargs)
</DeepExtract>
model_1.fit(X, y)
assert max_iter > model_1.n_iter_
assert model_1.n_iter_ > 5
<DeepExtract>
_update_kwargs(kwargs)
model_2 = linear_model.SGDClassifier(**kwargs)
</DeepExtract>
model_2.fit(X, y)
assert model_1.n_iter_ > model_2.n_iter_
assert model_2.n_iter_ > 3
<DeepExtract>
_update_kwargs(kwargs)
model_3 = linear_model.SGDClassifier(**kwargs)
</DeepExtract>
warning_message = 'Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.'
with pytest.warns(ConvergenceWarning, match=warning_message):
model_3.fit(X, y)
assert model_3.n_iter_ == 3
|
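The test above exercises how tol and max_iter interact in SGDClassifier; a hedged illustration of the same knobs on the iris data it uses.

from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier

X = StandardScaler().fit_transform(load_iris().data)
y = load_iris().target == 1
clf = SGDClassifier(tol=1e-3, max_iter=1000, random_state=0).fit(X, y)
print(clf.n_iter_)   # stops well before max_iter once the tol criterion is met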
def test_multilabel_confusion_matrix_binary():
if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if True:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if True:
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, probas_pred)
def test(y_true, y_pred):
cm = multilabel_confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[[17, 8], [3, 22]], [[22, 3], [8, 17]]])
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
cm = confusion_matrix([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
|
def test_multilabel_confusion_matrix_binary():
<DeepExtract>
if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if True:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if True:
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, probas_pred)
</DeepExtract>
def test(y_true, y_pred):
cm = multilabel_confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[[17, 8], [3, 22]], [[22, 3], [8, 17]]])
<DeepExtract>
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
</DeepExtract>
<DeepExtract>
cm = confusion_matrix([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
</DeepExtract>
|
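A small illustration of the metrics the test above asserts on, with made-up labels.

from sklearn.metrics import multilabel_confusion_matrix, confusion_matrix

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
# One 2x2 matrix per class, laid out as [[tn, fp], [fn, tp]] with that class as positive.
print(multilabel_confusion_matrix(y_true, y_pred))
print(confusion_matrix(y_true, y_pred))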
def check_verbosity(verbose, evaluate_every, expected_lines, expected_perplexities):
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3, learning_method='batch', verbose=verbose, evaluate_every=evaluate_every, random_state=0)
out = StringIO()
(old_out, sys.stdout) = (sys.stdout, out)
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert expected_lines == n_lines
assert expected_perplexities == n_perplexity
|
def check_verbosity(verbose, evaluate_every, expected_lines, expected_perplexities):
<DeepExtract>
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
</DeepExtract>
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3, learning_method='batch', verbose=verbose, evaluate_every=evaluate_every, random_state=0)
out = StringIO()
(old_out, sys.stdout) = (sys.stdout, out)
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert expected_lines == n_lines
assert expected_perplexities == n_perplexity
|
def test_classification_report_multiclass_with_digits():
iris = datasets.load_iris()
if iris is None:
iris = datasets.load_iris()
X = iris.data
y = iris.target
if False:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if False:
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, probas_pred)
expected_report = ' precision recall f1-score support\n\n setosa 0.82609 0.79167 0.80851 24\n versicolor 0.33333 0.09677 0.15000 31\n virginica 0.41860 0.90000 0.57143 20\n\n accuracy 0.53333 75\n macro avg 0.52601 0.59615 0.50998 75\nweighted avg 0.51375 0.53333 0.47310 75\n'
report = classification_report(y_true, y_pred, labels=np.arange(len(iris.target_names)), target_names=iris.target_names, digits=5)
assert report == expected_report
|
def test_classification_report_multiclass_with_digits():
iris = datasets.load_iris()
<DeepExtract>
if iris is None:
iris = datasets.load_iris()
X = iris.data
y = iris.target
if False:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if False:
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, probas_pred)
</DeepExtract>
expected_report = ' precision recall f1-score support\n\n setosa 0.82609 0.79167 0.80851 24\n versicolor 0.33333 0.09677 0.15000 31\n virginica 0.41860 0.90000 0.57143 20\n\n accuracy 0.53333 75\n macro avg 0.52601 0.59615 0.50998 75\nweighted avg 0.51375 0.53333 0.47310 75\n'
report = classification_report(y_true, y_pred, labels=np.arange(len(iris.target_names)), target_names=iris.target_names, digits=5)
assert report == expected_report
|
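A minimal classification_report call using the same digits option the test checks; the labels and target names here are illustrative.

from sklearn.metrics import classification_report

y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 2, 1, 1]
print(classification_report(y_true, y_pred,
                            target_names=["setosa", "versicolor", "virginica"],
                            digits=5))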
def predict(self, X):
"""
Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to get the predictions.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Vector containing the class labels for each sample.
"""
(xp, _) = get_namespace(X)
check_is_fitted(self)
(xp, _) = get_namespace(X)
X = self._validate_data(X, accept_sparse='csr', reset=False)
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
scores = xp.reshape(scores, -1) if scores.shape[1] == 1 else scores
if len(scores.shape) == 1:
indices = xp.astype(scores > 0, int)
else:
indices = xp.argmax(scores, axis=1)
return xp.take(self.classes_, indices, axis=0)
|
def predict(self, X):
"""
Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to get the predictions.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Vector containing the class labels for each sample.
"""
(xp, _) = get_namespace(X)
<DeepExtract>
check_is_fitted(self)
(xp, _) = get_namespace(X)
X = self._validate_data(X, accept_sparse='csr', reset=False)
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
scores = xp.reshape(scores, -1) if scores.shape[1] == 1 else scores
</DeepExtract>
if len(scores.shape) == 1:
indices = xp.astype(scores > 0, int)
else:
indices = xp.argmax(scores, axis=1)
return xp.take(self.classes_, indices, axis=0)
|
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
t_start = datetime.now()
clf.fit(X)
delta = datetime.now() - t_start
mu_second = 0.0 + 10 ** 6
time_to_fit = delta.seconds + delta.microseconds / mu_second
t_start = datetime.now()
clf.transform(X)
delta = datetime.now() - t_start
mu_second = 0.0 + 10 ** 6
time_to_transform = delta.seconds + delta.microseconds / mu_second
return (time_to_fit, time_to_transform)
|
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
t_start = datetime.now()
clf.fit(X)
delta = datetime.now() - t_start
<DeepExtract>
mu_second = 0.0 + 10 ** 6
time_to_fit = delta.seconds + delta.microseconds / mu_second
</DeepExtract>
t_start = datetime.now()
clf.transform(X)
delta = datetime.now() - t_start
<DeepExtract>
mu_second = 0.0 + 10 ** 6
time_to_transform = delta.seconds + delta.microseconds / mu_second
</DeepExtract>
return (time_to_fit, time_to_transform)
|
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
"""
if self.classes is not None:
return self.fit(y).transform(y)
self._validate_params()
self._cached_dict = None
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
indices = array.array('i')
indptr = array.array('i', [0])
unknown = set()
for labels in y:
index = set()
for label in labels:
try:
index.add(class_mapping[label])
except KeyError:
unknown.add(label)
indices.extend(index)
indptr.append(len(indices))
if unknown:
warnings.warn('unknown class(es) {0} will be ignored'.format(sorted(unknown, key=str)))
data = np.ones(len(indices), dtype=int)
yt = sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping)))
tmp = sorted(class_mapping, key=class_mapping.get)
dtype = int if all((isinstance(c, int) for c in tmp)) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
(self.classes_, inverse) = np.unique(class_mapping, return_inverse=True)
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype, copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
|
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
"""
if self.classes is not None:
return self.fit(y).transform(y)
self._validate_params()
self._cached_dict = None
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
<DeepExtract>
indices = array.array('i')
indptr = array.array('i', [0])
unknown = set()
for labels in y:
index = set()
for label in labels:
try:
index.add(class_mapping[label])
except KeyError:
unknown.add(label)
indices.extend(index)
indptr.append(len(indices))
if unknown:
warnings.warn('unknown class(es) {0} will be ignored'.format(sorted(unknown, key=str)))
data = np.ones(len(indices), dtype=int)
yt = sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping)))
</DeepExtract>
tmp = sorted(class_mapping, key=class_mapping.get)
dtype = int if all((isinstance(c, int) for c in tmp)) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
(self.classes_, inverse) = np.unique(class_mapping, return_inverse=True)
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype, copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
|
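Usage sketch for the fit_transform shown above; the label sets are illustrative.

from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
Y = mlb.fit_transform([{"sci-fi", "thriller"}, {"comedy"}, {"comedy", "thriller"}])
print(mlb.classes_)   # ['comedy' 'sci-fi' 'thriller']
print(Y)              # one indicator column per class, one row per label set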
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
"""
self._validate_params()
if issparse(X):
raise TypeError('PCA does not support sparse input. See TruncatedSVD for a possible alternative.')
X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=self.copy)
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = 'randomized'
else:
self._fit_svd_solver = 'full'
if self._fit_svd_solver == 'full':
(U, S, Vt) = self._fit_full(X, n_components)
elif self._fit_svd_solver in ['arpack', 'randomized']:
(U, S, Vt) = self._fit_truncated(X, n_components, self._fit_svd_solver)
U = U[:, :self.n_components_]
if self.whiten:
U *= sqrt(X.shape[0] - 1)
else:
U *= S[:self.n_components_]
return U
|
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
"""
self._validate_params()
<DeepExtract>
if issparse(X):
raise TypeError('PCA does not support sparse input. See TruncatedSVD for a possible alternative.')
X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=self.copy)
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = 'randomized'
else:
self._fit_svd_solver = 'full'
if self._fit_svd_solver == 'full':
(U, S, Vt) = self._fit_full(X, n_components)
elif self._fit_svd_solver in ['arpack', 'randomized']:
(U, S, Vt) = self._fit_truncated(X, n_components, self._fit_svd_solver)
</DeepExtract>
U = U[:, :self.n_components_]
if self.whiten:
U *= sqrt(X.shape[0] - 1)
else:
U *= S[:self.n_components_]
return U
|
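Usage sketch for the fit_transform above; the solver choice and data are illustrative.

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).rand(100, 6)
pca = PCA(n_components=2, svd_solver="full")
X_2d = pca.fit_transform(X)
print(X_2d.shape, pca.explained_variance_ratio_)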
@pytest.mark.parametrize('svd_solver', PCA_SOLVERS)
def test_pca_dtype_preservation(svd_solver):
X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False)
X_32 = X_64.astype(np.float32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float32
assert pca_64.transform(X_64).dtype == np.float64
assert pca_32.transform(X_32).dtype == np.float32
assert_allclose(pca_64.components_, pca_32.components_, rtol=0.0002)
X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
X_i64 = X_i64.astype(np.int64, copy=False)
X_i32 = X_i64.astype(np.int32, copy=False)
pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float64
assert pca_64.transform(X_i64).dtype == np.float64
assert pca_32.transform(X_i32).dtype == np.float64
assert_allclose(pca_64.components_, pca_32.components_, rtol=0.0001)
|
@pytest.mark.parametrize('svd_solver', PCA_SOLVERS)
def test_pca_dtype_preservation(svd_solver):
<DeepExtract>
X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64, copy=False)
X_32 = X_64.astype(np.float32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float32
assert pca_64.transform(X_64).dtype == np.float64
assert pca_32.transform(X_32).dtype == np.float32
assert_allclose(pca_64.components_, pca_32.components_, rtol=0.0002)
</DeepExtract>
<DeepExtract>
X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
X_i64 = X_i64.astype(np.int64, copy=False)
X_i32 = X_i64.astype(np.int32, copy=False)
pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float64
assert pca_64.transform(X_i64).dtype == np.float64
assert pca_32.transform(X_i32).dtype == np.float64
assert_allclose(pca_64.components_, pca_32.components_, rtol=0.0001)
</DeepExtract>
|
def test_huber_better_r2_score():
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
huber = HuberRegressor(alpha=0.01)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
ridge = Ridge(alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert huber_score > ridge_score
assert ridge_outlier_score > huber_outlier_score
|
def test_huber_better_r2_score():
<DeepExtract>
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
huber = HuberRegressor(alpha=0.01)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
ridge = Ridge(alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert huber_score > ridge_score
assert ridge_outlier_score > huber_outlier_score
|
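A sketch contrasting HuberRegressor and Ridge on data with a handful of corrupted rows, in the spirit of the test above; the sizes and corruption scheme are assumptions.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, Ridge

rng = np.random.RandomState(0)
X, y = make_regression(n_samples=200, n_features=10, noise=0.05, random_state=0)
X[rng.randint(0, 200, 20), :] = 2.0 * rng.normal(0, 1, (20, 10))   # corrupt ~10% of rows
huber = HuberRegressor(alpha=0.01).fit(X, y)
ridge = Ridge(alpha=0.01).fit(X, y)
# Inliers according to the Huber fit: residual below epsilon * scale_.
inliers = np.abs(X @ huber.coef_ + huber.intercept_ - y) < huber.epsilon * huber.scale_
print(huber.score(X[inliers], y[inliers]), ridge.score(X[inliers], y[inliers]))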
def build_analyzer(self):
"""Return a callable to process input data.
The callable handles preprocessing, tokenization, and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
if self.preprocessor is not None:
preprocess = self.preprocessor
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents)
preprocess = partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
if self.analyzer == 'char':
return partial(_analyze, ngrams=self._char_ngrams, preprocessor=preprocess, decoder=self.decode)
elif self.analyzer == 'char_wb':
return partial(_analyze, ngrams=self._char_wb_ngrams, preprocessor=preprocess, decoder=self.decode)
elif self.analyzer == 'word':
stop_words = _check_stop_list(self.stop_words)
if self.tokenizer is not None:
tokenize = self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError('More than 1 capturing group in token pattern. Only a single group should be captured.')
tokenize = token_pattern.findall
if id(self.stop_words) == getattr(self, '_stop_words_id', None):
return None
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn('Your stop_words may be inconsistent with your preprocessing. Tokenizing the stop words generated tokens %r not in stop_words.' % sorted(inconsistent))
return not inconsistent
except Exception:
self._stop_words_id = id(self.stop_words)
return 'error'
return partial(_analyze, ngrams=self._word_ngrams, tokenizer=tokenize, preprocessor=preprocess, decoder=self.decode, stop_words=stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer)
|
def build_analyzer(self):
"""Return a callable to process input data.
The callable handles preprocessing, tokenization, and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
<DeepExtract>
if self.preprocessor is not None:
preprocess = self.preprocessor
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents)
preprocess = partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
</DeepExtract>
if self.analyzer == 'char':
return partial(_analyze, ngrams=self._char_ngrams, preprocessor=preprocess, decoder=self.decode)
elif self.analyzer == 'char_wb':
return partial(_analyze, ngrams=self._char_wb_ngrams, preprocessor=preprocess, decoder=self.decode)
elif self.analyzer == 'word':
<DeepExtract>
stop_words = _check_stop_list(self.stop_words)
</DeepExtract>
<DeepExtract>
if self.tokenizer is not None:
tokenize = self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError('More than 1 capturing group in token pattern. Only a single group should be captured.')
tokenize = token_pattern.findall
</DeepExtract>
<DeepExtract>
if id(self.stop_words) == getattr(self, '_stop_words_id', None):
return None
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn('Your stop_words may be inconsistent with your preprocessing. Tokenizing the stop words generated tokens %r not in stop_words.' % sorted(inconsistent))
return not inconsistent
except Exception:
self._stop_words_id = id(self.stop_words)
return 'error'
</DeepExtract>
return partial(_analyze, ngrams=self._word_ngrams, tokenizer=tokenize, preprocessor=preprocess, decoder=self.decode, stop_words=stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer)
|
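# Usage sketch for the analyzer machinery above via the public CountVectorizer API;
# the sample sentence and n-gram range are illustrative.
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(ngram_range=(1, 2), stop_words="english")
analyze = vectorizer.build_analyzer()
print(analyze("The quick brown fox"))
# ['quick', 'brown', 'fox', 'quick brown', 'brown fox']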
def fit(self, X, y=None, **params):
(W, H, self.n_iter) = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
return self
|
def fit(self, X, y=None, **params):
<DeepExtract>
(W, H, self.n_iter) = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
</DeepExtract>
return self
|
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
C : ndarray of shape (n_samples,)
Estimated class labels.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * S ** (-0.5))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
d = -0.5 * (norm2 + u) + np.log(self.priors_)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
|
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
C : ndarray of shape (n_samples,)
Estimated class labels.
"""
<DeepExtract>
check_is_fitted(self)
X = self._validate_data(X, reset=False)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * S ** (-0.5))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
d = -0.5 * (norm2 + u) + np.log(self.priors_)
</DeepExtract>
y_pred = self.classes_.take(d.argmax(1))
return y_pred
|
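# Usage sketch, assuming the predict method above belongs to
# QuadraticDiscriminantAnalysis; the toy two-class data is illustrative and the
# return value is an array of class labels, not probabilities.
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

X = np.array([[-2.0, -1.0], [-1.5, -1.0], [-1.0, -2.0], [1.0, 1.0], [1.5, 2.0], [2.0, 1.0]])
y = np.array([0, 0, 0, 1, 1, 1])
qda = QuadraticDiscriminantAnalysis().fit(X, y)
print(qda.predict([[-0.8, -1.0], [2.0, 2.0]]))  # expected: [0 1]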
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
Y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target vectors, where `n_samples` is the number of samples and
`n_targets` is the number of response variables.
Returns
-------
self : object
Fitted model.
"""
self._validate_params()
check_consistent_length(X, Y)
X = self._validate_data(X, dtype=np.float64, copy=self.copy, ensure_min_samples=2)
Y = check_array(Y, input_name='Y', dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
n_components = self.n_components
rank_upper_bound = p if self.deflation_mode == 'regression' else min(n, p, q)
if n_components > rank_upper_bound:
raise ValueError(f'`n_components` upper bound is {rank_upper_bound}. Got {n_components} instead. Reduce `n_components`.')
self._norm_y_weights = self.deflation_mode == 'canonical'
norm_y_weights = self._norm_y_weights
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
if self.scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
(Xk, Yk, self._x_mean, self._y_mean, self._x_std, self._y_std) = (X, Y, x_mean, y_mean, x_std, y_std)
self.x_weights_ = np.zeros((p, n_components))
self.y_weights_ = np.zeros((q, n_components))
self._x_scores = np.zeros((n, n_components))
self._y_scores = np.zeros((n, n_components))
self.x_loadings_ = np.zeros((p, n_components))
self.y_loadings_ = np.zeros((q, n_components))
self.n_iter_ = []
Y_eps = np.finfo(Yk.dtype).eps
for k in range(n_components):
if self.algorithm == 'nipals':
Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0)
Yk[:, Yk_mask] = 0.0
try:
eps = np.finfo(Xk.dtype).eps
try:
y_score = next((col for col in Yk.T if np.any(np.abs(col) > eps)))
except StopIteration as e:
raise StopIteration('Y residual is constant') from e
x_weights_old = 100
if self.mode == 'B':
(X_pinv, Y_pinv) = (_pinv2_old(Xk), _pinv2_old(Yk))
for i in range(self.max_iter):
if self.mode == 'B':
x_weights = np.dot(X_pinv, y_score)
else:
x_weights = np.dot(Xk.T, y_score) / np.dot(y_score, y_score)
x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
x_score = np.dot(Xk, x_weights)
if self.mode == 'B':
y_weights = np.dot(Y_pinv, x_score)
else:
y_weights = np.dot(Yk.T, x_score) / np.dot(x_score.T, x_score)
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps
y_score = np.dot(Yk, y_weights) / (np.dot(y_weights, y_weights) + eps)
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff, x_weights_diff) < self.tol or Yk.shape[1] == 1:
break
x_weights_old = x_weights
n_iter = i + 1
if n_iter == self.max_iter:
warnings.warn('Maximum number of iterations reached', ConvergenceWarning)
(x_weights, y_weights, n_iter_) = (x_weights, y_weights, n_iter)
except StopIteration as e:
if str(e) != 'Y residual is constant':
raise
warnings.warn(f'Y residual is constant at iteration {k}')
break
self.n_iter_.append(n_iter_)
elif self.algorithm == 'svd':
C = np.dot(Xk.T, Yk)
(U, _, Vt) = svd(C, full_matrices=False)
(x_weights, y_weights) = (U[:, 0], Vt[0, :])
biggest_abs_val_idx = np.argmax(np.abs(x_weights))
sign = np.sign(x_weights[biggest_abs_val_idx])
x_weights *= sign
y_weights *= sign
x_scores = np.dot(Xk, x_weights)
if norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores)
Xk -= np.outer(x_scores, x_loadings)
if self.deflation_mode == 'canonical':
y_loadings = np.dot(y_scores, Yk) / np.dot(y_scores, y_scores)
Yk -= np.outer(y_scores, y_loadings)
if self.deflation_mode == 'regression':
y_loadings = np.dot(x_scores, Yk) / np.dot(x_scores, x_scores)
Yk -= np.outer(x_scores, y_loadings)
self.x_weights_[:, k] = x_weights
self.y_weights_[:, k] = y_weights
self._x_scores[:, k] = x_scores
self._y_scores[:, k] = y_scores
self.x_loadings_[:, k] = x_loadings
self.y_loadings_[:, k] = y_loadings
self.x_rotations_ = np.dot(self.x_weights_, pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False))
self.y_rotations_ = np.dot(self.y_weights_, pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False))
self._coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self._coef_ = (self._coef_ * self._y_std).T
self.intercept_ = self._y_mean
self._n_features_out = self.x_rotations_.shape[1]
return self
|
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
Y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target vectors, where `n_samples` is the number of samples and
`n_targets` is the number of response variables.
Returns
-------
self : object
Fitted model.
"""
self._validate_params()
check_consistent_length(X, Y)
X = self._validate_data(X, dtype=np.float64, copy=self.copy, ensure_min_samples=2)
Y = check_array(Y, input_name='Y', dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
n_components = self.n_components
rank_upper_bound = p if self.deflation_mode == 'regression' else min(n, p, q)
if n_components > rank_upper_bound:
raise ValueError(f'`n_components` upper bound is {rank_upper_bound}. Got {n_components} instead. Reduce `n_components`.')
self._norm_y_weights = self.deflation_mode == 'canonical'
norm_y_weights = self._norm_y_weights
<DeepExtract>
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
if self.scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
(Xk, Yk, self._x_mean, self._y_mean, self._x_std, self._y_std) = (X, Y, x_mean, y_mean, x_std, y_std)
</DeepExtract>
self.x_weights_ = np.zeros((p, n_components))
self.y_weights_ = np.zeros((q, n_components))
self._x_scores = np.zeros((n, n_components))
self._y_scores = np.zeros((n, n_components))
self.x_loadings_ = np.zeros((p, n_components))
self.y_loadings_ = np.zeros((q, n_components))
self.n_iter_ = []
Y_eps = np.finfo(Yk.dtype).eps
for k in range(n_components):
if self.algorithm == 'nipals':
Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0)
Yk[:, Yk_mask] = 0.0
try:
<DeepExtract>
eps = np.finfo(Xk.dtype).eps
try:
y_score = next((col for col in Yk.T if np.any(np.abs(col) > eps)))
except StopIteration as e:
raise StopIteration('Y residual is constant') from e
x_weights_old = 100
if self.mode == 'B':
(X_pinv, Y_pinv) = (_pinv2_old(Xk), _pinv2_old(Yk))
for i in range(self.max_iter):
if self.mode == 'B':
x_weights = np.dot(X_pinv, y_score)
else:
x_weights = np.dot(Xk.T, y_score) / np.dot(y_score, y_score)
x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
x_score = np.dot(Xk, x_weights)
if self.mode == 'B':
y_weights = np.dot(Y_pinv, x_score)
else:
y_weights = np.dot(Yk.T, x_score) / np.dot(x_score.T, x_score)
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps
y_score = np.dot(Yk, y_weights) / (np.dot(y_weights, y_weights) + eps)
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff, x_weights_diff) < self.tol or Yk.shape[1] == 1:
break
x_weights_old = x_weights
n_iter = i + 1
if n_iter == self.max_iter:
warnings.warn('Maximum number of iterations reached', ConvergenceWarning)
(x_weights, y_weights, n_iter_) = (x_weights, y_weights, n_iter)
</DeepExtract>
except StopIteration as e:
if str(e) != 'Y residual is constant':
raise
warnings.warn(f'Y residual is constant at iteration {k}')
break
self.n_iter_.append(n_iter_)
elif self.algorithm == 'svd':
<DeepExtract>
C = np.dot(Xk.T, Yk)
(U, _, Vt) = svd(C, full_matrices=False)
(x_weights, y_weights) = (U[:, 0], Vt[0, :])
</DeepExtract>
<DeepExtract>
biggest_abs_val_idx = np.argmax(np.abs(x_weights))
sign = np.sign(x_weights[biggest_abs_val_idx])
x_weights *= sign
y_weights *= sign
</DeepExtract>
x_scores = np.dot(Xk, x_weights)
if norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores)
Xk -= np.outer(x_scores, x_loadings)
if self.deflation_mode == 'canonical':
y_loadings = np.dot(y_scores, Yk) / np.dot(y_scores, y_scores)
Yk -= np.outer(y_scores, y_loadings)
if self.deflation_mode == 'regression':
y_loadings = np.dot(x_scores, Yk) / np.dot(x_scores, x_scores)
Yk -= np.outer(x_scores, y_loadings)
self.x_weights_[:, k] = x_weights
self.y_weights_[:, k] = y_weights
self._x_scores[:, k] = x_scores
self._y_scores[:, k] = y_scores
self.x_loadings_[:, k] = x_loadings
self.y_loadings_[:, k] = y_loadings
self.x_rotations_ = np.dot(self.x_weights_, pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False))
self.y_rotations_ = np.dot(self.y_weights_, pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False))
self._coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self._coef_ = (self._coef_ * self._y_std).T
self.intercept_ = self._y_mean
self._n_features_out = self.x_rotations_.shape[1]
return self
|
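# Usage sketch for the NIPALS fitting routine above through the public PLSRegression
# estimator; the synthetic two-target data is illustrative.
import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 4))
Y = X[:, :2] @ np.array([[1.0, 0.5], [0.5, 2.0]]) + 0.01 * rng.normal(size=(50, 2))
pls = PLSRegression(n_components=2).fit(X, Y)
print(pls.x_weights_.shape, pls.y_loadings_.shape)  # (4, 2) (2, 2)
print(pls.predict(X[:3]).shape)                     # (3, 2)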
def fit(self, X, y=None):
"""Compute the embedding vectors for data X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted `LocallyLinearEmbedding` class instance.
"""
self._validate_params()
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors, algorithm=self.neighbors_algorithm, n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = self._validate_data(X, dtype=float)
self.nbrs_.fit(X)
(self.embedding_, self.reconstruction_error_) = locally_linear_embedding(X=self.nbrs_, n_neighbors=self.n_neighbors, n_components=self.n_components, eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter, method=self.method, hessian_tol=self.hessian_tol, modified_tol=self.modified_tol, random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
self._n_features_out = self.embedding_.shape[1]
return self
|
def fit(self, X, y=None):
"""Compute the embedding vectors for data X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted `LocallyLinearEmbedding` class instance.
"""
self._validate_params()
<DeepExtract>
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors, algorithm=self.neighbors_algorithm, n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = self._validate_data(X, dtype=float)
self.nbrs_.fit(X)
(self.embedding_, self.reconstruction_error_) = locally_linear_embedding(X=self.nbrs_, n_neighbors=self.n_neighbors, n_components=self.n_components, eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter, method=self.method, hessian_tol=self.hessian_tol, modified_tol=self.modified_tol, random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
self._n_features_out = self.embedding_.shape[1]
</DeepExtract>
return self
|
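# Usage sketch for the fit method above via the public LocallyLinearEmbedding API;
# the swiss-roll data set is just a convenient manifold example.
from sklearn.datasets import make_swiss_roll
from sklearn.manifold import LocallyLinearEmbedding

X, _ = make_swiss_roll(n_samples=300, random_state=0)
lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2, random_state=0)
embedding = lle.fit_transform(X)
print(embedding.shape)            # (300, 2)
print(lle.reconstruction_error_)  # small value close to zero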
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
(n_samples, n_features) = X.shape
(n_components, _) = means.shape
if covariance_type == 'full':
(n_components, _, _) = precisions_chol.shape
log_det_chol = np.sum(np.log(precisions_chol.reshape(n_components, -1)[:, ::n_features + 1]), 1)
elif covariance_type == 'tied':
log_det_chol = np.sum(np.log(np.diag(precisions_chol)))
elif covariance_type == 'diag':
log_det_chol = np.sum(np.log(precisions_chol), axis=1)
else:
log_det_chol = n_features * np.log(precisions_chol)
log_det = log_det_chol
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
for (k, (mu, prec_chol)) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'tied':
log_prob = np.empty((n_samples, n_components))
for (k, mu) in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precisions_chol ** 2
log_prob = np.sum(means ** 2 * precisions, 1) - 2.0 * np.dot(X, (means * precisions).T) + np.dot(X ** 2, precisions.T)
elif covariance_type == 'spherical':
precisions = precisions_chol ** 2
log_prob = np.sum(means ** 2, 1) * precisions - 2 * np.dot(X, means.T * precisions) + np.outer(row_norms(X, squared=True), precisions)
return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
|
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
(n_samples, n_features) = X.shape
(n_components, _) = means.shape
<DeepExtract>
if covariance_type == 'full':
(n_components, _, _) = precisions_chol.shape
log_det_chol = np.sum(np.log(precisions_chol.reshape(n_components, -1)[:, ::n_features + 1]), 1)
elif covariance_type == 'tied':
log_det_chol = np.sum(np.log(np.diag(precisions_chol)))
elif covariance_type == 'diag':
log_det_chol = np.sum(np.log(precisions_chol), axis=1)
else:
log_det_chol = n_features * np.log(precisions_chol)
log_det = log_det_chol
</DeepExtract>
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
for (k, (mu, prec_chol)) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'tied':
log_prob = np.empty((n_samples, n_components))
for (k, mu) in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precisions_chol ** 2
log_prob = np.sum(means ** 2 * precisions, 1) - 2.0 * np.dot(X, (means * precisions).T) + np.dot(X ** 2, precisions.T)
elif covariance_type == 'spherical':
precisions = precisions_chol ** 2
log_prob = np.sum(means ** 2, 1) * precisions - 2 * np.dot(X, means.T * precisions) + np.outer(row_norms(X, squared=True), precisions)
return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
|
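# Numerical sketch of what the helper above computes for the 'diag' covariance case:
# each column should equal the log density of a diagonal Gaussian, so it can be
# cross-checked against scipy.stats. The import path of this private helper is the
# one used by recent scikit-learn versions and may differ in others.
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob

rng = np.random.RandomState(0)
X = rng.normal(size=(5, 3))
means = rng.normal(size=(2, 3))
variances = rng.uniform(0.5, 2.0, size=(2, 3))
precisions_chol = 1.0 / np.sqrt(variances)  # Cholesky of a diagonal precision = 1/sqrt(var)

expected = np.column_stack(
    [multivariate_normal(mean=m, cov=np.diag(v)).logpdf(X) for m, v in zip(means, variances)]
)
result = _estimate_log_gaussian_prob(X, means, precisions_chol, "diag")
np.testing.assert_allclose(result, expected)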
def obj_func(theta, eval_gradient=True):
if eval_gradient:
if theta is None:
if True:
raise ValueError('Gradient can only be evaluated for theta!=None')
(lml, grad) = self.log_marginal_likelihood_value_
if False:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if True:
(K, K_gradient) = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
(Z, (pi, W_sr, L, b, a)) = self._posterior_mode(K, return_temporaries=True)
if not True:
(lml, grad) = Z
d_Z = np.empty(theta.shape[0])
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))
C = solve(L, W_sr[:, np.newaxis] * K)
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) * (pi * (1 - pi) * (1 - 2 * pi))
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j]
s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi)
s_3 = b - K.dot(R.dot(b))
d_Z[j] = s_1 + s_2.T.dot(s_3)
(lml, grad) = (Z, d_Z)
return (-lml, -grad)
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
|
def obj_func(theta, eval_gradient=True):
if eval_gradient:
<DeepExtract>
if theta is None:
if True:
raise ValueError('Gradient can only be evaluated for theta!=None')
(lml, grad) = self.log_marginal_likelihood_value_
if False:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if True:
(K, K_gradient) = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
(Z, (pi, W_sr, L, b, a)) = self._posterior_mode(K, return_temporaries=True)
if not True:
(lml, grad) = Z
d_Z = np.empty(theta.shape[0])
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))
C = solve(L, W_sr[:, np.newaxis] * K)
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) * (pi * (1 - pi) * (1 - 2 * pi))
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j]
s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi)
s_3 = b - K.dot(R.dot(b))
d_Z[j] = s_1 + s_2.T.dot(s_3)
(lml, grad) = (Z, d_Z)
</DeepExtract>
return (-lml, -grad)
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
|
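# Usage sketch related to the objective above, assuming the public
# GaussianProcessClassifier API: after fitting, the quantity the optimizer negates is
# the log marginal likelihood together with its gradient with respect to the kernel
# hyperparameters. The one-dimensional toy data is illustrative.
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0), random_state=0).fit(X, y)
lml, grad = gpc.log_marginal_likelihood(gpc.kernel_.theta, eval_gradient=True)
print(lml, grad.shape)  # scalar log marginal likelihood, gradient of shape (2,)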
def test_multitask_enet_and_lasso_cv():
random_state = np.random.RandomState(0)
if 3 > 1:
w = random_state.randn(50, 3)
else:
w = random_state.randn(50)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, 50)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, 50)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
clf = MultiTaskElasticNetCV(cv=3).fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV(cv=3).fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
random_state = np.random.RandomState(0)
if 3 > 1:
w = random_state.randn(n_features, 3)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=0.001, max_iter=100, l1_ratio=[0.3, 0.5], tol=0.001, cv=3)
clf.fit(X, y)
assert 0.5 == clf.l1_ratio_
assert (3, X.shape[1]) == clf.coef_.shape
assert (3,) == clf.intercept_.shape
assert (2, 10, 3) == clf.mse_path_.shape
assert (2, 10) == clf.alphas_.shape
random_state = np.random.RandomState(0)
if 3 > 1:
w = random_state.randn(n_features, 3)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
clf = MultiTaskLassoCV(n_alphas=10, eps=0.001, max_iter=100, tol=0.001, cv=3)
clf.fit(X, y)
assert (3, X.shape[1]) == clf.coef_.shape
assert (3,) == clf.intercept_.shape
assert (10, 3) == clf.mse_path_.shape
assert 10 == len(clf.alphas_)
|
def test_multitask_enet_and_lasso_cv():
<DeepExtract>
random_state = np.random.RandomState(0)
if 3 > 1:
w = random_state.randn(50, 3)
else:
w = random_state.randn(50)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, 50)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, 50)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
clf = MultiTaskElasticNetCV(cv=3).fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV(cv=3).fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
<DeepExtract>
random_state = np.random.RandomState(0)
if 3 > 1:
w = random_state.randn(n_features, 3)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
clf = MultiTaskElasticNetCV(n_alphas=10, eps=0.001, max_iter=100, l1_ratio=[0.3, 0.5], tol=0.001, cv=3)
clf.fit(X, y)
assert 0.5 == clf.l1_ratio_
assert (3, X.shape[1]) == clf.coef_.shape
assert (3,) == clf.intercept_.shape
assert (2, 10, 3) == clf.mse_path_.shape
assert (2, 10) == clf.alphas_.shape
<DeepExtract>
random_state = np.random.RandomState(0)
if 3 > 1:
w = random_state.randn(n_features, 3)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
clf = MultiTaskLassoCV(n_alphas=10, eps=0.001, max_iter=100, tol=0.001, cv=3)
clf.fit(X, y)
assert (3, X.shape[1]) == clf.coef_.shape
assert (3,) == clf.intercept_.shape
assert (10, 3) == clf.mse_path_.shape
assert 10 == len(clf.alphas_)
|
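# Usage sketch for the multi-task CV estimators exercised above, via the public
# MultiTaskLassoCV API; the synthetic three-target data is illustrative.
import numpy as np
from sklearn.linear_model import MultiTaskLassoCV

rng = np.random.RandomState(0)
X = rng.normal(size=(60, 8))
W = np.zeros((8, 3))
W[:3] = rng.normal(size=(3, 3))         # only the first 3 features are informative
Y = X @ W + 0.01 * rng.normal(size=(60, 3))
clf = MultiTaskLassoCV(n_alphas=20, cv=3).fit(X, Y)
print(clf.coef_.shape, clf.alpha_ > 0)  # (3, 8) True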
def test_confusion_matrix_binary():
if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if True:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if True:
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, probas_pred)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
cm = confusion_matrix([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
|
def test_confusion_matrix_binary():
<DeepExtract>
if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if True:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if True:
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, probas_pred)
</DeepExtract>
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
<DeepExtract>
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
</DeepExtract>
<DeepExtract>
cm = confusion_matrix([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_equal(cm, [[22, 3], [8, 17]])
(tp, fp, fn, tn) = cm.flatten()
num = tp * tn - fp * fn
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef([str(y) for y in y_true], [str(y) for y in y_pred])
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
</DeepExtract>
|
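# Worked numeric sketch of the check performed above: the Matthews correlation
# coefficient recomputed by hand from a 2x2 confusion matrix. The labels below are
# constructed so the matrix matches the [[22, 3], [8, 17]] asserted in the test.
import numpy as np
from sklearn.metrics import confusion_matrix, matthews_corrcoef

y_true = [0] * 25 + [1] * 25
y_pred = [0] * 22 + [1] * 3 + [0] * 8 + [1] * 17
cm = confusion_matrix(y_true, y_pred)
print(cm)  # [[22  3]
           #  [ 8 17]]
tn, fp, fn, tp = cm.ravel()
mcc_by_hand = (tp * tn - fp * fn) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(round(mcc_by_hand, 2), round(matthews_corrcoef(y_true, y_pred), 2))  # 0.57 0.57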
def predict(self, X):
"""
Predict labels (1 inlier, -1 outlier) of X according to fitted model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self)
negative_mahal_dist = self.score_samples(X)
values = negative_mahal_dist - self.offset_
is_inlier = np.full(values.shape[0], -1, dtype=int)
is_inlier[values >= 0] = 1
return is_inlier
|
def predict(self, X):
"""
Predict labels (1 inlier, -1 outlier) of X according to fitted model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
<DeepExtract>
check_is_fitted(self)
negative_mahal_dist = self.score_samples(X)
values = negative_mahal_dist - self.offset_
</DeepExtract>
is_inlier = np.full(values.shape[0], -1, dtype=int)
is_inlier[values >= 0] = 1
return is_inlier
|
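# Usage sketch, assuming the predict method above belongs to an outlier detector with
# the EllipticEnvelope-style API (score_samples minus offset_ thresholded at zero);
# the Gaussian blob plus one planted outlier is illustrative.
import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(size=(100, 2)), [[8.0, 8.0]]])
est = EllipticEnvelope(contamination=0.05, random_state=0).fit(X)
print(est.predict([[0.0, 0.0], [8.0, 8.0]]))  # expected: [ 1 -1]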
def test_stratified_shuffle_split_even():
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert prob > threshold, 'An index is not drawn with chance corresponding to even draws'
for n_samples in (6, 22):
groups = np.array(n_samples // 2 * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits, test_size=1.0 / n_folds, random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for (train, test) in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for (counter, ids) in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert n_splits_actual == n_splits
(n_train, n_test) = _validate_shuffle_split(n_samples, test_size=1.0 / n_folds, train_size=1.0 - 1.0 / n_folds)
assert len(train) == n_train
assert len(test) == n_test
assert len(set(train).intersection(test)) == 0
group_counts = np.unique(groups)
assert splits.test_size == 1.0 / n_folds
assert n_train + n_test == len(groups)
assert len(group_counts) == 2
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, ex_train_p)
for count in train_counts:
prob = bf.pmf(count)
assert prob > threshold, 'An index is not drawn with chance corresponding to even draws'
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, ex_test_p)
for count in test_counts:
prob = bf.pmf(count)
assert prob > threshold, 'An index is not drawn with chance corresponding to even draws'
|
def test_stratified_shuffle_split_even():
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert prob > threshold, 'An index is not drawn with chance corresponding to even draws'
for n_samples in (6, 22):
groups = np.array(n_samples // 2 * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits, test_size=1.0 / n_folds, random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for (train, test) in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for (counter, ids) in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert n_splits_actual == n_splits
(n_train, n_test) = _validate_shuffle_split(n_samples, test_size=1.0 / n_folds, train_size=1.0 - 1.0 / n_folds)
assert len(train) == n_train
assert len(test) == n_test
assert len(set(train).intersection(test)) == 0
group_counts = np.unique(groups)
assert splits.test_size == 1.0 / n_folds
assert n_train + n_test == len(groups)
assert len(group_counts) == 2
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
<DeepExtract>
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, ex_train_p)
for count in train_counts:
prob = bf.pmf(count)
assert prob > threshold, 'An index is not drawn with chance corresponding to even draws'
</DeepExtract>
<DeepExtract>
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, ex_test_p)
for count in test_counts:
prob = bf.pmf(count)
assert prob > threshold, 'An index is not drawn with chance corresponding to even draws'
</DeepExtract>
|
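# Usage sketch for StratifiedShuffleSplit, showing the per-class balance that the
# statistical test above probes; the sample size and test_size are illustrative.
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

y = np.array([0, 1] * 10)  # 20 samples, perfectly balanced classes
sss = StratifiedShuffleSplit(n_splits=3, test_size=0.2, random_state=0)
for train, test in sss.split(np.zeros((20, 1)), y):
    print(len(train), len(test), np.bincount(y[test]))  # 16 4 [2 2]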
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):
raise ValueError('precomputed is True but the input data is not a mask')
self._precomputed = True
else:
self._precomputed = False
if not self._precomputed:
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not True and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=True, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if True:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
else:
self._check_n_features(X, reset=True)
self._n_features = X.shape[1]
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if sp.issparse(X):
imputer_mask.eliminate_zeros()
if self.features == 'missing-only':
n_missing = imputer_mask.getnnz(axis=0)
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == 'csr':
imputer_mask = imputer_mask.tocsc()
else:
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if self.features == 'missing-only':
n_missing = imputer_mask.sum(axis=0)
if self.sparse is True:
imputer_mask = sp.csc_matrix(imputer_mask)
if self.features == 'all':
features_indices = np.arange(X.shape[1])
else:
features_indices = np.flatnonzero(n_missing)
missing_features_info = (imputer_mask, features_indices)
self.features_ = missing_features_info[1]
return missing_features_info[0]
|
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):
raise ValueError('precomputed is True but the input data is not a mask')
self._precomputed = True
else:
self._precomputed = False
if not self._precomputed:
<DeepExtract>
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not True and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=True, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if True:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
</DeepExtract>
else:
self._check_n_features(X, reset=True)
self._n_features = X.shape[1]
<DeepExtract>
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if sp.issparse(X):
imputer_mask.eliminate_zeros()
if self.features == 'missing-only':
n_missing = imputer_mask.getnnz(axis=0)
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == 'csr':
imputer_mask = imputer_mask.tocsc()
else:
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if self.features == 'missing-only':
n_missing = imputer_mask.sum(axis=0)
if self.sparse is True:
imputer_mask = sp.csc_matrix(imputer_mask)
if self.features == 'all':
features_indices = np.arange(X.shape[1])
else:
features_indices = np.flatnonzero(n_missing)
missing_features_info = (imputer_mask, features_indices)
</DeepExtract>
self.features_ = missing_features_info[1]
return missing_features_info[0]
|
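# Usage sketch for the mask-building logic above through the public MissingIndicator
# transformer; the data values are illustrative.
import numpy as np
from sklearn.impute import MissingIndicator

X = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan],
              [7.0, np.nan, 9.0]])
indicator = MissingIndicator(features="missing-only")
mask = indicator.fit_transform(X)
print(indicator.features_)  # [1 2] -> only columns that contain missing values are kept
print(mask)
# [[ True False]
#  [False  True]
#  [ True False]]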
def SGDOneClassSVM(**kwargs):
if 'random_state' not in kwargs:
kwargs['random_state'] = 42
if 'tol' not in kwargs:
kwargs['tol'] = None
if 'max_iter' not in kwargs:
kwargs['max_iter'] = 5
return linear_model.SGDOneClassSVM(**kwargs)
|
def SGDOneClassSVM(**kwargs):
<DeepExtract>
if 'random_state' not in kwargs:
kwargs['random_state'] = 42
if 'tol' not in kwargs:
kwargs['tol'] = None
if 'max_iter' not in kwargs:
kwargs['max_iter'] = 5
</DeepExtract>
return linear_model.SGDOneClassSVM(**kwargs)
|
@pytest.mark.parametrize('fit_intercept', [False, True])
def test_multinomial_coef_shape(fit_intercept):
"""Test that multinomial LinearModelLoss respects shape of coef."""
loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept)
(n_samples, n_features) = (10, 5)
rng = np.random.RandomState(42)
n_dof = n_features + loss.fit_intercept
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, random_state=rng)
coef = loss.init_zero_coef(X)
if loss.base_loss.is_multiclass:
n_classes = loss.base_loss.n_classes
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_classes * n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
else:
raw_prediction = X @ coef.T
proba = loss.base_loss.link.inverse(raw_prediction)
def choice_vectorized(items, p):
s = p.cumsum(axis=1)
r = rng.rand(p.shape[0])[:, None]
k = (s < r).sum(axis=1)
(X, y, coef) = items[k]
y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64)
else:
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:-1] + coef[-1]
else:
raw_prediction = X @ coef
y = loss.base_loss.link.inverse(raw_prediction + rng.uniform(low=-1, high=1, size=n_samples))
(X, y, coef) = (X, y, coef)
s = np.random.RandomState(42).randn(*coef.shape)
(l, g) = loss.loss_gradient(coef, X, y)
g1 = loss.gradient(coef, X, y)
(g2, hessp) = loss.gradient_hessian_product(coef, X, y)
h = hessp(s)
assert g.shape == coef.shape
assert h.shape == coef.shape
assert_allclose(g, g1)
assert_allclose(g, g2)
coef_r = coef.ravel(order='F')
s_r = s.ravel(order='F')
(l_r, g_r) = loss.loss_gradient(coef_r, X, y)
g1_r = loss.gradient(coef_r, X, y)
(g2_r, hessp_r) = loss.gradient_hessian_product(coef_r, X, y)
h_r = hessp_r(s_r)
assert g_r.shape == coef_r.shape
assert h_r.shape == coef_r.shape
assert_allclose(g_r, g1_r)
assert_allclose(g_r, g2_r)
assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order='F'))
assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order='F'))
|
@pytest.mark.parametrize('fit_intercept', [False, True])
def test_multinomial_coef_shape(fit_intercept):
"""Test that multinomial LinearModelLoss respects shape of coef."""
loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept)
(n_samples, n_features) = (10, 5)
<DeepExtract>
rng = np.random.RandomState(42)
n_dof = n_features + loss.fit_intercept
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, random_state=rng)
coef = loss.init_zero_coef(X)
if loss.base_loss.is_multiclass:
n_classes = loss.base_loss.n_classes
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_classes * n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
else:
raw_prediction = X @ coef.T
proba = loss.base_loss.link.inverse(raw_prediction)
def choice_vectorized(items, p):
s = p.cumsum(axis=1)
r = rng.rand(p.shape[0])[:, None]
k = (s < r).sum(axis=1)
(X, y, coef) = items[k]
y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64)
else:
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:-1] + coef[-1]
else:
raw_prediction = X @ coef
y = loss.base_loss.link.inverse(raw_prediction + rng.uniform(low=-1, high=1, size=n_samples))
(X, y, coef) = (X, y, coef)
</DeepExtract>
s = np.random.RandomState(42).randn(*coef.shape)
(l, g) = loss.loss_gradient(coef, X, y)
g1 = loss.gradient(coef, X, y)
(g2, hessp) = loss.gradient_hessian_product(coef, X, y)
h = hessp(s)
assert g.shape == coef.shape
assert h.shape == coef.shape
assert_allclose(g, g1)
assert_allclose(g, g2)
coef_r = coef.ravel(order='F')
s_r = s.ravel(order='F')
(l_r, g_r) = loss.loss_gradient(coef_r, X, y)
g1_r = loss.gradient(coef_r, X, y)
(g2_r, hessp_r) = loss.gradient_hessian_product(coef_r, X, y)
h_r = hessp_r(s_r)
assert g_r.shape == coef_r.shape
assert h_r.shape == coef_r.shape
assert_allclose(g_r, g1_r)
assert_allclose(g_r, g2_r)
assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order='F'))
assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order='F'))
|
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,...), default=None
Targets for supervised learning.
Returns
-------
self : ColumnTransformer
This estimator.
"""
self._validate_params()
self._validate_params()
self._check_feature_names(X, reset=True)
X = _check_X(X)
self._check_n_features(X, reset=True)
self._validate_transformers()
self._validate_column_callables(X)
self._validate_remainder(X)
result = self._fit_transform(X, y, _fit_transform_one)
if not result:
self._update_fitted_transformers([])
return np.zeros((X.shape[0], 0))
(Xs, transformers) = zip(*result)
if any((sparse.issparse(X) for X in Xs)):
nnz = sum((X.nnz if sparse.issparse(X) else X.size for X in Xs))
total = sum((X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs))
density = nnz / total
self.sparse_output_ = density < self.sparse_threshold
else:
self.sparse_output_ = False
self._update_fitted_transformers(transformers)
self._validate_output(Xs)
self._record_output_indices(Xs)
return self._hstack(list(Xs))
return self
|
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,...), default=None
Targets for supervised learning.
Returns
-------
self : ColumnTransformer
This estimator.
"""
self._validate_params()
<DeepExtract>
self._validate_params()
self._check_feature_names(X, reset=True)
X = _check_X(X)
self._check_n_features(X, reset=True)
self._validate_transformers()
self._validate_column_callables(X)
self._validate_remainder(X)
result = self._fit_transform(X, y, _fit_transform_one)
if not result:
self._update_fitted_transformers([])
return np.zeros((X.shape[0], 0))
(Xs, transformers) = zip(*result)
if any((sparse.issparse(X) for X in Xs)):
nnz = sum((X.nnz if sparse.issparse(X) else X.size for X in Xs))
total = sum((X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs))
density = nnz / total
self.sparse_output_ = density < self.sparse_threshold
else:
self.sparse_output_ = False
self._update_fitted_transformers(transformers)
self._validate_output(Xs)
self._record_output_indices(Xs)
return self._hstack(list(Xs))
</DeepExtract>
return self
|
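# Usage sketch for the fit path above via the public ColumnTransformer API; the toy
# pandas frame and the choice of per-column transformers are illustrative.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

df = pd.DataFrame({"age": [20, 30, 40], "city": ["a", "b", "a"]})
ct = ColumnTransformer(
    [("num", StandardScaler(), ["age"]),
     ("cat", OneHotEncoder(), ["city"])]
)
Xt = ct.fit_transform(df)
print(Xt.shape)                    # (3, 3): one scaled column plus two one-hot columns
print(ct.get_feature_names_out())  # ['num__age' 'cat__city_a' 'cat__city_b']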
def recurse(self, node, tree, ax, max_x, max_y, depth=0):
import matplotlib.pyplot as plt
kwargs = dict(bbox=self.bbox_args.copy(), ha='center', va='center', zorder=100 - 10 * depth, xycoords='axes fraction', arrowprops=self.arrow_args.copy())
kwargs['arrowprops']['edgecolor'] = plt.rcParams['text.color']
if self.fontsize is not None:
kwargs['fontsize'] = self.fontsize
xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y)
if self.max_depth is None or depth <= self.max_depth:
if self.filled:
if 'rgb' not in self.colors:
self.colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
self.colors['bounds'] = (np.min(-tree.impurity), np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
self.colors['bounds'] = (np.min(tree.value), np.max(tree.value))
if tree.n_outputs == 1:
node_val = tree.value[node.tree.node_id][0, :] / tree.weighted_n_node_samples[node.tree.node_id]
if tree.n_classes[0] == 1:
node_val = tree.value[node.tree.node_id][0, :]
else:
node_val = -tree.impurity[node.tree.node_id]
kwargs['bbox']['fc'] = self.get_color(node_val)
else:
kwargs['bbox']['fc'] = ax.get_facecolor()
if node.parent is None:
ax.annotate(node.tree.label, xy, **kwargs)
else:
xy_parent = ((node.parent.x + 0.5) / max_x, (max_y - node.parent.y - 0.5) / max_y)
ax.annotate(node.tree.label, xy_parent, xy, **kwargs)
for child in node.children:
if tree == _tree.TREE_LEAF:
raise ValueError('Invalid node_id %s' % _tree.TREE_LEAF)
left_child = child.children_left[tree]
right_child = child.children_right[tree]
if self.max_depth is None or depth + 1 <= self.max_depth:
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(tree))
elif str(depth + 1) not in self.ranks:
self.ranks[str(depth + 1)] = [str(tree)]
else:
self.ranks[str(depth + 1)].append(str(tree))
self.out_file.write('%d [label=%s' % (tree, self.node_to_str(child, tree, ax)))
if self.filled:
self.out_file.write(', fillcolor="%s"' % self.get_fill_color(child, tree))
self.out_file.write('] ;\n')
if max_x is not None:
self.out_file.write('%d -> %d' % (max_x, tree))
if max_x == 0:
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if tree == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(child, left_child, criterion=ax, parent=tree, depth=depth + 1 + 1)
self.recurse(child, right_child, criterion=ax, parent=tree, depth=depth + 1 + 1)
else:
self.ranks['leaves'].append(str(tree))
self.out_file.write('%d [label="(...)"' % tree)
if self.filled:
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n')
if max_x is not None:
self.out_file.write('%d -> %d ;\n' % (max_x, tree))
else:
xy_parent = ((node.parent.x + 0.5) / max_x, (max_y - node.parent.y - 0.5) / max_y)
kwargs['bbox']['fc'] = 'grey'
ax.annotate('\n (...) \n', xy_parent, xy, **kwargs)
|
def recurse(self, node, tree, ax, max_x, max_y, depth=0):
import matplotlib.pyplot as plt
kwargs = dict(bbox=self.bbox_args.copy(), ha='center', va='center', zorder=100 - 10 * depth, xycoords='axes fraction', arrowprops=self.arrow_args.copy())
kwargs['arrowprops']['edgecolor'] = plt.rcParams['text.color']
if self.fontsize is not None:
kwargs['fontsize'] = self.fontsize
xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y)
if self.max_depth is None or depth <= self.max_depth:
if self.filled:
<DeepExtract>
if 'rgb' not in self.colors:
self.colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
self.colors['bounds'] = (np.min(-tree.impurity), np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
self.colors['bounds'] = (np.min(tree.value), np.max(tree.value))
if tree.n_outputs == 1:
node_val = tree.value[node.tree.node_id][0, :] / tree.weighted_n_node_samples[node.tree.node_id]
if tree.n_classes[0] == 1:
node_val = tree.value[node.tree.node_id][0, :]
else:
node_val = -tree.impurity[node.tree.node_id]
kwargs['bbox']['fc'] = self.get_color(node_val)
</DeepExtract>
else:
kwargs['bbox']['fc'] = ax.get_facecolor()
if node.parent is None:
ax.annotate(node.tree.label, xy, **kwargs)
else:
xy_parent = ((node.parent.x + 0.5) / max_x, (max_y - node.parent.y - 0.5) / max_y)
ax.annotate(node.tree.label, xy_parent, xy, **kwargs)
for child in node.children:
<DeepExtract>
if tree == _tree.TREE_LEAF:
raise ValueError('Invalid node_id %s' % _tree.TREE_LEAF)
left_child = child.children_left[tree]
right_child = child.children_right[tree]
if self.max_depth is None or depth + 1 <= self.max_depth:
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(tree))
elif str(depth + 1) not in self.ranks:
self.ranks[str(depth + 1)] = [str(tree)]
else:
self.ranks[str(depth + 1)].append(str(tree))
self.out_file.write('%d [label=%s' % (tree, self.node_to_str(child, tree, ax)))
if self.filled:
self.out_file.write(', fillcolor="%s"' % self.get_fill_color(child, tree))
self.out_file.write('] ;\n')
if max_x is not None:
self.out_file.write('%d -> %d' % (max_x, tree))
if max_x == 0:
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if tree == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(child, left_child, criterion=ax, parent=tree, depth=depth + 1 + 1)
self.recurse(child, right_child, criterion=ax, parent=tree, depth=depth + 1 + 1)
else:
self.ranks['leaves'].append(str(tree))
self.out_file.write('%d [label="(...)"' % tree)
if self.filled:
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n')
if max_x is not None:
self.out_file.write('%d -> %d ;\n' % (max_x, tree))
</DeepExtract>
else:
xy_parent = ((node.parent.x + 0.5) / max_x, (max_y - node.parent.y - 0.5) / max_y)
kwargs['bbox']['fc'] = 'grey'
ax.annotate('\n (...) \n', xy_parent, xy, **kwargs)
|
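# Usage sketch of the rendering that the recurse method above drives, through the
# public plot_tree helper; the dataset, depth cap, and output file are illustrative.
# Nodes below max_depth are drawn as "(...)" placeholders.
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, plot_tree

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=4, random_state=0).fit(X, y)
plot_tree(clf, filled=True, max_depth=2)
plt.savefig("tree.png")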
def get_impute_zero_score(X_missing, y_missing):
imputer = SimpleImputer(missing_values=np.nan, add_indicator=True, strategy='constant', fill_value=0)
estimator = make_pipeline(imputer, regressor)
impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS)
zero_impute_scores = impute_scores
return (zero_impute_scores.mean(), zero_impute_scores.std())
|
def get_impute_zero_score(X_missing, y_missing):
imputer = SimpleImputer(missing_values=np.nan, add_indicator=True, strategy='constant', fill_value=0)
<DeepExtract>
estimator = make_pipeline(imputer, regressor)
impute_scores = cross_val_score(estimator, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS)
zero_impute_scores = impute_scores
</DeepExtract>
return (zero_impute_scores.mean(), zero_impute_scores.std())
|
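# Small sketch of the constant-fill imputation used above, via the public
# SimpleImputer API; add_indicator appends binary missingness columns for the
# features that contained missing values. The data values are illustrative.
import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan], [np.nan, 3.0], [5.0, 6.0]])
imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=0, add_indicator=True)
print(imputer.fit_transform(X))
# [[1. 0. 0. 1.]
#  [0. 3. 1. 0.]
#  [5. 6. 0. 0.]]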
@ignore_warnings(category=FutureWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
if hasattr(estimator_orig.__init__, 'deprecated_original'):
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
y = X[:, 0].astype(int)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys() if _is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit if key not in dict_before_fit.keys()]
assert not attrs_added_by_fit, 'Estimator adds public attribute(s) during the fit method. Estimators are only allowed to add private attributes either started with _ or ended with _ but %s added' % ', '.join(attrs_added_by_fit)
attrs_changed_by_fit = [key for key in public_keys_after_fit if dict_before_fit[key] is not dict_after_fit[key]]
assert not attrs_changed_by_fit, 'Estimator changes public attribute(s) during the fit method. Estimators are only allowed to change attributes started or ended with _, but %s changed' % ', '.join(attrs_changed_by_fit)
|
@ignore_warnings(category=FutureWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
if hasattr(estimator_orig.__init__, 'deprecated_original'):
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
y = X[:, 0].astype(int)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys() if _is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit if key not in dict_before_fit.keys()]
assert not attrs_added_by_fit, 'Estimator adds public attribute(s) during the fit method. Estimators are only allowed to add private attributes either started with _ or ended with _ but %s added' % ', '.join(attrs_added_by_fit)
attrs_changed_by_fit = [key for key in public_keys_after_fit if dict_before_fit[key] is not dict_after_fit[key]]
assert not attrs_changed_by_fit, 'Estimator changes public attribute(s) during the fit method. Estimators are only allowed to change attributes started or ended with _, but %s changed' % ', '.join(attrs_changed_by_fit)
|
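What the check above enforces, shown on a toy estimator (illustrative sketch, not part of the original source): constructor parameters must be stored untouched, and anything learned during `fit` must carry a leading or trailing underscore.

from sklearn.base import BaseEstimator

class GoodEstimator(BaseEstimator):
    def __init__(self, alpha=1.0):
        self.alpha = alpha                 # stored exactly as passed, never modified in fit

    def fit(self, X, y=None):
        self.n_features_in_ = X.shape[1]   # learned state: trailing underscore
        self._scale = 1.0 / self.alpha     # private state: leading underscore
        return self

# An estimator that did `self.alpha = self.alpha * 2` or added a public
# attribute such as `self.n_features` inside fit would fail this check.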
def _fit(self, X, y=None, **fit_params_steps):
self.steps = list(self.steps)
(names, estimators) = zip(*self.steps)
self._validate_names(names)
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == 'passthrough':
continue
if not (hasattr(t, 'fit') or hasattr(t, 'fit_transform')) or not hasattr(t, 'transform'):
raise TypeError("All intermediate steps should be transformers and implement fit and transform or be the string 'passthrough' '%s' (type %s) doesn't" % (t, type(t)))
if estimator is not None and estimator != 'passthrough' and (not hasattr(estimator, 'fit')):
raise TypeError("Last step of Pipeline should implement fit or be the string 'passthrough'. '%s' (type %s) doesn't" % (estimator, type(estimator)))
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for (step_idx, name, transformer) in self._iter(with_final=False, filter_passthrough=False):
if transformer is None or transformer == 'passthrough':
with _print_elapsed_time('Pipeline', self._log_message(step_idx)):
continue
if hasattr(memory, 'location') and memory.location is None:
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
(X, fitted_transformer) = fit_transform_one_cached(cloned_transformer, X, y, None, message_clsname='Pipeline', message=self._log_message(step_idx), **fit_params_steps[name])
self.steps[step_idx] = (name, fitted_transformer)
return X
|
def _fit(self, X, y=None, **fit_params_steps):
self.steps = list(self.steps)
<DeepExtract>
(names, estimators) = zip(*self.steps)
self._validate_names(names)
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == 'passthrough':
continue
if not (hasattr(t, 'fit') or hasattr(t, 'fit_transform')) or not hasattr(t, 'transform'):
raise TypeError("All intermediate steps should be transformers and implement fit and transform or be the string 'passthrough' '%s' (type %s) doesn't" % (t, type(t)))
if estimator is not None and estimator != 'passthrough' and (not hasattr(estimator, 'fit')):
raise TypeError("Last step of Pipeline should implement fit or be the string 'passthrough'. '%s' (type %s) doesn't" % (estimator, type(estimator)))
</DeepExtract>
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for (step_idx, name, transformer) in self._iter(with_final=False, filter_passthrough=False):
if transformer is None or transformer == 'passthrough':
with _print_elapsed_time('Pipeline', self._log_message(step_idx)):
continue
if hasattr(memory, 'location') and memory.location is None:
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
(X, fitted_transformer) = fit_transform_one_cached(cloned_transformer, X, y, None, message_clsname='Pipeline', message=self._log_message(step_idx), **fit_params_steps[name])
self.steps[step_idx] = (name, fitted_transformer)
return X
|
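`_fit` is the private half of `Pipeline.fit`: it validates the steps, then fits and transforms every intermediate step (optionally through a joblib cache) before the final estimator is fitted. A minimal sketch of the public API that drives it:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=100, random_state=0)
pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
pipe.fit(X, y)          # internally calls _fit to prepare X for the final step
print(pipe.score(X, y))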
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Find the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, default=None
Number of neighbors required for each sample. The default is the
value passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_queries, n_neighbors)
Array representing the lengths to points, only present if
return_distance=True.
neigh_ind : ndarray of shape (n_queries, n_neighbors)
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples)
NearestNeighbors(n_neighbors=1)
>>> print(neigh.kneighbors([[1., 1., 1.]]))
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False)
array([[1],
[2]]...)
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
elif n_neighbors <= 0:
raise ValueError('Expected n_neighbors > 0. Got %d' % n_neighbors)
elif not isinstance(n_neighbors, numbers.Integral):
raise TypeError('n_neighbors does not take %s value, enter integer value' % type(n_neighbors))
query_is_train = X is None
if query_is_train:
X = self._fit_X
n_neighbors += 1
elif self.metric == 'precomputed':
if not issparse(X):
X = check_array(X)
check_non_negative(X, whom='precomputed distance matrix.')
X = X
else:
graph = X
if graph.format not in ('csr', 'csc', 'coo', 'lil'):
raise TypeError('Sparse matrix in {!r} format is not supported due to its handling of explicit zeros'.format(graph.format))
copied = graph.format != 'csr'
graph = check_array(graph, accept_sparse='csr')
check_non_negative(graph, whom='precomputed distance matrix.')
graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)
X = graph
else:
X = self._validate_data(X, accept_sparse='csr', reset=False, order='C')
n_samples_fit = self.n_samples_fit_
if n_neighbors > n_samples_fit:
raise ValueError('Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d' % (n_samples_fit, n_neighbors))
n_jobs = effective_n_jobs(self.n_jobs)
chunked_results = None
use_pairwise_distances_reductions = self._fit_method == 'brute' and ArgKmin.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_)
if use_pairwise_distances_reductions:
results = ArgKmin.compute(X=X, Y=self._fit_X, k=n_neighbors, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=return_distance)
elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X):
n_samples = X.shape[0]
assert X.format == 'csr'
row_nnz = np.diff(X.indptr)
row_nnz_min = row_nnz.min()
if n_neighbors is not None and row_nnz_min < n_neighbors:
raise ValueError('%d neighbors per samples are required, but some samples have only %d neighbors in precomputed graph matrix. Decrease number of neighbors used or recompute the graph with more neighbors.' % (n_neighbors, row_nnz_min))
def extract(a):
if row_nnz.max() == row_nnz_min:
results = a.reshape(n_samples, -1)[:, :n_neighbors]
else:
idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
idx += X.indptr[:-1, None]
results = a.take(idx, mode='clip').reshape(n_samples, n_neighbors)
if return_distance:
results = (extract(X.data), extract(X.indices))
else:
results = extract(X.indices)
elif self._fit_method == 'brute':
reduce_func = partial(self._kneighbors_reduce_func, n_neighbors=n_neighbors, return_distance=return_distance)
if self.effective_metric_ == 'euclidean':
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
chunked_results = list(pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=n_jobs, **kwds))
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method)
chunked_results = Parallel(n_jobs, prefer='threads')((delayed(_tree_query_parallel_helper)(self._tree, X[s], n_neighbors, return_distance) for s in gen_even_slices(X.shape[0], n_jobs)))
else:
raise ValueError('internal: _fit_method not recognized')
if chunked_results is not None:
if return_distance:
(neigh_dist, neigh_ind) = zip(*chunked_results)
results = (np.vstack(neigh_dist), np.vstack(neigh_ind))
else:
results = np.vstack(chunked_results)
if not query_is_train:
return results
else:
if return_distance:
(neigh_dist, neigh_ind) = results
else:
neigh_ind = results
(n_queries, _) = X.shape
sample_range = np.arange(n_queries)[:, None]
sample_mask = neigh_ind != sample_range
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))
if return_distance:
neigh_dist = np.reshape(neigh_dist[sample_mask], (n_queries, n_neighbors - 1))
return (neigh_dist, neigh_ind)
return neigh_ind
|
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Find the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, default=None
Number of neighbors required for each sample. The default is the
value passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_queries, n_neighbors)
Array representing the lengths to points, only present if
return_distance=True.
neigh_ind : ndarray of shape (n_queries, n_neighbors)
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples)
NearestNeighbors(n_neighbors=1)
>>> print(neigh.kneighbors([[1., 1., 1.]]))
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False)
array([[1],
[2]]...)
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
elif n_neighbors <= 0:
raise ValueError('Expected n_neighbors > 0. Got %d' % n_neighbors)
elif not isinstance(n_neighbors, numbers.Integral):
raise TypeError('n_neighbors does not take %s value, enter integer value' % type(n_neighbors))
query_is_train = X is None
if query_is_train:
X = self._fit_X
n_neighbors += 1
elif self.metric == 'precomputed':
<DeepExtract>
if not issparse(X):
X = check_array(X)
check_non_negative(X, whom='precomputed distance matrix.')
X = X
else:
graph = X
if graph.format not in ('csr', 'csc', 'coo', 'lil'):
raise TypeError('Sparse matrix in {!r} format is not supported due to its handling of explicit zeros'.format(graph.format))
copied = graph.format != 'csr'
graph = check_array(graph, accept_sparse='csr')
check_non_negative(graph, whom='precomputed distance matrix.')
graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)
X = graph
</DeepExtract>
else:
X = self._validate_data(X, accept_sparse='csr', reset=False, order='C')
n_samples_fit = self.n_samples_fit_
if n_neighbors > n_samples_fit:
raise ValueError('Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d' % (n_samples_fit, n_neighbors))
n_jobs = effective_n_jobs(self.n_jobs)
chunked_results = None
use_pairwise_distances_reductions = self._fit_method == 'brute' and ArgKmin.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_)
if use_pairwise_distances_reductions:
results = ArgKmin.compute(X=X, Y=self._fit_X, k=n_neighbors, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=return_distance)
elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X):
<DeepExtract>
n_samples = X.shape[0]
assert X.format == 'csr'
row_nnz = np.diff(X.indptr)
row_nnz_min = row_nnz.min()
if n_neighbors is not None and row_nnz_min < n_neighbors:
raise ValueError('%d neighbors per samples are required, but some samples have only %d neighbors in precomputed graph matrix. Decrease number of neighbors used or recompute the graph with more neighbors.' % (n_neighbors, row_nnz_min))
def extract(a):
if row_nnz.max() == row_nnz_min:
results = a.reshape(n_samples, -1)[:, :n_neighbors]
else:
idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
idx += X.indptr[:-1, None]
results = a.take(idx, mode='clip').reshape(n_samples, n_neighbors)
if return_distance:
results = (extract(X.data), extract(X.indices))
else:
results = extract(X.indices)
</DeepExtract>
elif self._fit_method == 'brute':
reduce_func = partial(self._kneighbors_reduce_func, n_neighbors=n_neighbors, return_distance=return_distance)
if self.effective_metric_ == 'euclidean':
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
chunked_results = list(pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=n_jobs, **kwds))
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method)
chunked_results = Parallel(n_jobs, prefer='threads')((delayed(_tree_query_parallel_helper)(self._tree, X[s], n_neighbors, return_distance) for s in gen_even_slices(X.shape[0], n_jobs)))
else:
raise ValueError('internal: _fit_method not recognized')
if chunked_results is not None:
if return_distance:
(neigh_dist, neigh_ind) = zip(*chunked_results)
results = (np.vstack(neigh_dist), np.vstack(neigh_ind))
else:
results = np.vstack(chunked_results)
if not query_is_train:
return results
else:
if return_distance:
(neigh_dist, neigh_ind) = results
else:
neigh_ind = results
(n_queries, _) = X.shape
sample_range = np.arange(n_queries)[:, None]
sample_mask = neigh_ind != sample_range
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))
if return_distance:
neigh_dist = np.reshape(neigh_dist[sample_mask], (n_queries, n_neighbors - 1))
return (neigh_dist, neigh_ind)
return neigh_ind
|
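A small usage sketch of the `X=None` path handled above, where each training point is queried against the rest of the training set and is excluded from its own neighbours:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X_train = np.array([[0.0], [1.0], [2.5], [4.0]])
nn = NearestNeighbors(n_neighbors=2).fit(X_train)

dist, ind = nn.kneighbors()   # X=None: query the training points themselves
print(ind)                    # no point lists itself as a neighbour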
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
if self._strategy == 'stratified':
check_is_fitted(self)
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == 'most_frequent':
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == 'prior':
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == 'stratified':
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == 'uniform':
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == 'constant':
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
proba = P
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ('most_frequent', 'prior'):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == 'stratified':
class_prob = class_prior_
elif self._strategy == 'uniform':
raise ValueError('Sparse target prediction is not supported with the uniform strategy')
elif self._strategy == 'constant':
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
else:
if self._strategy in ('most_frequent', 'prior'):
y = np.tile([classes_[k][class_prior_[k].argmax()] for k in range(self.n_outputs_)], [n_samples, 1])
elif self._strategy == 'stratified':
y = np.vstack([classes_[k][proba[k].argmax(axis=1)] for k in range(self.n_outputs_)]).T
elif self._strategy == 'uniform':
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)] for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self._strategy == 'constant':
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
|
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
if self._strategy == 'stratified':
<DeepExtract>
check_is_fitted(self)
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == 'most_frequent':
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == 'prior':
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == 'stratified':
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == 'uniform':
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == 'constant':
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
proba = P
</DeepExtract>
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ('most_frequent', 'prior'):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == 'stratified':
class_prob = class_prior_
elif self._strategy == 'uniform':
raise ValueError('Sparse target prediction is not supported with the uniform strategy')
elif self._strategy == 'constant':
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
else:
if self._strategy in ('most_frequent', 'prior'):
y = np.tile([classes_[k][class_prior_[k].argmax()] for k in range(self.n_outputs_)], [n_samples, 1])
elif self._strategy == 'stratified':
y = np.vstack([classes_[k][proba[k].argmax(axis=1)] for k in range(self.n_outputs_)]).T
elif self._strategy == 'uniform':
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)] for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self._strategy == 'constant':
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
|
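A short usage sketch of the public behaviour: with `strategy='stratified'` the predictions are drawn from the empirical class distribution via `predict_proba`, which is why `predict` delegates to it above.

import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))                 # features are ignored by DummyClassifier
y = np.array([0, 0, 0, 1, 1, 2])

clf = DummyClassifier(strategy='stratified', random_state=0).fit(X, y)
print(clf.predict(X))                # labels sampled according to class priors
print(clf.predict_proba(X)[:2])      # one-hot draws per sample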
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False, reset=False)
(n_jobs, _, starts) = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args())((delayed(_parallel_predict_proba)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, self.n_classes_) for i in range(n_jobs)))
proba = sum(all_proba) / self.n_estimators
predicted_probabilitiy = proba
return self.classes_.take(np.argmax(predicted_probabilitiy, axis=1), axis=0)
|
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
<DeepExtract>
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False, reset=False)
(n_jobs, _, starts) = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args())((delayed(_parallel_predict_proba)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, self.n_classes_) for i in range(n_jobs)))
proba = sum(all_proba) / self.n_estimators
predicted_probabilitiy = proba
</DeepExtract>
return self.classes_.take(np.argmax(predicted_probabilitiy, axis=1), axis=0)
|
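Usage sketch: `predict` averages `predict_proba` over the fitted base estimators (computed in parallel chunks) and returns the argmax class.

from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=300, random_state=0)
clf = BaggingClassifier(DecisionTreeClassifier(), n_estimators=10,
                        random_state=0).fit(X, y)
print(clf.predict(X[:5]))
print(clf.predict_proba(X[:5]).round(2))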
def __init__(self, X_binned, gradients, hessians, max_leaf_nodes=None, max_depth=None, min_samples_leaf=20, min_gain_to_split=0.0, n_bins=256, n_bins_non_missing=None, has_missing_values=False, is_categorical=None, monotonic_cst=None, interaction_cst=None, l2_regularization=0.0, min_hessian_to_split=0.001, shrinkage=1.0, n_threads=None):
if X_binned.dtype != np.uint8:
raise NotImplementedError('X_binned must be of type uint8.')
if not X_binned.flags.f_contiguous:
raise ValueError('X_binned should be passed as Fortran contiguous array for maximum efficiency.')
if min_gain_to_split < 0:
raise ValueError('min_gain_to_split={} must be positive.'.format(min_gain_to_split))
if min_hessian_to_split < 0:
raise ValueError('min_hessian_to_split={} must be positive.'.format(min_hessian_to_split))
n_threads = _openmp_effective_n_threads(n_threads)
if n_bins_non_missing is None:
n_bins_non_missing = n_bins - 1
if isinstance(n_bins_non_missing, numbers.Integral):
n_bins_non_missing = np.array([n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32)
else:
n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32)
if isinstance(has_missing_values, bool):
has_missing_values = [has_missing_values] * X_binned.shape[1]
has_missing_values = np.asarray(has_missing_values, dtype=np.uint8)
if monotonic_cst is None:
monotonic_cst = np.full(shape=X_binned.shape[1], fill_value=MonotonicConstraint.NO_CST, dtype=np.int8)
else:
monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST)
if is_categorical is None:
is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8)
else:
is_categorical = np.asarray(is_categorical, dtype=np.uint8)
if np.any(np.logical_and(is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST)):
raise ValueError('Categorical features cannot have monotonic constraints.')
hessians_are_constant = hessians.shape[0] == 1
self.histogram_builder = HistogramBuilder(X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads)
missing_values_bin_idx = n_bins - 1
self.splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx, has_missing_values, is_categorical, monotonic_cst, l2_regularization, min_hessian_to_split, min_samples_leaf, min_gain_to_split, hessians_are_constant, n_threads)
self.n_bins_non_missing = n_bins_non_missing
self.missing_values_bin_idx = missing_values_bin_idx
self.max_leaf_nodes = max_leaf_nodes
self.has_missing_values = has_missing_values
self.monotonic_cst = monotonic_cst
self.interaction_cst = interaction_cst
self.is_categorical = is_categorical
self.l2_regularization = l2_regularization
self.n_features = X_binned.shape[1]
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.X_binned = X_binned
self.min_gain_to_split = min_gain_to_split
self.shrinkage = shrinkage
self.n_threads = n_threads
self.splittable_nodes = []
self.finalized_leaves = []
self.total_find_split_time = 0.0
self.total_compute_hist_time = 0.0
self.total_apply_split_time = 0.0
self.n_categorical_splits = 0
n_samples = self.X_binned.shape[0]
depth = 0
sum_gradients = sum_parallel(gradients, self.n_threads)
if self.histogram_builder.hessians_are_constant:
sum_hessians = hessians[0] * n_samples
else:
sum_hessians = sum_parallel(hessians, self.n_threads)
self.root = TreeNode(depth=depth, sample_indices=self.splitter.partition, sum_gradients=sum_gradients, sum_hessians=sum_hessians, value=0)
self.root.partition_start = 0
self.root.partition_stop = n_samples
if self.root.n_samples < 2 * self.min_samples_leaf:
self._finalize_leaf(self.root)
return
if sum_hessians < self.splitter.min_hessian_to_split:
self._finalize_leaf(self.root)
return
if self.interaction_cst is not None:
self.root.interaction_cst_indices = range(len(self.interaction_cst))
allowed_features = set().union(*self.interaction_cst)
self.root.allowed_features = np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features))
tic = time()
self.root.histograms = self.histogram_builder.compute_histograms_brute(self.root.sample_indices, self.root.allowed_features)
self.total_compute_hist_time += time() - tic
tic = time()
self._compute_best_split_and_push(self.root)
self.total_find_split_time += time() - tic
self.n_nodes = 1
|
def __init__(self, X_binned, gradients, hessians, max_leaf_nodes=None, max_depth=None, min_samples_leaf=20, min_gain_to_split=0.0, n_bins=256, n_bins_non_missing=None, has_missing_values=False, is_categorical=None, monotonic_cst=None, interaction_cst=None, l2_regularization=0.0, min_hessian_to_split=0.001, shrinkage=1.0, n_threads=None):
<DeepExtract>
if X_binned.dtype != np.uint8:
raise NotImplementedError('X_binned must be of type uint8.')
if not X_binned.flags.f_contiguous:
raise ValueError('X_binned should be passed as Fortran contiguous array for maximum efficiency.')
if min_gain_to_split < 0:
raise ValueError('min_gain_to_split={} must be positive.'.format(min_gain_to_split))
if min_hessian_to_split < 0:
raise ValueError('min_hessian_to_split={} must be positive.'.format(min_hessian_to_split))
</DeepExtract>
n_threads = _openmp_effective_n_threads(n_threads)
if n_bins_non_missing is None:
n_bins_non_missing = n_bins - 1
if isinstance(n_bins_non_missing, numbers.Integral):
n_bins_non_missing = np.array([n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32)
else:
n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32)
if isinstance(has_missing_values, bool):
has_missing_values = [has_missing_values] * X_binned.shape[1]
has_missing_values = np.asarray(has_missing_values, dtype=np.uint8)
if monotonic_cst is None:
monotonic_cst = np.full(shape=X_binned.shape[1], fill_value=MonotonicConstraint.NO_CST, dtype=np.int8)
else:
monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST)
if is_categorical is None:
is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8)
else:
is_categorical = np.asarray(is_categorical, dtype=np.uint8)
if np.any(np.logical_and(is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST)):
raise ValueError('Categorical features cannot have monotonic constraints.')
hessians_are_constant = hessians.shape[0] == 1
self.histogram_builder = HistogramBuilder(X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads)
missing_values_bin_idx = n_bins - 1
self.splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx, has_missing_values, is_categorical, monotonic_cst, l2_regularization, min_hessian_to_split, min_samples_leaf, min_gain_to_split, hessians_are_constant, n_threads)
self.n_bins_non_missing = n_bins_non_missing
self.missing_values_bin_idx = missing_values_bin_idx
self.max_leaf_nodes = max_leaf_nodes
self.has_missing_values = has_missing_values
self.monotonic_cst = monotonic_cst
self.interaction_cst = interaction_cst
self.is_categorical = is_categorical
self.l2_regularization = l2_regularization
self.n_features = X_binned.shape[1]
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.X_binned = X_binned
self.min_gain_to_split = min_gain_to_split
self.shrinkage = shrinkage
self.n_threads = n_threads
self.splittable_nodes = []
self.finalized_leaves = []
self.total_find_split_time = 0.0
self.total_compute_hist_time = 0.0
self.total_apply_split_time = 0.0
self.n_categorical_splits = 0
<DeepExtract>
n_samples = self.X_binned.shape[0]
depth = 0
sum_gradients = sum_parallel(gradients, self.n_threads)
if self.histogram_builder.hessians_are_constant:
sum_hessians = hessians[0] * n_samples
else:
sum_hessians = sum_parallel(hessians, self.n_threads)
self.root = TreeNode(depth=depth, sample_indices=self.splitter.partition, sum_gradients=sum_gradients, sum_hessians=sum_hessians, value=0)
self.root.partition_start = 0
self.root.partition_stop = n_samples
if self.root.n_samples < 2 * self.min_samples_leaf:
self._finalize_leaf(self.root)
return
if sum_hessians < self.splitter.min_hessian_to_split:
self._finalize_leaf(self.root)
return
if self.interaction_cst is not None:
self.root.interaction_cst_indices = range(len(self.interaction_cst))
allowed_features = set().union(*self.interaction_cst)
self.root.allowed_features = np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features))
tic = time()
self.root.histograms = self.histogram_builder.compute_histograms_brute(self.root.sample_indices, self.root.allowed_features)
self.total_compute_hist_time += time() - tic
tic = time()
self._compute_best_split_and_push(self.root)
self.total_find_split_time += time() - tic
</DeepExtract>
self.n_nodes = 1
|
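TreeGrower is a private helper of the histogram-based gradient boosting estimators, so the import path and dtypes below are an assumption based on recent scikit-learn versions: it expects Fortran-ordered uint8 binned data plus float32 gradients and hessians (a length-1 hessian array means constant hessians). A rough sketch only:

import numpy as np
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower  # private API

rng = np.random.RandomState(0)
n_samples, n_features = 100, 3
X_binned = np.asfortranarray(
    rng.randint(0, 255, size=(n_samples, n_features)), dtype=np.uint8)
gradients = rng.normal(size=n_samples).astype(np.float32)
hessians = np.ones(1, dtype=np.float32)   # shape (1,): hessians are constant

grower = TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=8)
grower.grow()                             # split nodes until no further split is possible
print(len(grower.finalized_leaves))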
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
(X, y) = make_regression(n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = (X, y)
(X, y) = REGRESSION_DATASET
estimator = clone(estimator_orig)
for method in ('decision_function', 'predict', 'predict_proba', 'predict_log_proba'):
if hasattr(estimator, method):
with raises(NotFittedError):
getattr(estimator, method)(X)
|
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
<DeepExtract>
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
(X, y) = make_regression(n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = (X, y)
(X, y) = REGRESSION_DATASET
</DeepExtract>
estimator = clone(estimator_orig)
for method in ('decision_function', 'predict', 'predict_proba', 'predict_log_proba'):
if hasattr(estimator, method):
with raises(NotFittedError):
getattr(estimator, method)(X)
|
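The behaviour being checked, seen from the user's side: calling any prediction method before `fit` must raise `NotFittedError`.

from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()
try:
    clf.predict([[0.0, 1.0]])
except NotFittedError as exc:
    print('not fitted yet:', exc)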
def fit(self, X, y, sample_weight=None):
if self.solver == 'lbfgs' and (not self.positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if self.positive:
if self.solver not in ['auto', 'lbfgs']:
raise ValueError(f"solver='{self.solver}' does not support positive fitting. Please set the solver to 'auto' or 'lbfgs', or set `positive=False`")
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ['auto', 'lbfgs', 'lsqr', 'sag', 'sparse_cg']:
raise ValueError("solver='{}' does not support fitting the intercept on sparse data. Please set the solver to 'auto' or 'lsqr', 'sparse_cg', 'sag', 'lbfgs' or set `fit_intercept=False`".format(self.solver))
if self.solver in ['lsqr', 'lbfgs']:
solver = self.solver
elif self.solver == 'sag' and self.max_iter is None and (self.tol > 0.0001):
warnings.warn('"sag" solver requires many iterations to fit an intercept with sparse inputs. Either set the solver to "auto" or "sparse_cg", or set a low "tol" and a high "max_iter" (especially if inputs are not standardized).')
solver = 'sag'
else:
solver = 'sparse_cg'
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(X, y, X_offset, y_offset, X_scale) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight)
if solver == 'sag' and sparse.issparse(X) and self.fit_intercept:
has_sw = sample_weight is not None
if 'sag' == 'auto':
if self.positive:
solver = 'lbfgs'
elif True:
solver = 'sag'
elif not sparse.issparse(X):
solver = 'cholesky'
else:
solver = 'sparse_cg'
if 'sag' not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga', 'lbfgs'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd' 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % 'sag')
if self.positive and 'sag' != 'lbfgs':
raise ValueError(f"When positive=True, only 'lbfgs' solver can be used. Please change solver {'sag'} to 'lbfgs' or set positive=False.")
if 'sag' == 'lbfgs' and (not self.positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if True and 'sag' != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the intercept. Please change solver to 'sag' or set return_intercept=False.")
if False:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), 'sag')
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order='C')
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
(n_samples, n_features) = X.shape
if y.ndim > 2:
raise ValueError('Target y has the wrong shape %s' % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
(n_samples_, n_targets) = y.shape
if n_samples != n_samples_:
raise ValueError('Number of samples in X and y does not correspond: %d != %d' % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if 'sag' not in ['sag', 'saga']:
(X, y, sample_weight_sqrt) = _rescale_data(X, y, sample_weight)
if self.alpha is not None and (not isinstance(self.alpha, np.ndarray)):
self.alpha = check_scalar(self.alpha, 'alpha', target_type=numbers.Real, min_val=0.0, include_boundaries='left')
self.alpha = np.asarray(self.alpha, dtype=X.dtype).ravel()
if self.alpha.size not in [1, n_targets]:
raise ValueError('Number of targets and number of penalties do not correspond: %d != %d' % (self.alpha.size, n_targets))
if self.alpha.size == 1 and n_targets > 1:
self.alpha = np.repeat(self.alpha, n_targets)
n_iter = None
if 'sag' == 'sparse_cg':
coef = _solve_sparse_cg(X, y, self.alpha, max_iter=self.max_iter, tol=self.tol, verbose=0, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif 'sag' == 'lsqr':
(coef, n_iter) = _solve_lsqr(X, y, alpha=self.alpha, fit_intercept=self.fit_intercept, max_iter=self.max_iter, tol=self.tol, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif 'sag' == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, self.alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, self.alpha)
except linalg.LinAlgError:
solver = 'svd'
elif 'sag' in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for (i, (alpha_i, target)) in enumerate(zip(self.alpha, y.T)):
init = {'coef': np.zeros((n_features + int(True), 1), dtype=X.dtype)}
(coef_, n_iter_, _) = sag_solver(X, target.ravel(), sample_weight, 'squared', alpha_i, 0, self.max_iter, self.tol, verbose, self.random_state, False, max_squared_sum, init, is_saga='sag' == 'saga')
if True:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif 'sag' == 'lbfgs':
coef = _solve_lbfgs(X, y, self.alpha, positive=self.positive, tol=self.tol, max_iter=self.max_iter, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
if 'sag' == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse inputs currently')
coef = _solve_svd(X, y, self.alpha)
if ravel:
coef = coef.ravel()
if True and True:
(self.coef_, self.n_iter_, self.intercept_) = (coef, n_iter, intercept)
elif True:
(self.coef_, self.n_iter_, self.intercept_) = (coef, intercept)
elif True:
(self.coef_, self.n_iter_, self.intercept_) = (coef, n_iter)
else:
(self.coef_, self.n_iter_, self.intercept_) = coef
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
params = {'X_offset': X_offset, 'X_scale': X_scale}
else:
params = {}
has_sw = sample_weight is not None
if solver == 'auto':
if self.positive:
solver = 'lbfgs'
elif False:
solver = 'sag'
elif not sparse.issparse(X):
solver = 'cholesky'
else:
solver = 'sparse_cg'
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga', 'lbfgs'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd' 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver)
if self.positive and solver != 'lbfgs':
raise ValueError(f"When positive=True, only 'lbfgs' solver can be used. Please change solver {solver} to 'lbfgs' or set positive=False.")
if solver == 'lbfgs' and (not self.positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if False and solver != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the intercept. Please change solver to 'sag' or set return_intercept=False.")
if False:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order='C')
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
(n_samples, n_features) = X.shape
if y.ndim > 2:
raise ValueError('Target y has the wrong shape %s' % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
(n_samples_, n_targets) = y.shape
if n_samples != n_samples_:
raise ValueError('Number of samples in X and y does not correspond: %d != %d' % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ['sag', 'saga']:
(X, y, sample_weight_sqrt) = _rescale_data(X, y, sample_weight)
if self.alpha is not None and (not isinstance(self.alpha, np.ndarray)):
self.alpha = check_scalar(self.alpha, 'alpha', target_type=numbers.Real, min_val=0.0, include_boundaries='left')
self.alpha = np.asarray(self.alpha, dtype=X.dtype).ravel()
if self.alpha.size not in [1, n_targets]:
raise ValueError('Number of targets and number of penalties do not correspond: %d != %d' % (self.alpha.size, n_targets))
if self.alpha.size == 1 and n_targets > 1:
self.alpha = np.repeat(self.alpha, n_targets)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, self.alpha, max_iter=self.max_iter, tol=self.tol, verbose=0, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif solver == 'lsqr':
(coef, n_iter) = _solve_lsqr(X, y, alpha=self.alpha, fit_intercept=self.fit_intercept, max_iter=self.max_iter, tol=self.tol, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, self.alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, self.alpha)
except linalg.LinAlgError:
solver = 'svd'
elif solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for (i, (alpha_i, target)) in enumerate(zip(self.alpha, y.T)):
init = {'coef': np.zeros((n_features + int(False), 1), dtype=X.dtype)}
(coef_, n_iter_, _) = sag_solver(X, target.ravel(), sample_weight, 'squared', alpha_i, 0, self.max_iter, self.tol, verbose, self.random_state, False, max_squared_sum, init, is_saga=solver == 'saga')
if False:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == 'lbfgs':
coef = _solve_lbfgs(X, y, self.alpha, positive=self.positive, tol=self.tol, max_iter=self.max_iter, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse inputs currently')
coef = _solve_svd(X, y, self.alpha)
if ravel:
coef = coef.ravel()
if True and False:
(self.coef_, self.n_iter_) = (coef, n_iter, intercept)
elif False:
(self.coef_, self.n_iter_) = (coef, intercept)
elif True:
(self.coef_, self.n_iter_) = (coef, n_iter)
else:
(self.coef_, self.n_iter_) = coef
self._set_intercept(X_offset, y_offset, X_scale)
return self
|
def fit(self, X, y, sample_weight=None):
if self.solver == 'lbfgs' and (not self.positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if self.positive:
if self.solver not in ['auto', 'lbfgs']:
raise ValueError(f"solver='{self.solver}' does not support positive fitting. Please set the solver to 'auto' or 'lbfgs', or set `positive=False`")
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ['auto', 'lbfgs', 'lsqr', 'sag', 'sparse_cg']:
raise ValueError("solver='{}' does not support fitting the intercept on sparse data. Please set the solver to 'auto' or 'lsqr', 'sparse_cg', 'sag', 'lbfgs' or set `fit_intercept=False`".format(self.solver))
if self.solver in ['lsqr', 'lbfgs']:
solver = self.solver
elif self.solver == 'sag' and self.max_iter is None and (self.tol > 0.0001):
warnings.warn('"sag" solver requires many iterations to fit an intercept with sparse inputs. Either set the solver to "auto" or "sparse_cg", or set a low "tol" and a high "max_iter" (especially if inputs are not standardized).')
solver = 'sag'
else:
solver = 'sparse_cg'
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(X, y, X_offset, y_offset, X_scale) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight)
if solver == 'sag' and sparse.issparse(X) and self.fit_intercept:
<DeepExtract>
has_sw = sample_weight is not None
if 'sag' == 'auto':
if self.positive:
solver = 'lbfgs'
elif True:
solver = 'sag'
elif not sparse.issparse(X):
solver = 'cholesky'
else:
solver = 'sparse_cg'
if 'sag' not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga', 'lbfgs'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd' 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % 'sag')
if self.positive and 'sag' != 'lbfgs':
raise ValueError(f"When positive=True, only 'lbfgs' solver can be used. Please change solver {'sag'} to 'lbfgs' or set positive=False.")
if 'sag' == 'lbfgs' and (not self.positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if True and 'sag' != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the intercept. Please change solver to 'sag' or set return_intercept=False.")
if False:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), 'sag')
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order='C')
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
(n_samples, n_features) = X.shape
if y.ndim > 2:
raise ValueError('Target y has the wrong shape %s' % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
(n_samples_, n_targets) = y.shape
if n_samples != n_samples_:
raise ValueError('Number of samples in X and y does not correspond: %d != %d' % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if 'sag' not in ['sag', 'saga']:
(X, y, sample_weight_sqrt) = _rescale_data(X, y, sample_weight)
if self.alpha is not None and (not isinstance(self.alpha, np.ndarray)):
self.alpha = check_scalar(self.alpha, 'alpha', target_type=numbers.Real, min_val=0.0, include_boundaries='left')
self.alpha = np.asarray(self.alpha, dtype=X.dtype).ravel()
if self.alpha.size not in [1, n_targets]:
raise ValueError('Number of targets and number of penalties do not correspond: %d != %d' % (self.alpha.size, n_targets))
if self.alpha.size == 1 and n_targets > 1:
self.alpha = np.repeat(self.alpha, n_targets)
n_iter = None
if 'sag' == 'sparse_cg':
coef = _solve_sparse_cg(X, y, self.alpha, max_iter=self.max_iter, tol=self.tol, verbose=0, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif 'sag' == 'lsqr':
(coef, n_iter) = _solve_lsqr(X, y, alpha=self.alpha, fit_intercept=self.fit_intercept, max_iter=self.max_iter, tol=self.tol, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif 'sag' == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, self.alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, self.alpha)
except linalg.LinAlgError:
solver = 'svd'
elif 'sag' in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for (i, (alpha_i, target)) in enumerate(zip(self.alpha, y.T)):
init = {'coef': np.zeros((n_features + int(True), 1), dtype=X.dtype)}
(coef_, n_iter_, _) = sag_solver(X, target.ravel(), sample_weight, 'squared', alpha_i, 0, self.max_iter, self.tol, verbose, self.random_state, False, max_squared_sum, init, is_saga='sag' == 'saga')
if True:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif 'sag' == 'lbfgs':
coef = _solve_lbfgs(X, y, self.alpha, positive=self.positive, tol=self.tol, max_iter=self.max_iter, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
if 'sag' == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse inputs currently')
coef = _solve_svd(X, y, self.alpha)
if ravel:
coef = coef.ravel()
if True and True:
(self.coef_, self.n_iter_, self.intercept_) = (coef, n_iter, intercept)
elif True:
(self.coef_, self.n_iter_, self.intercept_) = (coef, intercept)
elif True:
(self.coef_, self.n_iter_, self.intercept_) = (coef, n_iter)
else:
(self.coef_, self.n_iter_, self.intercept_) = coef
</DeepExtract>
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
params = {'X_offset': X_offset, 'X_scale': X_scale}
else:
params = {}
<DeepExtract>
has_sw = sample_weight is not None
if solver == 'auto':
if self.positive:
solver = 'lbfgs'
elif False:
solver = 'sag'
elif not sparse.issparse(X):
solver = 'cholesky'
else:
solver = 'sparse_cg'
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga', 'lbfgs'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd' 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver)
if self.positive and solver != 'lbfgs':
raise ValueError(f"When positive=True, only 'lbfgs' solver can be used. Please change solver {solver} to 'lbfgs' or set positive=False.")
if solver == 'lbfgs' and (not self.positive):
raise ValueError("'lbfgs' solver can be used only when positive=True. Please use another solver.")
if False and solver != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the intercept. Please change solver to 'sag' or set return_intercept=False.")
if False:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order='C')
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
(n_samples, n_features) = X.shape
if y.ndim > 2:
raise ValueError('Target y has the wrong shape %s' % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
(n_samples_, n_targets) = y.shape
if n_samples != n_samples_:
raise ValueError('Number of samples in X and y does not correspond: %d != %d' % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ['sag', 'saga']:
(X, y, sample_weight_sqrt) = _rescale_data(X, y, sample_weight)
if self.alpha is not None and (not isinstance(self.alpha, np.ndarray)):
self.alpha = check_scalar(self.alpha, 'alpha', target_type=numbers.Real, min_val=0.0, include_boundaries='left')
self.alpha = np.asarray(self.alpha, dtype=X.dtype).ravel()
if self.alpha.size not in [1, n_targets]:
raise ValueError('Number of targets and number of penalties do not correspond: %d != %d' % (self.alpha.size, n_targets))
if self.alpha.size == 1 and n_targets > 1:
self.alpha = np.repeat(self.alpha, n_targets)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, self.alpha, max_iter=self.max_iter, tol=self.tol, verbose=0, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif solver == 'lsqr':
(coef, n_iter) = _solve_lsqr(X, y, alpha=self.alpha, fit_intercept=self.fit_intercept, max_iter=self.max_iter, tol=self.tol, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, self.alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, self.alpha)
except linalg.LinAlgError:
solver = 'svd'
elif solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for (i, (alpha_i, target)) in enumerate(zip(self.alpha, y.T)):
init = {'coef': np.zeros((n_features + int(False), 1), dtype=X.dtype)}
(coef_, n_iter_, _) = sag_solver(X, target.ravel(), sample_weight, 'squared', alpha_i, 0, self.max_iter, self.tol, verbose, self.random_state, False, max_squared_sum, init, is_saga=solver == 'saga')
if False:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == 'lbfgs':
coef = _solve_lbfgs(X, y, self.alpha, positive=self.positive, tol=self.tol, max_iter=self.max_iter, X_offset=X_offset, X_scale=X_scale, sample_weight_sqrt=sample_weight_sqrt if has_sw else None)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse inputs currently')
coef = _solve_svd(X, y, self.alpha)
if ravel:
coef = coef.ravel()
if True and False:
(self.coef_, self.n_iter_) = (coef, n_iter, intercept)
elif False:
(self.coef_, self.n_iter_) = (coef, intercept)
elif True:
(self.coef_, self.n_iter_) = (coef, n_iter)
else:
(self.coef_, self.n_iter_) = coef
</DeepExtract>
self._set_intercept(X_offset, y_offset, X_scale)
return self
|
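A usage sketch of the branch handled above: `solver='sag'` on sparse input with an intercept goes through the special path that fits the intercept inside the solver (an explicit `max_iter` and small tolerance keep the intercept warning quiet).

import numpy as np
from scipy import sparse
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = sparse.random(100, 5, density=0.5, format='csr', random_state=0)
y = rng.normal(size=100)

model = Ridge(alpha=1.0, solver='sag', fit_intercept=True,
              max_iter=10000, tol=1e-6).fit(X, y)
print(model.coef_, model.intercept_)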
def encode_data(self, data, attributes):
current_row = 0
num_attributes = len(attributes)
for row in data:
new_data = []
if len(row) > 0 and max(row) >= num_attributes:
raise BadObject('Instance %d has %d attributes, expected %d' % (current_row, max(row) + 1, num_attributes))
for col in sorted(row):
v = row[col]
if v is None or v == '' or v != v:
s = '?'
else:
s = str(v)
if _RE_QUOTE_CHARS.search(s):
s = "'%s'" % _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
new_data.append('%d %s' % (col, s))
current_row += 1
yield ' '.join(['{', ','.join(new_data), '}'])
|
def encode_data(self, data, attributes):
current_row = 0
num_attributes = len(attributes)
for row in data:
new_data = []
if len(row) > 0 and max(row) >= num_attributes:
raise BadObject('Instance %d has %d attributes, expected %d' % (current_row, max(row) + 1, num_attributes))
for col in sorted(row):
v = row[col]
if v is None or v == '' or v != v:
s = '?'
else:
<DeepExtract>
s = str(v)
if _RE_QUOTE_CHARS.search(s):
s = "'%s'" % _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
</DeepExtract>
new_data.append('%d %s' % (col, s))
current_row += 1
yield ' '.join(['{', ','.join(new_data), '}'])
|
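Illustrative only (a simplified stand-in, not the library's code path): each sparse row maps column indices to values and is rendered as `{col value, ...}`, with `?` for missing entries and quoting when a value contains special characters.

# A hand-rolled rendering of one sparse ARFF row, mirroring the logic above.
row = {0: 1.5, 2: 'hello world', 4: None}

parts = []
for col in sorted(row):
    v = row[col]
    if v is None or v == '':
        s = '?'
    else:
        s = "'%s'" % v if ' ' in str(v) else str(v)   # crude stand-in for the quoting rules
    parts.append('%d %s' % (col, s))
print('{' + ','.join(parts) + '}')   # {0 1.5,2 'hello world',4 ?}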