| before | after |
|---|---|
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
if issparse(X):
raise TypeError('PCA does not support sparse input. See TruncatedSVD for a possible alternative.')
X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=self.copy)
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = 'randomized'
else:
self._fit_svd_solver = 'full'
    if self._fit_svd_solver == 'full':
        self._fit_full(X, n_components)
    elif self._fit_svd_solver in ['arpack', 'randomized']:
        self._fit_truncated(X, n_components, self._fit_svd_solver)
    return self
|
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
<DeepExtract>
if issparse(X):
raise TypeError('PCA does not support sparse input. See TruncatedSVD for a possible alternative.')
X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=self.copy)
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = 'randomized'
else:
self._fit_svd_solver = 'full'
    if self._fit_svd_solver == 'full':
        self._fit_full(X, n_components)
    elif self._fit_svd_solver in ['arpack', 'randomized']:
        self._fit_truncated(X, n_components, self._fit_svd_solver)
</DeepExtract>
return self
|
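A minimal usage sketch of the solver-selection branch above, assuming only that scikit-learn and NumPy are installed; the shapes and `n_components` value are illustrative:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(600, 10)  # max(X.shape) > 500, so 'auto' is not forced to 'full'

# 1 <= n_components < 0.8 * min(X.shape) == 8, so 'auto' resolves to 'randomized'.
pca = PCA(n_components=3, svd_solver='auto').fit(X)
print(pca._fit_svd_solver)                   # 'randomized' (private attribute set in fit)
print(pca.explained_variance_ratio_.shape)   # (3,)
```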
def learning_curve(estimator, X, y, *, groups=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=None, pre_dispatch='all', verbose=0, shuffle=False, random_state=None, error_score=np.nan, return_times=False, fit_params=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.model_selection import learning_curve
>>> X, y = make_classification(n_samples=100, n_features=10, random_state=42)
>>> tree = DecisionTreeClassifier(max_depth=4, random_state=42)
>>> train_size_abs, train_scores, test_scores = learning_curve(
... tree, X, y, train_sizes=[0.3, 0.6, 0.9]
... )
>>> for train_size, cv_train_scores, cv_test_scores in zip(
... train_size_abs, train_scores, test_scores
... ):
... print(f"{train_size} samples were used to train the model")
... print(f"The average train accuracy is {cv_train_scores.mean():.2f}")
... print(f"The average test accuracy is {cv_test_scores.mean():.2f}")
24 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.85
48 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.90
72 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.93
"""
if exploit_incremental_learning and (not hasattr(estimator, 'partial_fit')):
raise ValueError('An estimator must support the partial_fit interface to exploit incremental learning')
(X, y, groups) = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError('train_sizes has been interpreted as fractions of the maximum number of training samples and must be within (0, 1], but is within [%f, %f].' % (n_min_required_samples, n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(dtype=int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples)
elif n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples:
raise ValueError('train_sizes has been interpreted as absolute numbers of training samples and must be within (0, %d], but is within [%d, %d].' % (n_max_training_samples, n_min_required_samples, n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number of ticks will be less than the size of 'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print('[learning_curve] Training set sizes: ' + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for (train, test) in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel((delayed(_incremental_fit_estimator)(clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose, return_times, error_score=error_score, fit_params=fit_params) for (train, test) in cv_iter))
out = np.asarray(out).transpose((2, 1, 0))
else:
train_test_proportions = []
for (train, test) in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
results = parallel((delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=fit_params, return_train_score=True, error_score=error_score, return_times=return_times) for (train, test) in train_test_proportions))
results = {key: np.asarray([score[key] for score in results]) if isinstance(results[0][key], numbers.Number) else [score[key] for score in results] for key in results[0]}
train_scores = results['train_scores'].reshape(-1, n_unique_ticks).T
test_scores = results['test_scores'].reshape(-1, n_unique_ticks).T
out = [train_scores, test_scores]
if return_times:
fit_times = results['fit_time'].reshape(-1, n_unique_ticks).T
score_times = results['score_time'].reshape(-1, n_unique_ticks).T
out.extend([fit_times, score_times])
ret = (train_sizes_abs, out[0], out[1])
if return_times:
ret = ret + (out[2], out[3])
return ret
|
def learning_curve(estimator, X, y, *, groups=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=None, pre_dispatch='all', verbose=0, shuffle=False, random_state=None, error_score=np.nan, return_times=False, fit_params=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.model_selection import learning_curve
>>> X, y = make_classification(n_samples=100, n_features=10, random_state=42)
>>> tree = DecisionTreeClassifier(max_depth=4, random_state=42)
>>> train_size_abs, train_scores, test_scores = learning_curve(
... tree, X, y, train_sizes=[0.3, 0.6, 0.9]
... )
>>> for train_size, cv_train_scores, cv_test_scores in zip(
... train_size_abs, train_scores, test_scores
... ):
... print(f"{train_size} samples were used to train the model")
... print(f"The average train accuracy is {cv_train_scores.mean():.2f}")
... print(f"The average test accuracy is {cv_test_scores.mean():.2f}")
24 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.85
48 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.90
72 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.93
"""
if exploit_incremental_learning and (not hasattr(estimator, 'partial_fit')):
raise ValueError('An estimator must support the partial_fit interface to exploit incremental learning')
(X, y, groups) = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
<DeepExtract>
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError('train_sizes has been interpreted as fractions of the maximum number of training samples and must be within (0, 1], but is within [%f, %f].' % (n_min_required_samples, n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(dtype=int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples)
elif n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples:
raise ValueError('train_sizes has been interpreted as absolute numbers of training samples and must be within (0, %d], but is within [%d, %d].' % (n_max_training_samples, n_min_required_samples, n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number of ticks will be less than the size of 'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
</DeepExtract>
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print('[learning_curve] Training set sizes: ' + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for (train, test) in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel((delayed(_incremental_fit_estimator)(clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose, return_times, error_score=error_score, fit_params=fit_params) for (train, test) in cv_iter))
out = np.asarray(out).transpose((2, 1, 0))
else:
train_test_proportions = []
for (train, test) in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
results = parallel((delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=fit_params, return_train_score=True, error_score=error_score, return_times=return_times) for (train, test) in train_test_proportions))
<DeepExtract>
results = {key: np.asarray([score[key] for score in results]) if isinstance(results[0][key], numbers.Number) else [score[key] for score in results] for key in results[0]}
</DeepExtract>
train_scores = results['train_scores'].reshape(-1, n_unique_ticks).T
test_scores = results['test_scores'].reshape(-1, n_unique_ticks).T
out = [train_scores, test_scores]
if return_times:
fit_times = results['fit_time'].reshape(-1, n_unique_ticks).T
score_times = results['score_time'].reshape(-1, n_unique_ticks).T
out.extend([fit_times, score_times])
ret = (train_sizes_abs, out[0], out[1])
if return_times:
ret = ret + (out[2], out[3])
return ret
|
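A short sketch, as an illustration only, of the fraction-to-absolute `train_sizes` conversion inlined above; the fold size of 80 matches the docstring example (100 samples with the default 5-fold CV):

```python
import numpy as np

n_max_training_samples = 80            # len(cv_iter[0][0]) for 5-fold CV on 100 samples
train_sizes = np.asarray([0.3, 0.6, 0.9])

# Float ticks in (0, 1] are scaled by the fold size, clipped, and deduplicated.
abs_sizes = (train_sizes * n_max_training_samples).astype(int)
abs_sizes = np.unique(np.clip(abs_sizes, 1, n_max_training_samples))
print(abs_sizes)  # [24 48 72], the sizes printed in the docstring example
```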
def fit(self, X, y=None):
"""Estimate the optimal parameter lambda for each feature.
The optimal lambda parameter for minimizing skewness is estimated on
each feature independently using maximum likelihood.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
X = self._check_input(X, in_fit=True, check_positive=True)
    if not self.copy:
        X = X.copy()  # force a copy so that fit does not change X in place
    optim_function = {'box-cox': self._box_cox_optimize, 'yeo-johnson': self._yeo_johnson_optimize}[self.method]
    with np.errstate(invalid='ignore'):
        self.lambdas_ = np.array([optim_function(col) for col in X.T])
    if self.standardize:
        transform_function = {'box-cox': boxcox, 'yeo-johnson': self._yeo_johnson_transform}[self.method]
        for (i, lmbda) in enumerate(self.lambdas_):
            with np.errstate(invalid='ignore'):
                X[:, i] = transform_function(X[:, i], lmbda)
        self._scaler = StandardScaler(copy=False)
        self._scaler.fit(X)
return self
|
def fit(self, X, y=None):
"""Estimate the optimal parameter lambda for each feature.
The optimal lambda parameter for minimizing skewness is estimated on
each feature independently using maximum likelihood.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
<DeepExtract>
X = self._check_input(X, in_fit=True, check_positive=True)
    if not self.copy:
        X = X.copy()  # force a copy so that fit does not change X in place
    optim_function = {'box-cox': self._box_cox_optimize, 'yeo-johnson': self._yeo_johnson_optimize}[self.method]
    with np.errstate(invalid='ignore'):
        self.lambdas_ = np.array([optim_function(col) for col in X.T])
    if self.standardize:
        transform_function = {'box-cox': boxcox, 'yeo-johnson': self._yeo_johnson_transform}[self.method]
        for (i, lmbda) in enumerate(self.lambdas_):
            with np.errstate(invalid='ignore'):
                X[:, i] = transform_function(X[:, i], lmbda)
        self._scaler = StandardScaler(copy=False)
        self._scaler.fit(X)
</DeepExtract>
return self
|
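A minimal sketch of calling the fitted transformer above, assuming strictly positive input since Box-Cox requires it:

```python
import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(100, 2))  # skewed, strictly positive features

# One lambda is estimated per feature by maximum likelihood during fit.
pt = PowerTransformer(method='box-cox', standardize=True).fit(X)
print(pt.lambdas_.shape)                       # (2,)
print(np.round(pt.transform(X).mean(axis=0)))  # ~[0. 0.] because standardize=True
```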
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc'], multi_output=True)
    pass  # placeholder: the inlined helper is a no-op here
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
(self.scores_, self.pvalues_) = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
|
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc'], multi_output=True)
<DeepExtract>
    pass  # placeholder: the extracted helper is a no-op here
</DeepExtract>
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
(self.scores_, self.pvalues_) = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
|
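A usage sketch for the filter `fit` above via `SelectKBest`, whose `f_classif` score function returns a `(scores, pvalues)` tuple, so both attributes are populated:

```python
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif

X, y = make_classification(n_samples=100, n_features=8, random_state=0)

selector = SelectKBest(score_func=f_classif, k=3).fit(X, y)
print(selector.scores_.shape, selector.pvalues_.shape)  # (8,) (8,)
```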
def _fit(self, X):
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
(n_rows, n_cols) = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
    normalized_data = an
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
(u, _, vt) = randomized_svd(normalized_data, n_sv, random_state=self.random_state, **kwargs)
elif self.svd_method == 'arpack':
(u, _, vt) = svds(normalized_data, k=n_sv, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
A = safe_sparse_dot(normalized_data.T, normalized_data)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, v) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(normalized_data, normalized_data.T)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, u) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, 1:]
vt = vt[1:]
    v = vt.T
z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
if self.mini_batch:
model = MiniBatchKMeans(self.n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
else:
model = KMeans(self.n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
    model.fit(z)
    labels = model.labels_
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c for c in range(self.n_clusters)])
|
def _fit(self, X):
<DeepExtract>
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
(n_rows, n_cols) = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
    normalized_data = an
</DeepExtract>
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
<DeepExtract>
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
(u, _, vt) = randomized_svd(normalized_data, n_sv, random_state=self.random_state, **kwargs)
elif self.svd_method == 'arpack':
(u, _, vt) = svds(normalized_data, k=n_sv, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
A = safe_sparse_dot(normalized_data.T, normalized_data)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, v) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(normalized_data, normalized_data.T)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, u) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, 1:]
vt = vt[1:]
    v = vt.T
</DeepExtract>
z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
<DeepExtract>
if self.mini_batch:
model = MiniBatchKMeans(self.n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
else:
model = KMeans(self.n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
    model.fit(z)
    labels = model.labels_
</DeepExtract>
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c for c in range(self.n_clusters)])
|
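A small sketch exercising the `_fit` above through the public estimator, assuming it belongs to `SpectralCoclustering`; the planted block structure is synthetic:

```python
import numpy as np
from sklearn.cluster import SpectralCoclustering

rng = np.random.RandomState(0)
X = rng.rand(20, 12)
X[:10, :6] += 2   # plant a rough 2-block checkerboard
X[10:, 6:] += 2

model = SpectralCoclustering(n_clusters=2, svd_method='randomized', random_state=0).fit(X)
print(model.row_labels_.shape, model.column_labels_.shape)  # (20,) (12,)
print(model.rows_.shape, model.columns_.shape)              # (2, 20) (2, 12)
```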
def test_predictor_from_grower():
n_bins = 256
rng = np.random.RandomState(42)
n_samples = 10000
X_binned = rng.randint(0, n_bins - 1, size=(n_samples, 2), dtype=X_BINNED_DTYPE)
X_binned = np.asfortranarray(X_binned)
def true_decision_function(input_features):
"""Ground truth decision function
This is a very simple yet asymmetric decision tree. Therefore the
grower code should have no trouble recovering the decision function
from 10000 training samples.
"""
        if input_features[0] <= n_bins // 2:
            return -1
        else:
            return -1 if input_features[1] <= n_bins // 3 else 1
    target = np.array([true_decision_function(x) for x in X_binned], dtype=Y_DTYPE)
    all_gradients = target.astype(G_H_DTYPE)
    constant_hessian = True  # assumed default of the extracted data-generation helper
    shape_hessians = 1 if constant_hessian else all_gradients.shape
    all_hessians = np.ones(shape=shape_hessians, dtype=G_H_DTYPE)
grower = TreeGrower(X_binned, all_gradients, all_hessians, n_bins=n_bins, shrinkage=1.0, max_leaf_nodes=3, min_samples_leaf=5)
grower.grow()
assert grower.n_nodes == 5
predictor = grower.make_predictor(binning_thresholds=np.zeros((X_binned.shape[1], n_bins)))
assert predictor.nodes.shape[0] == 5
assert predictor.nodes['is_leaf'].sum() == 3
input_data = np.array([[0, 0], [42, 99], [128, 254], [129, 0], [129, 85], [254, 85], [129, 86], [129, 254], [242, 100]], dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
predictions = predictor.predict_binned(input_data, missing_values_bin_idx, n_threads)
expected_targets = [1, 1, 1, 1, 1, 1, -1, -1, -1]
assert np.allclose(predictions, expected_targets)
predictions = predictor.predict_binned(X_binned, missing_values_bin_idx, n_threads)
assert np.allclose(predictions, -all_gradients)
|
def test_predictor_from_grower():
n_bins = 256
<DeepExtract>
rng = np.random.RandomState(42)
n_samples = 10000
X_binned = rng.randint(0, n_bins - 1, size=(n_samples, 2), dtype=X_BINNED_DTYPE)
X_binned = np.asfortranarray(X_binned)
def true_decision_function(input_features):
"""Ground truth decision function
This is a very simple yet asymmetric decision tree. Therefore the
grower code should have no trouble recovering the decision function
from 10000 training samples.
"""
        if input_features[0] <= n_bins // 2:
            return -1
        else:
            return -1 if input_features[1] <= n_bins // 3 else 1
    target = np.array([true_decision_function(x) for x in X_binned], dtype=Y_DTYPE)
    all_gradients = target.astype(G_H_DTYPE)
    constant_hessian = True  # assumed default of the extracted data-generation helper
    shape_hessians = 1 if constant_hessian else all_gradients.shape
    all_hessians = np.ones(shape=shape_hessians, dtype=G_H_DTYPE)
</DeepExtract>
grower = TreeGrower(X_binned, all_gradients, all_hessians, n_bins=n_bins, shrinkage=1.0, max_leaf_nodes=3, min_samples_leaf=5)
grower.grow()
assert grower.n_nodes == 5
predictor = grower.make_predictor(binning_thresholds=np.zeros((X_binned.shape[1], n_bins)))
assert predictor.nodes.shape[0] == 5
assert predictor.nodes['is_leaf'].sum() == 3
input_data = np.array([[0, 0], [42, 99], [128, 254], [129, 0], [129, 85], [254, 85], [129, 86], [129, 254], [242, 100]], dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
predictions = predictor.predict_binned(input_data, missing_values_bin_idx, n_threads)
expected_targets = [1, 1, 1, 1, 1, 1, -1, -1, -1]
assert np.allclose(predictions, expected_targets)
predictions = predictor.predict_binned(X_binned, missing_values_bin_idx, n_threads)
assert np.allclose(predictions, -all_gradients)
|
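For reference, a hedged sketch re-deriving `expected_targets` from the ground-truth rule on the same probe points; the sign flip reflects that the tree is fit to predict the negative gradient:

```python
n_bins = 256
probes = [(0, 0), (42, 99), (128, 254), (129, 0), (129, 85),
          (254, 85), (129, 86), (129, 254), (242, 100)]

def true_decision_function(f):
    if f[0] <= n_bins // 2:                    # split at 128
        return -1
    return -1 if f[1] <= n_bins // 3 else 1    # second split at 85

print([-true_decision_function(p) for p in probes])
# [1, 1, 1, 1, 1, 1, -1, -1, -1]  == expected_targets
```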
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average', 'variance_weighted'}), 'array-like', None], 'force_finite': ['boolean']})
def r2_score(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average', force_finite=True):
""":math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). In the general case when the true y is
non-constant, a constant model that always predicts the average y
disregarding the input features would get a :math:`R^2` score of 0.0.
In the particular case when ``y_true`` is constant, the :math:`R^2` score
is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf``
    (imperfect predictions). To prevent such non-finite numbers from polluting
higher-level experiments such as a grid search cross-validation, by default
these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect
predictions) respectively. You can set ``force_finite`` to ``False`` to
prevent this fix from happening.
Note: when the prediction residuals have zero mean, the :math:`R^2` score
is identical to the
:func:`Explained Variance score <explained_variance_score>`.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, array-like of shape (n_outputs,) or None, default='uniform_average'
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
force_finite : bool, default=True
Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
data should be replaced with real numbers (``1.0`` if prediction is
perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
for hyperparameters' search procedures (e.g. grid search
cross-validation).
.. versionadded:: 1.1
Returns
-------
z : float or ndarray of floats
The :math:`R^2` score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, :math:`R^2` score may be negative (it need not
actually be the square of a quantity R).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred)
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred,
... multioutput='variance_weighted')
0.938...
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 2, 3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1, 2, 3]
>>> y_pred = [2, 2, 2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1, 2, 3]
>>> y_pred = [3, 2, 1]
>>> r2_score(y_true, y_pred)
-3.0
>>> y_true = [-2, -2, -2]
>>> y_pred = [-2, -2, -2]
>>> r2_score(y_true, y_pred)
1.0
>>> r2_score(y_true, y_pred, force_finite=False)
nan
>>> y_true = [-2, -2, -2]
>>> y_pred = [-2, -2, -2 + 1e-8]
>>> r2_score(y_true, y_pred)
0.0
>>> r2_score(y_true, y_pred, force_finite=False)
-inf
"""
check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype='numeric')
    y_pred = check_array(y_pred, ensure_2d=False, dtype='numeric')
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError('Custom weights are useful only in multi-output cases.')
elif n_outputs != len(multioutput):
raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = 'R^2 score is not well-defined with less than two samples.'
warnings.warn(msg, UndefinedMetricWarning)
return float('nan')
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.0
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)
denominator = (weight * (y_true - np.average(y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0, dtype=np.float64)
return _assemble_r2_explained_variance(numerator=numerator, denominator=denominator, n_outputs=y_true.shape[1], multioutput=multioutput, force_finite=force_finite)
|
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average', 'variance_weighted'}), 'array-like', None], 'force_finite': ['boolean']})
def r2_score(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average', force_finite=True):
""":math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). In the general case when the true y is
non-constant, a constant model that always predicts the average y
disregarding the input features would get a :math:`R^2` score of 0.0.
In the particular case when ``y_true`` is constant, the :math:`R^2` score
is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf``
    (imperfect predictions). To prevent such non-finite numbers from polluting
higher-level experiments such as a grid search cross-validation, by default
these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect
predictions) respectively. You can set ``force_finite`` to ``False`` to
prevent this fix from happening.
Note: when the prediction residuals have zero mean, the :math:`R^2` score
is identical to the
:func:`Explained Variance score <explained_variance_score>`.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, array-like of shape (n_outputs,) or None, default='uniform_average'
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
force_finite : bool, default=True
Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
data should be replaced with real numbers (``1.0`` if prediction is
perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
for hyperparameters' search procedures (e.g. grid search
cross-validation).
.. versionadded:: 1.1
Returns
-------
z : float or ndarray of floats
The :math:`R^2` score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, :math:`R^2` score may be negative (it need not
actually be the square of a quantity R).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred)
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred,
... multioutput='variance_weighted')
0.938...
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 2, 3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1, 2, 3]
>>> y_pred = [2, 2, 2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1, 2, 3]
>>> y_pred = [3, 2, 1]
>>> r2_score(y_true, y_pred)
-3.0
>>> y_true = [-2, -2, -2]
>>> y_pred = [-2, -2, -2]
>>> r2_score(y_true, y_pred)
1.0
>>> r2_score(y_true, y_pred, force_finite=False)
nan
>>> y_true = [-2, -2, -2]
>>> y_pred = [-2, -2, -2 + 1e-8]
>>> r2_score(y_true, y_pred)
0.0
>>> r2_score(y_true, y_pred, force_finite=False)
-inf
"""
<DeepExtract>
check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype='numeric')
    y_pred = check_array(y_pred, ensure_2d=False, dtype='numeric')
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError('Custom weights are useful only in multi-output cases.')
elif n_outputs != len(multioutput):
raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
</DeepExtract>
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = 'R^2 score is not well-defined with less than two samples.'
warnings.warn(msg, UndefinedMetricWarning)
return float('nan')
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.0
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)
denominator = (weight * (y_true - np.average(y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0, dtype=np.float64)
return _assemble_r2_explained_variance(numerator=numerator, denominator=denominator, n_outputs=y_true.shape[1], multioutput=multioutput, force_finite=force_finite)
|
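A minimal sketch of the single-output, unweighted core of the computation above (`R^2 = 1 - SS_res / SS_tot`), checked against the public function:

```python
import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

ss_res = ((y_true - y_pred) ** 2).sum()         # the numerator above
ss_tot = ((y_true - y_true.mean()) ** 2).sum()  # the denominator above
print(1 - ss_res / ss_tot)       # 0.9486...
print(r2_score(y_true, y_pred))  # same value
```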
@available_if(_final_estimator_has('fit_predict'))
def fit_predict(self, X, y=None, **fit_params):
"""Transform the data, and apply `fit_predict` with the final estimator.
Call `fit_transform` of each transformer in the pipeline. The
transformed data are finally passed to the final estimator that calls
`fit_predict` method. Only valid if the final estimator implements
`fit_predict`.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : ndarray
Result of calling `fit_predict` on the final estimator.
"""
self._validate_params()
fit_params_steps = {name: {} for (name, step) in self.steps if step is not None}
for (pname, pval) in fit_params.items():
if '__' not in pname:
raise ValueError('Pipeline.fit does not accept the {} parameter. You can pass parameters to specific steps of your pipeline using the stepname__parameter format, e.g. `Pipeline.fit(X, y, logisticregression__sample_weight=sample_weight)`.'.format(pname))
(step, param) = pname.split('__', 1)
fit_params_steps[step][param] = pval
self.steps = list(self.steps)
self._validate_steps()
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for (step_idx, name, transformer) in self._iter(with_final=False, filter_passthrough=False):
if transformer is None or transformer == 'passthrough':
with _print_elapsed_time('Pipeline', self._log_message(step_idx)):
continue
if hasattr(memory, 'location') and memory.location is None:
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
(X, fitted_transformer) = fit_transform_one_cached(cloned_transformer, X, y, None, message_clsname='Pipeline', message=self._log_message(step_idx), **fit_params_steps[name])
self.steps[step_idx] = (name, fitted_transformer)
Xt = X
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
with _print_elapsed_time('Pipeline', self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)
return y_pred
|
@available_if(_final_estimator_has('fit_predict'))
def fit_predict(self, X, y=None, **fit_params):
"""Transform the data, and apply `fit_predict` with the final estimator.
Call `fit_transform` of each transformer in the pipeline. The
transformed data are finally passed to the final estimator that calls
`fit_predict` method. Only valid if the final estimator implements
`fit_predict`.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : ndarray
Result of calling `fit_predict` on the final estimator.
"""
self._validate_params()
<DeepExtract>
fit_params_steps = {name: {} for (name, step) in self.steps if step is not None}
for (pname, pval) in fit_params.items():
if '__' not in pname:
raise ValueError('Pipeline.fit does not accept the {} parameter. You can pass parameters to specific steps of your pipeline using the stepname__parameter format, e.g. `Pipeline.fit(X, y, logisticregression__sample_weight=sample_weight)`.'.format(pname))
(step, param) = pname.split('__', 1)
fit_params_steps[step][param] = pval
</DeepExtract>
<DeepExtract>
self.steps = list(self.steps)
self._validate_steps()
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for (step_idx, name, transformer) in self._iter(with_final=False, filter_passthrough=False):
if transformer is None or transformer == 'passthrough':
with _print_elapsed_time('Pipeline', self._log_message(step_idx)):
continue
if hasattr(memory, 'location') and memory.location is None:
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
(X, fitted_transformer) = fit_transform_one_cached(cloned_transformer, X, y, None, message_clsname='Pipeline', message=self._log_message(step_idx), **fit_params_steps[name])
self.steps[step_idx] = (name, fitted_transformer)
Xt = X
</DeepExtract>
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
with _print_elapsed_time('Pipeline', self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)
return y_pred
|
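A usage sketch, assuming a clusterer as the final step since it exposes `fit_predict`; the `km__...` prefix illustrates the step-scoped fit-parameter routing described above:

```python
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, _ = make_blobs(n_samples=60, centers=3, random_state=0)

pipe = Pipeline([('scale', StandardScaler()),
                 ('km', KMeans(n_clusters=3, n_init=10, random_state=0))])
labels = pipe.fit_predict(X, km__sample_weight=None)  # routed to KMeans.fit
print(labels.shape)  # (60,)
```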
def show_versions():
"""Print useful debugging information"
.. versionadded:: 0.20
"""
python = sys.version.replace('\n', ' ')
blob = [('python', python), ('executable', sys.executable), ('machine', platform.platform())]
sys_info = dict(blob)
deps = ['pip', 'setuptools', 'numpy', 'scipy', 'Cython', 'pandas', 'matplotlib', 'joblib', 'threadpoolctl']
deps_info = {'sklearn': __version__}
from importlib.metadata import version, PackageNotFoundError
for modname in deps:
try:
deps_info[modname] = version(modname)
except PackageNotFoundError:
deps_info[modname] = None
print('\nSystem:')
for (k, stat) in sys_info.items():
print('{k:>10}: {stat}'.format(k=k, stat=stat))
print('\nPython dependencies:')
for (k, stat) in deps_info.items():
print('{k:>13}: {stat}'.format(k=k, stat=stat))
print('\n{k}: {stat}'.format(k='Built with OpenMP', stat=_openmp_parallelism_enabled()))
threadpool_results = threadpool_info()
if threadpool_results:
print()
print('threadpoolctl info:')
for (i, result) in enumerate(threadpool_results):
for (key, val) in result.items():
print(f'{key:>15}: {val}')
if i != len(threadpool_results) - 1:
print()
|
def show_versions():
"""Print useful debugging information"
.. versionadded:: 0.20
"""
<DeepExtract>
python = sys.version.replace('\n', ' ')
blob = [('python', python), ('executable', sys.executable), ('machine', platform.platform())]
sys_info = dict(blob)
</DeepExtract>
<DeepExtract>
deps = ['pip', 'setuptools', 'numpy', 'scipy', 'Cython', 'pandas', 'matplotlib', 'joblib', 'threadpoolctl']
deps_info = {'sklearn': __version__}
from importlib.metadata import version, PackageNotFoundError
for modname in deps:
try:
deps_info[modname] = version(modname)
except PackageNotFoundError:
deps_info[modname] = None
</DeepExtract>
print('\nSystem:')
for (k, stat) in sys_info.items():
print('{k:>10}: {stat}'.format(k=k, stat=stat))
print('\nPython dependencies:')
for (k, stat) in deps_info.items():
print('{k:>13}: {stat}'.format(k=k, stat=stat))
print('\n{k}: {stat}'.format(k='Built with OpenMP', stat=_openmp_parallelism_enabled()))
threadpool_results = threadpool_info()
if threadpool_results:
print()
print('threadpoolctl info:')
for (i, result) in enumerate(threadpool_results):
for (key, val) in result.items():
print(f'{key:>15}: {val}')
if i != len(threadpool_results) - 1:
print()
|
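`show_versions` is exposed at the package top level; the typical invocation when reporting an issue:

```python
import sklearn

sklearn.show_versions()
```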
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y.
:math:`K(X, Y) = (gamma <X, Y> + coef0)^{degree}`
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : int, default=3
Kernel degree.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
    (X, Y, dtype_float) = _return_float_dtype(X, Y)
    estimator = 'check_pairwise_arrays'
    # Defaults of the inlined check_pairwise_arrays helper: CSR-accepting,
    # no copy, finite values required, not a precomputed kernel.
    dtype = dtype_float
    if Y is X or Y is None:
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype, copy=False, force_all_finite=True, estimator=estimator)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype, copy=False, force_all_finite=True, estimator=estimator)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype, copy=False, force_all_finite=True, estimator=estimator)
    if X.shape[1] != Y.shape[1]:
        raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
|
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y.
:math:`K(X, Y) = (gamma <X, Y> + coef0)^{degree}`
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : int, default=3
Kernel degree.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
<DeepExtract>
    (X, Y, dtype_float) = _return_float_dtype(X, Y)
    estimator = 'check_pairwise_arrays'
    # Defaults of the inlined check_pairwise_arrays helper: CSR-accepting,
    # no copy, finite values required, not a precomputed kernel.
    dtype = dtype_float
    if Y is X or Y is None:
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype, copy=False, force_all_finite=True, estimator=estimator)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype, copy=False, force_all_finite=True, estimator=estimator)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype, copy=False, force_all_finite=True, estimator=estimator)
    if X.shape[1] != Y.shape[1]:
        raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
</DeepExtract>
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
|
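A quick sketch verifying the formula above against a direct NumPy computation; with `gamma=None` it falls back to `1 / n_features`:

```python
import numpy as np
from sklearn.metrics.pairwise import polynomial_kernel

X = np.array([[0.0, 1.0], [1.0, 1.0]])

K = polynomial_kernel(X, degree=3, coef0=1)  # gamma defaults to 1 / 2 here
manual = (0.5 * (X @ X.T) + 1) ** 3
print(np.allclose(K, manual))  # True
```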
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10, scoring=None, fit_intercept=False, max_iter=100, tol=0.0001, class_weight=None, verbose=0, solver='lbfgs', penalty='l2', dual=False, intercept_scaling=1.0, multi_class='auto', random_state=None, max_squared_sum=None, sample_weight=None, l1_ratio=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool, default=False
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, default='lbfgs'
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iterations for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = sample_weight[train]
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
if False:
X_train = check_array(X_train, accept_sparse='csr', dtype=np.float64, accept_large_sparse=solver not in ['liblinear', 'sag', 'saga'])
y_train = check_array(y_train, ensure_2d=False, dtype=None)
check_consistent_length(X_train, y_train)
(n_samples, n_features) = X_train.shape
classes = np.unique(y_train)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if classes.size > 2:
raise ValueError('To fit OvR, use the pos_class argument')
pos_class = classes[1]
sample_weight = _check_sample_weight(sample_weight, X_train, dtype=X_train.dtype, copy=True)
if solver == 'newton-cholesky':
sw_sum = sample_weight.sum()
sample_weight = sample_weight / sw_sum
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y_train)
sample_weight *= class_weight_[le.fit_transform(y_train)]
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X_train.dtype)
mask = y_train == pos_class
y_bin = np.ones(y_train.shape, dtype=X_train.dtype)
if solver in ['lbfgs', 'newton-cg', 'newton-cholesky']:
mask_classes = np.array([0, 1])
y_bin[~mask] = 0.0
else:
mask_classes = np.array([-1, 1])
y_bin[~mask] = -1.0
if class_weight == 'balanced':
class_weight_ = compute_class_weight(class_weight, classes=mask_classes, y=y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver in ['sag', 'saga', 'lbfgs', 'newton-cg']:
le = LabelEncoder()
Y_multi = le.fit_transform(y_train).astype(X_train.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y_train)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros((classes.size, n_features + int(fit_intercept)), order='F', dtype=X_train.dtype)
if coef is not None:
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError('Initialization coef is of shape %d, expected shape %d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if coef.shape[0] != n_classes or coef.shape[1] not in (n_features, n_features + 1):
raise ValueError('Initialization coef is of shape (%d, %d), expected shape (%d, %d) or (%d, %d)' % (coef.shape[0], coef.shape[1], classes.size, n_features, classes.size, n_features + 1))
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel(order='F')
loss = LinearModelLoss(base_loss=HalfMultinomialLoss(n_classes=classes.size), fit_intercept=fit_intercept)
target = Y_multi
if solver == 'lbfgs':
func = loss.loss_gradient
elif solver == 'newton-cg':
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept)
func = loss.loss_gradient
elif solver == 'newton-cg':
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept)
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product
elif solver == 'newton-cholesky':
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept)
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for (i, C) in enumerate(Cs):
if solver == 'lbfgs':
l2_reg_strength = 1.0 / C
iprint = [-1, 50, 1, 100, 101][np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
opt_res = optimize.minimize(func, w0, method='L-BFGS-B', jac=True, args=(X_train, target, sample_weight, l2_reg_strength, n_threads), options={'iprint': iprint, 'gtol': tol, 'maxiter': max_iter})
n_iter_i = _check_optimize_result(solver, opt_res, max_iter, extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
(w0, loss) = (opt_res.x, opt_res.fun)
elif solver == 'newton-cg':
l2_reg_strength = 1.0 / C
args = (X_train, target, sample_weight, l2_reg_strength, n_threads)
(w0, n_iter_i) = _newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol)
elif solver == 'newton-cholesky':
l2_reg_strength = 1.0 / C / sw_sum
sol = NewtonCholeskySolver(coef=w0, linear_loss=loss, l2_reg_strength=l2_reg_strength, tol=tol, max_iter=max_iter, n_threads=n_threads, verbose=verbose)
w0 = sol.solve(X=X_train, y=target, sample_weight=sample_weight)
n_iter_i = sol.iteration
elif solver == 'liblinear':
(coef_, intercept_, n_iter_i) = _fit_liblinear(X_train, target, C, fit_intercept, intercept_scaling, None, penalty, dual, verbose, max_iter, tol, random_state, sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X_train.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.0
beta = 1.0 / C
elif penalty == 'l2':
alpha = 1.0 / C
beta = 0.0
else:
alpha = 1.0 / C * (1 - l1_ratio)
beta = 1.0 / C * l1_ratio
(w0, n_iter_i, warm_start_sag) = sag_solver(X_train, target, sample_weight, loss, alpha, beta, max_iter, tol, verbose, random_state, False, max_squared_sum, warm_start_sag, is_saga=solver == 'saga')
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', 'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
if solver in ['lbfgs', 'newton-cg']:
multi_w0 = np.reshape(w0, (n_classes, -1), order='F')
else:
multi_w0 = w0
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
(coefs, Cs, n_iter) = (np.array(coefs), np.array(Cs), n_iter)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError('multi_class should be either multinomial or ovr, got %s' % multi_class)
if pos_class is not None:
mask = y_test == pos_class
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.0
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.0
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return (coefs, Cs, np.array(scores), n_iter)
|
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10, scoring=None, fit_intercept=False, max_iter=100, tol=0.0001, class_weight=None, verbose=0, solver='lbfgs', penalty='l2', dual=False, intercept_scaling=1.0, multi_class='auto', random_state=None, max_squared_sum=None, sample_weight=None, l1_ratio=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool, default=False
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, default='lbfgs'
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iterations for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = sample_weight[train]
<DeepExtract>
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
if False:
X_train = check_array(X_train, accept_sparse='csr', dtype=np.float64, accept_large_sparse=solver not in ['liblinear', 'sag', 'saga'])
y_train = check_array(y_train, ensure_2d=False, dtype=None)
check_consistent_length(X_train, y_train)
(n_samples, n_features) = X_train.shape
classes = np.unique(y_train)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if classes.size > 2:
raise ValueError('To fit OvR, use the pos_class argument')
pos_class = classes[1]
sample_weight = _check_sample_weight(sample_weight, X_train, dtype=X_train.dtype, copy=True)
if solver == 'newton-cholesky':
sw_sum = sample_weight.sum()
sample_weight = sample_weight / sw_sum
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y_train)
sample_weight *= class_weight_[le.fit_transform(y_train)]
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X_train.dtype)
mask = y_train == pos_class
y_bin = np.ones(y_train.shape, dtype=X_train.dtype)
if solver in ['lbfgs', 'newton-cg', 'newton-cholesky']:
mask_classes = np.array([0, 1])
y_bin[~mask] = 0.0
else:
mask_classes = np.array([-1, 1])
y_bin[~mask] = -1.0
if class_weight == 'balanced':
class_weight_ = compute_class_weight(class_weight, classes=mask_classes, y=y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver in ['sag', 'saga', 'lbfgs', 'newton-cg']:
le = LabelEncoder()
Y_multi = le.fit_transform(y_train).astype(X_train.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y_train)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros((classes.size, n_features + int(fit_intercept)), order='F', dtype=X_train.dtype)
if coef is not None:
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError('Initialization coef is of shape %d, expected shape %d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if coef.shape[0] != n_classes or coef.shape[1] not in (n_features, n_features + 1):
raise ValueError('Initialization coef is of shape (%d, %d), expected shape (%d, %d) or (%d, %d)' % (coef.shape[0], coef.shape[1], classes.size, n_features, classes.size, n_features + 1))
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel(order='F')
loss = LinearModelLoss(base_loss=HalfMultinomialLoss(n_classes=classes.size), fit_intercept=fit_intercept)
target = Y_multi
if solver == 'lbfgs':
func = loss.loss_gradient
elif solver == 'newton-cg':
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept)
func = loss.loss_gradient
elif solver == 'newton-cg':
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept)
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product
elif solver == 'newton-cholesky':
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept)
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for (i, C) in enumerate(Cs):
if solver == 'lbfgs':
l2_reg_strength = 1.0 / C
iprint = [-1, 50, 1, 100, 101][np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
opt_res = optimize.minimize(func, w0, method='L-BFGS-B', jac=True, args=(X_train, target, sample_weight, l2_reg_strength, n_threads), options={'iprint': iprint, 'gtol': tol, 'maxiter': max_iter})
n_iter_i = _check_optimize_result(solver, opt_res, max_iter, extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
(w0, loss) = (opt_res.x, opt_res.fun)
elif solver == 'newton-cg':
l2_reg_strength = 1.0 / C
args = (X_train, target, sample_weight, l2_reg_strength, n_threads)
(w0, n_iter_i) = _newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol)
elif solver == 'newton-cholesky':
l2_reg_strength = 1.0 / C / sw_sum
sol = NewtonCholeskySolver(coef=w0, linear_loss=loss, l2_reg_strength=l2_reg_strength, tol=tol, max_iter=max_iter, n_threads=n_threads, verbose=verbose)
w0 = sol.solve(X=X_train, y=target, sample_weight=sample_weight)
n_iter_i = sol.iteration
elif solver == 'liblinear':
(coef_, intercept_, n_iter_i) = _fit_liblinear(X_train, target, C, fit_intercept, intercept_scaling, None, penalty, dual, verbose, max_iter, tol, random_state, sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X_train.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.0
beta = 1.0 / C
elif penalty == 'l2':
alpha = 1.0 / C
beta = 0.0
else:
alpha = 1.0 / C * (1 - l1_ratio)
beta = 1.0 / C * l1_ratio
(w0, n_iter_i, warm_start_sag) = sag_solver(X_train, target, sample_weight, loss, alpha, beta, max_iter, tol, verbose, random_state, False, max_squared_sum, warm_start_sag, is_saga=solver == 'saga')
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', 'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
if solver in ['lbfgs', 'newton-cg']:
multi_w0 = np.reshape(w0, (n_classes, -1), order='F')
else:
multi_w0 = w0
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
(coefs, Cs, n_iter) = (np.array(coefs), np.array(Cs), n_iter)
</DeepExtract>
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError('multi_class should be either multinomial or ovr, got %s' % multi_class)
if pos_class is not None:
mask = y_test == pos_class
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.0
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.0
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return (coefs, Cs, np.array(scores), n_iter)
|
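_log_reg_scoring_path is a private helper; the public estimator that exercises it is LogisticRegressionCV. A minimal sketch under that assumption (dataset and grid values illustrative):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegressionCV

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs', max_iter=500).fit(X, y)
print(clf.Cs_)               # the logspace(-4, 4) grid of C values described above
print(clf.scores_[1].shape)  # (n_folds, n_Cs) accuracy scores per fold and C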
@validate_params({'X': [np.ndarray, 'sparse matrix'], 'min_samples': [Interval(Integral, 2, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both')], 'max_eps': [Interval(Real, 0, None, closed='both')], 'metric': [StrOptions(set(_VALID_METRICS) | {'precomputed'}), callable], 'p': [Interval(Real, 0, None, closed='right'), None], 'metric_params': [dict, None], 'algorithm': [StrOptions({'auto', 'brute', 'ball_tree', 'kd_tree'})], 'leaf_size': [Interval(Integral, 1, None, closed='left')], 'n_jobs': [Integral, None]})
def compute_optics_graph(X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
p : int, default=2
Parameter for the Minkowski metric from
:func:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`.
- 'kd_tree' will use :class:`KDTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
if min_samples > n_samples:
raise ValueError('%s must be no greater than the number of samples (%d). Got %d' % ('min_samples', n_samples, min_samples))
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples, algorithm=algorithm, leaf_size=leaf_size, metric=metric, metric_params=metric_params, p=p, n_jobs=n_jobs)
nbrs.fit(X)
n_samples = X.shape[0]
core_distances = np.empty(n_samples)
core_distances.fill(np.nan)
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=None)
slices = gen_batches(n_samples, chunk_n_rows)
for sl in slices:
core_distances[sl] = nbrs.kneighbors(X[sl], min_samples)[0][:, -1]
core_distances_ = core_distances
core_distances_[core_distances_ > max_eps] = np.inf
np.around(core_distances_, decimals=np.finfo(core_distances_.dtype).precision, out=core_distances_)
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
P = X[point:point + 1]
indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0]
unproc = np.compress(~np.take(processed, indices), indices)
if not unproc.size:
continue  # neighborhood fully processed; move to the next point in the ordering
if metric == 'precomputed':
dists = X[point, unproc]
if issparse(dists):
dists.sort_indices()
dists = dists.data
else:
_params = dict() if metric_params is None else metric_params.copy()
if metric == 'minkowski' and 'p' not in _params:
_params['p'] = p
dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel()
rdists = np.maximum(dists, core_distances_[point])
np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists)
improved = np.where(rdists < np.take(reachability_, unproc))
reachability_[unproc[improved]] = rdists[improved]
predecessor_[unproc[improved]] = point
if np.all(np.isinf(reachability_)):
warnings.warn('All reachability values are inf. Set a larger max_eps or all data will be considered outliers.', UserWarning)
return (ordering, core_distances_, reachability_, predecessor_)
|
@validate_params({'X': [np.ndarray, 'sparse matrix'], 'min_samples': [Interval(Integral, 2, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both')], 'max_eps': [Interval(Real, 0, None, closed='both')], 'metric': [StrOptions(set(_VALID_METRICS) | {'precomputed'}), callable], 'p': [Interval(Real, 0, None, closed='right'), None], 'metric_params': [dict, None], 'algorithm': [StrOptions({'auto', 'brute', 'ball_tree', 'kd_tree'})], 'leaf_size': [Interval(Integral, 1, None, closed='left')], 'n_jobs': [Integral, None]})
def compute_optics_graph(X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
p : int, default=2
Parameter for the Minkowski metric from
:func:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`.
- 'kd_tree' will use :class:`KDTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
<DeepExtract>
if min_samples > n_samples:
raise ValueError('%s must be no greater than the number of samples (%d). Got %d' % ('min_samples', n_samples, min_samples))
</DeepExtract>
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples, algorithm=algorithm, leaf_size=leaf_size, metric=metric, metric_params=metric_params, p=p, n_jobs=n_jobs)
nbrs.fit(X)
<DeepExtract>
n_samples = X.shape[0]
core_distances = np.empty(n_samples)
core_distances.fill(np.nan)
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=None)
slices = gen_batches(n_samples, chunk_n_rows)
for sl in slices:
core_distances[sl] = nbrs.kneighbors(X[sl], min_samples)[0][:, -1]
core_distances_ = core_distances
</DeepExtract>
core_distances_[core_distances_ > max_eps] = np.inf
np.around(core_distances_, decimals=np.finfo(core_distances_.dtype).precision, out=core_distances_)
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
<DeepExtract>
P = X[point:point + 1]
indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0]
unproc = np.compress(~np.take(processed, indices), indices)
if not unproc.size:
continue  # neighborhood fully processed; move to the next point in the ordering
if metric == 'precomputed':
dists = X[point, unproc]
if issparse(dists):
dists.sort_indices()
dists = dists.data
else:
_params = dict() if metric_params is None else metric_params.copy()
if metric == 'minkowski' and 'p' not in _params:
_params['p'] = p
dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel()
rdists = np.maximum(dists, core_distances_[point])
np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists)
improved = np.where(rdists < np.take(reachability_, unproc))
reachability_[unproc[improved]] = rdists[improved]
predecessor_[unproc[improved]] = point
</DeepExtract>
if np.all(np.isinf(reachability_)):
warnings.warn('All reachability values are inf. Set a larger max_eps or all data will be considered outliers.', UserWarning)
return (ordering, core_distances_, reachability_, predecessor_)
|
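A minimal call sketch for compute_optics_graph above; every parameter after X is keyword-only, and the toy coordinates are illustrative:

import numpy as np
from sklearn.cluster import compute_optics_graph

X = np.array([[1.0, 2.0], [2.0, 5.0], [3.0, 6.0], [8.0, 7.0], [8.0, 8.0], [7.0, 3.0]])
ordering, core_dist, reach, pred = compute_optics_graph(
    X, min_samples=2, max_eps=np.inf, metric='minkowski', p=2,
    metric_params=None, algorithm='auto', leaf_size=30, n_jobs=None)
print(reach[ordering])  # reachability distances in cluster order (first is inf)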
@pytest.mark.parametrize('X_binned, all_gradients, expected_categories_left, n_bins_non_missing,missing_values_bin_idx, has_missing_values, expected_missing_go_to_left', [([0, 1, 2, 3] * 11, [10, 1, 10, 10] * 11, [1], 4, 4, False, None), ([0, 1, 2, 3] * 11, [10, 10, 10, 1] * 11, [3], 4, 4, False, None), ([0, 1, 2, 3] * 11 + [4] * 5, [10, 10, 10, 1] * 11 + [10] * 5, [3], 4, 4, False, None), ([0, 1, 2, 3] * 11 + [4] * 5, [10, 10, 10, 1] * 11 + [1] * 5, [3], 4, 4, False, None), ([0, 1, 2] * 11 + [9] * 11, [10, 1, 10] * 11 + [10] * 11, [1], 3, 9, True, False), ([0, 1, 2] * 11 + [9] * 11, [10, 1, 10] * 11 + [1] * 11, [1, 9], 3, 9, True, True), ([0, 1, 2, 3, 4] * 11 + [255] * 12, [10, 10, 10, 10, 10] * 11 + [1] * 12, [255], 5, 255, True, True), (list(range(60)) * 12, [10, 1] * 360, list(range(1, 60, 2)), 59, 59, True, True), (list(range(256)) * 12, [10, 10, 10, 10, 10, 10, 10, 1] * 384, list(range(7, 256, 8)), 255, 255, True, True)])
def test_splitting_categorical_sanity(X_binned, all_gradients, expected_categories_left, n_bins_non_missing, missing_values_bin_idx, has_missing_values, expected_missing_go_to_left):
n_samples = len(X_binned)
n_bins = max(X_binned) + 1
X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1)
X_binned = np.asfortranarray(X_binned)
l2_regularization = 0.0
min_hessian_to_split = 0.001
min_samples_leaf = 1
min_gain_to_split = 0.0
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = np.array(all_gradients, dtype=G_H_DTYPE)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
has_missing_values = np.array([has_missing_values], dtype=np.uint8)
sum_gradients = all_gradients.sum()
sum_hessians = n_samples
hessians_are_constant = True
builder = HistogramBuilder(X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads)
n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
monotonic_cst = np.array([MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8)
is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8)
splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx, has_missing_values, is_categorical, monotonic_cst, l2_regularization, min_hessian_to_split, min_samples_leaf, min_gain_to_split, hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(n_samples, histograms, sum_gradients, sum_hessians, value)
assert split_info.is_categorical
assert split_info.gain > 0
expected_bitset = np.zeros(8, dtype=np.uint32)
for cat in expected_categories_left:
idx = cat // 32
shift = cat % 32
expected_bitset[idx] |= 1 << shift
assert_array_equal(expected_bitset, split_info.left_cat_bitset)
if has_missing_values:
assert split_info.missing_go_to_left == expected_missing_go_to_left
(samples_left, samples_right, _) = splitter.split_indices(split_info, splitter.partition)
left_mask = np.isin(X_binned.ravel(), expected_categories_left)
assert_array_equal(sample_indices[left_mask], samples_left)
assert_array_equal(sample_indices[~left_mask], samples_right)
|
@pytest.mark.parametrize('X_binned, all_gradients, expected_categories_left, n_bins_non_missing,missing_values_bin_idx, has_missing_values, expected_missing_go_to_left', [([0, 1, 2, 3] * 11, [10, 1, 10, 10] * 11, [1], 4, 4, False, None), ([0, 1, 2, 3] * 11, [10, 10, 10, 1] * 11, [3], 4, 4, False, None), ([0, 1, 2, 3] * 11 + [4] * 5, [10, 10, 10, 1] * 11 + [10] * 5, [3], 4, 4, False, None), ([0, 1, 2, 3] * 11 + [4] * 5, [10, 10, 10, 1] * 11 + [1] * 5, [3], 4, 4, False, None), ([0, 1, 2] * 11 + [9] * 11, [10, 1, 10] * 11 + [10] * 11, [1], 3, 9, True, False), ([0, 1, 2] * 11 + [9] * 11, [10, 1, 10] * 11 + [1] * 11, [1, 9], 3, 9, True, True), ([0, 1, 2, 3, 4] * 11 + [255] * 12, [10, 10, 10, 10, 10] * 11 + [1] * 12, [255], 5, 255, True, True), (list(range(60)) * 12, [10, 1] * 360, list(range(1, 60, 2)), 59, 59, True, True), (list(range(256)) * 12, [10, 10, 10, 10, 10, 10, 10, 1] * 384, list(range(7, 256, 8)), 255, 255, True, True)])
def test_splitting_categorical_sanity(X_binned, all_gradients, expected_categories_left, n_bins_non_missing, missing_values_bin_idx, has_missing_values, expected_missing_go_to_left):
n_samples = len(X_binned)
n_bins = max(X_binned) + 1
X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1)
X_binned = np.asfortranarray(X_binned)
l2_regularization = 0.0
min_hessian_to_split = 0.001
min_samples_leaf = 1
min_gain_to_split = 0.0
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = np.array(all_gradients, dtype=G_H_DTYPE)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
has_missing_values = np.array([has_missing_values], dtype=np.uint8)
sum_gradients = all_gradients.sum()
sum_hessians = n_samples
hessians_are_constant = True
builder = HistogramBuilder(X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads)
n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
monotonic_cst = np.array([MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8)
is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8)
splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx, has_missing_values, is_categorical, monotonic_cst, l2_regularization, min_hessian_to_split, min_samples_leaf, min_gain_to_split, hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(n_samples, histograms, sum_gradients, sum_hessians, value)
assert split_info.is_categorical
assert split_info.gain > 0
<DeepExtract>
expected_bitset = np.zeros(8, dtype=np.uint32)
for cat in expected_categories_left:
idx = cat // 32
shift = cat % 32
expected_bitset[idx] |= 1 << shift
assert_array_equal(expected_bitset, split_info.left_cat_bitset)
</DeepExtract>
if has_missing_values:
assert split_info.missing_go_to_left == expected_missing_go_to_left
(samples_left, samples_right, _) = splitter.split_indices(split_info, splitter.partition)
left_mask = np.isin(X_binned.ravel(), expected_categories_left)
assert_array_equal(sample_indices[left_mask], samples_left)
assert_array_equal(sample_indices[~left_mask], samples_right)
|
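The expected-categories assertion in the test above packs category ids into a 256-bit bitset stored as eight uint32 words. A standalone sketch of that encoding (the helper name is hypothetical):

import numpy as np

def categories_to_bitset(categories):
    bitset = np.zeros(8, dtype=np.uint32)
    for cat in categories:
        bitset[cat // 32] |= np.uint32(1) << np.uint32(cat % 32)
    return bitset

print(categories_to_bitset([1, 9]))  # word 0 == 2**1 + 2**9 == 514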
def plot_2d(points, points_color, title):
(fig, ax) = plt.subplots(figsize=(3, 3), facecolor='white', constrained_layout=True)
fig.suptitle(title, size=16)
(x, y) = points.T
ax.scatter(x, y, c=points_color, s=50, alpha=0.8)
ax.set_title(title)
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
plt.show()
|
def plot_2d(points, points_color, title):
(fig, ax) = plt.subplots(figsize=(3, 3), facecolor='white', constrained_layout=True)
fig.suptitle(title, size=16)
<DeepExtract>
(x, y) = points.T
ax.scatter(x, y, c=points_color, s=50, alpha=0.8)
ax.set_title(title)
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
</DeepExtract>
plt.show()
|
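An illustrative call to the plot_2d helper above, assuming the matplotlib.pyplot/ticker imports it relies on and the S-curve data used in the manifold-learning examples it comes from:

import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import datasets

points, color = datasets.make_s_curve(n_samples=300, random_state=0)
plot_2d(points[:, [0, 2]], color, "S-curve (x-z projection)")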
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache.
Parameters
----------
data_home : str, default=None
The path to scikit-learn data directory. If `None`, the default path
is `~/scikit_learn_data`.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA', join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
makedirs(data_home, exist_ok=True)
data_home = data_home
shutil.rmtree(data_home)
|
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache.
Parameters
----------
data_home : str, default=None
The path to scikit-learn data directory. If `None`, the default path
is `~/scikit_learn_data`.
"""
<DeepExtract>
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA', join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
makedirs(data_home, exist_ok=True)
data_home = data_home
</DeepExtract>
shutil.rmtree(data_home)
|
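Usage is a single call; with no argument the helper resolves and then removes the default cache directory:

from sklearn.datasets import clear_data_home

clear_data_home()  # removes ~/scikit_learn_data (or $SCIKIT_LEARN_DATA, if set)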
def test_nmf_negative_beta_loss():
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
(W, H, _) = non_negative_factorization(X, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
msg = 'When beta_loss <= 0 and X contains zeros, the solver may diverge.'
for beta_loss in (-0.6, 0.0):
with pytest.raises(ValueError, match=msg):
(W, H, _) = non_negative_factorization(X, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
(W, H, _) = non_negative_factorization(X + 1e-09, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
(W, H, _) = non_negative_factorization(X, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
(W, H, _) = non_negative_factorization(X_csr, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
|
def test_nmf_negative_beta_loss():
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
(W, H, _) = non_negative_factorization(X, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
msg = 'When beta_loss <= 0 and X contains zeros, the solver may diverge.'
for beta_loss in (-0.6, 0.0):
with pytest.raises(ValueError, match=msg):
<DeepExtract>
(W, H, _) = non_negative_factorization(X, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
</DeepExtract>
<DeepExtract>
(W, H, _) = non_negative_factorization(X + 1e-09, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
</DeepExtract>
for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
<DeepExtract>
(W, H, _) = non_negative_factorization(X, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
</DeepExtract>
<DeepExtract>
(W, H, _) = non_negative_factorization(X_csr, init='random', n_components=n_components, solver='mu', beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
</DeepExtract>
|
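A standalone sketch of the behavior under test: with beta_loss <= 0 the 'mu' solver rejects a matrix containing zeros, while a small positive shift makes the same factorization run (the epsilon mirrors the test above):

import numpy as np
from sklearn.decomposition import non_negative_factorization

X = np.abs(np.random.RandomState(42).randn(6, 5))
X[0, 0] = 0.0  # force a zero entry
try:
    non_negative_factorization(X, n_components=3, init='random', solver='mu',
                               beta_loss=-0.6, random_state=0, max_iter=1000)
except ValueError as exc:
    print(exc)  # "When beta_loss <= 0 and X contains zeros, the solver may diverge."
W, H, _ = non_negative_factorization(X + 1e-09, n_components=3, init='random',
                                     solver='mu', beta_loss=-0.6, random_state=0,
                                     max_iter=1000)
assert not np.isnan(W).any() and not np.isnan(H).any()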
def predict(self, X, sample_weight='deprecated'):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
.. deprecated:: 1.3
The parameter `sample_weight` is deprecated in version 1.3
and will be removed in 1.5.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse='csr', reset=False, dtype=[np.float64, np.float32], order='C', accept_large_sparse=False)
X = X
if not (isinstance(sample_weight, str) and sample_weight == 'deprecated'):
warnings.warn("'sample_weight' was deprecated in version 1.3 and will be removed in 1.5.", FutureWarning)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
else:
sample_weight = _check_sample_weight(None, X, dtype=X.dtype)
with threadpool_limits(limits=1, user_api='blas'):
result = _labels_inertia(X, sample_weight, self.cluster_centers_, self._n_threads, False)
labels = result
return labels
|
def predict(self, X, sample_weight='deprecated'):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
.. deprecated:: 1.3
The parameter `sample_weight` is deprecated in version 1.3
and will be removed in 1.5.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
<DeepExtract>
X = self._validate_data(X, accept_sparse='csr', reset=False, dtype=[np.float64, np.float32], order='C', accept_large_sparse=False)
X = X
</DeepExtract>
if not (isinstance(sample_weight, str) and sample_weight == 'deprecated'):
warnings.warn("'sample_weight' was deprecated in version 1.3 and will be removed in 1.5.", FutureWarning)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
else:
sample_weight = _check_sample_weight(None, X, dtype=X.dtype)
<DeepExtract>
with threadpool_limits(limits=1, user_api='blas'):
result = _labels_inertia(X, sample_weight, self.cluster_centers_, self._n_threads, False)
labels = result
</DeepExtract>
return labels
|
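A minimal sketch of the predict path above through the public KMeans estimator (toy data illustrative):

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.predict([[0.5, 0.5], [9.0, 10.0]]))  # index of the nearest centroid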
def fetch_species_distributions(*, data_home=None, download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006).
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
- For an example of using this dataset with scikit-learn, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError('Data not found and `download_if_missing` is False')
logger.info('Downloading species data from %s to %s' % (SAMPLES.url, data_home))
samples_path = _fetch_remote(SAMPLES, dirname=data_home)
with np.load(samples_path) as X:
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
names = fhandle.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(fhandle, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
train = rec
if 'test' in f:
names = fhandle.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(fhandle, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
test = rec
remove(samples_path)
logger.info('Downloading coverage data from %s to %s' % (COVERAGES.url, data_home))
coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
with np.load(coverages_path) as X:
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(' - converting {}'.format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
|
def fetch_species_distributions(*, data_home=None, download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006).
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
-----
This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
- For an example of using this dataset with scikit-learn, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError('Data not found and `download_if_missing` is False')
logger.info('Downloading species data from %s to %s' % (SAMPLES.url, data_home))
samples_path = _fetch_remote(SAMPLES, dirname=data_home)
with np.load(samples_path) as X:
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
<DeepExtract>
names = fhandle.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(fhandle, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
train = rec
</DeepExtract>
if 'test' in f:
<DeepExtract>
names = fhandle.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(fhandle, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
test = rec
</DeepExtract>
remove(samples_path)
logger.info('Downloading coverage data from %s to %s' % (COVERAGES.url, data_home))
coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
with np.load(coverages_path) as X:
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(' - converting {}'.format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
|
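# Usage sketch for fetch_species_distributions (a hedged illustration, not
# library code; it assumes network access or a cached copy under
# ~/scikit_learn_data). The attribute names come from the docstring above.
import numpy as np
from sklearn.datasets import fetch_species_distributions

data = fetch_species_distributions()
print(data.coverages.shape)   # (14, 1592, 1212): 14 features per grid cell
print(data.train.shape)       # (1624,) training occurrence records
print(data.train['species'][0], data.train['dd long'][0], data.train['dd lat'][0])
# The longitude/latitude axes of the grid follow from the stored metadata:
xgrid = data.x_left_lower_corner + data.grid_size * np.arange(data.Nx)
ygrid = data.y_left_lower_corner + data.grid_size * np.arange(data.Ny)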
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as `X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support indexing.
"""
from . import _safe_indexing
fit_params_validated = {}
for (param_key, param_value) in fit_params.items():
if not _is_arraylike(param_value) or _num_samples(param_value) != _num_samples(X):
fit_params_validated[param_key] = param_value
else:
            if sp.issparse(param_value):
                fit_params_validated[param_key] = param_value.tocsr()
            elif hasattr(param_value, '__getitem__') or hasattr(param_value, 'iloc'):
                fit_params_validated[param_key] = param_value
            elif param_value is None:
                fit_params_validated[param_key] = param_value
            else:
                fit_params_validated[param_key] = np.array(param_value)
fit_params_validated[param_key] = _safe_indexing(fit_params_validated[param_key], indices)
return fit_params_validated
|
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as `X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support indexing.
"""
from . import _safe_indexing
fit_params_validated = {}
for (param_key, param_value) in fit_params.items():
if not _is_arraylike(param_value) or _num_samples(param_value) != _num_samples(X):
fit_params_validated[param_key] = param_value
else:
<DeepExtract>
            if sp.issparse(param_value):
                fit_params_validated[param_key] = param_value.tocsr()
            elif hasattr(param_value, '__getitem__') or hasattr(param_value, 'iloc'):
                fit_params_validated[param_key] = param_value
            elif param_value is None:
                fit_params_validated[param_key] = param_value
            else:
                fit_params_validated[param_key] = np.array(param_value)
</DeepExtract>
fit_params_validated[param_key] = _safe_indexing(fit_params_validated[param_key], indices)
return fit_params_validated
|
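# Illustrative sketch (assumes the _check_fit_params defined above is in
# scope): scalar fit parameters pass through untouched, while array-likes
# whose length matches X are index-selected, e.g. per-sample weights during
# cross-validation splitting.
import numpy as np

X_demo = np.zeros((4, 2))
fit_params_demo = {'sample_weight': np.array([1.0, 2.0, 3.0, 4.0]), 'check_input': False}
out = _check_fit_params(X_demo, fit_params_demo, indices=np.array([0, 2]))
# out['sample_weight'] -> array([1., 3.]); out['check_input'] -> False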
@ignore_warnings(category=FutureWarning)
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, -2], [2, 2], [-2, -2]])
y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
if _safe_tags(estimator_orig, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator_orig, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator_orig, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
est.sparsify()
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
est = pickle.loads(pickle.dumps(est))
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
|
@ignore_warnings(category=FutureWarning)
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, -2], [2, 2], [-2, -2]])
y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
<DeepExtract>
if _safe_tags(estimator_orig, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator_orig, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator_orig, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
est.sparsify()
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
est = pickle.loads(pickle.dumps(est))
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
|
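# Minimal sketch of the behavior under test: after sparsify() the learned
# coefficients are stored as a scipy CSR matrix and predictions are unchanged.
# SGDClassifier is one estimator that supports sparsify().
import numpy as np
from scipy import sparse
from sklearn.linear_model import SGDClassifier

X = np.array([[-2.0, -1.0], [1.0, 1.0], [2.0, 2.0]])
y = np.array([0, 1, 1])
clf = SGDClassifier(random_state=0).fit(X, y)
dense_pred = clf.predict(X)
clf.sparsify()
assert sparse.issparse(clf.coef_)
assert (clf.predict(X) == dense_pred).all()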
def transform(self, X):
"""Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or (n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
X = self._validate_data(X=X, ensure_2d=False, allow_nd=True, ensure_min_samples=1, ensure_min_features=1, reset=False)
random_state = check_random_state(self.random_state)
(n_imgs, img_height, img_width) = X.shape[:3]
if self.patch_size is None:
patch_size = (img_height // 10, img_width // 10)
else:
if len(self.patch_size) != 2:
raise ValueError(f'patch_size must be a tuple of two integers. Got {self.patch_size} instead.')
patch_size = self.patch_size
(n_imgs, img_height, img_width) = X.shape[:3]
X = np.reshape(X, (n_imgs, img_height, img_width, -1))
n_channels = X.shape[-1]
(patch_height, patch_width) = patch_size
n_h = img_height - patch_height + 1
n_w = img_width - patch_width + 1
all_patches = n_h * n_w
if self.max_patches:
if isinstance(self.max_patches, Integral) and self.max_patches < all_patches:
n_patches = self.max_patches
elif isinstance(self.max_patches, Integral) and self.max_patches >= all_patches:
n_patches = all_patches
elif isinstance(self.max_patches, Real) and 0 < self.max_patches < 1:
n_patches = int(self.max_patches * all_patches)
else:
raise ValueError('Invalid value for max_patches: %r' % self.max_patches)
else:
n_patches = all_patches
patches_shape = (n_imgs * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
patches = np.empty(patches_shape)
for (ii, image) in enumerate(X):
(i_h, i_w) = image.shape[:2]
(p_h, p_w) = patch_size
if p_h > i_h:
raise ValueError('Height of the patch should be less than the height of the image.')
if p_w > i_w:
raise ValueError('Width of the patch should be less than the width of the image.')
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = _extract_patches(image, patch_shape=(p_h, p_w, n_colors), extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
        # keep the per-image result in its own variable so the accumulating
        # `patches` array is not clobbered inside the loop
        if self.max_patches:
            rng = check_random_state(random_state)
            i_s = rng.randint(i_h - p_h + 1, size=n_patches)
            j_s = rng.randint(i_w - p_w + 1, size=n_patches)
            img_patches = extracted_patches[i_s, j_s, 0]
        else:
            img_patches = extracted_patches
        img_patches = img_patches.reshape(-1, p_h, p_w, n_colors)
        if img_patches.shape[-1] == 1:
            patches[ii * n_patches:(ii + 1) * n_patches] = img_patches.reshape((n_patches, p_h, p_w))
        else:
            patches[ii * n_patches:(ii + 1) * n_patches] = img_patches
return patches
|
def transform(self, X):
"""Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or (n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
X = self._validate_data(X=X, ensure_2d=False, allow_nd=True, ensure_min_samples=1, ensure_min_features=1, reset=False)
random_state = check_random_state(self.random_state)
(n_imgs, img_height, img_width) = X.shape[:3]
if self.patch_size is None:
patch_size = (img_height // 10, img_width // 10)
else:
if len(self.patch_size) != 2:
raise ValueError(f'patch_size must be a tuple of two integers. Got {self.patch_size} instead.')
patch_size = self.patch_size
(n_imgs, img_height, img_width) = X.shape[:3]
X = np.reshape(X, (n_imgs, img_height, img_width, -1))
n_channels = X.shape[-1]
(patch_height, patch_width) = patch_size
<DeepExtract>
n_h = img_height - patch_height + 1
n_w = img_width - patch_width + 1
all_patches = n_h * n_w
if self.max_patches:
if isinstance(self.max_patches, Integral) and self.max_patches < all_patches:
n_patches = self.max_patches
elif isinstance(self.max_patches, Integral) and self.max_patches >= all_patches:
n_patches = all_patches
elif isinstance(self.max_patches, Real) and 0 < self.max_patches < 1:
n_patches = int(self.max_patches * all_patches)
else:
raise ValueError('Invalid value for max_patches: %r' % self.max_patches)
else:
n_patches = all_patches
</DeepExtract>
patches_shape = (n_imgs * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
patches = np.empty(patches_shape)
for (ii, image) in enumerate(X):
<DeepExtract>
(i_h, i_w) = image.shape[:2]
(p_h, p_w) = patch_size
if p_h > i_h:
raise ValueError('Height of the patch should be less than the height of the image.')
if p_w > i_w:
raise ValueError('Width of the patch should be less than the width of the image.')
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = _extract_patches(image, patch_shape=(p_h, p_w, n_colors), extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
        # keep the per-image result in its own variable so the accumulating
        # `patches` array is not clobbered inside the loop
        if self.max_patches:
            rng = check_random_state(random_state)
            i_s = rng.randint(i_h - p_h + 1, size=n_patches)
            j_s = rng.randint(i_w - p_w + 1, size=n_patches)
            img_patches = extracted_patches[i_s, j_s, 0]
        else:
            img_patches = extracted_patches
        img_patches = img_patches.reshape(-1, p_h, p_w, n_colors)
        if img_patches.shape[-1] == 1:
            patches[ii * n_patches:(ii + 1) * n_patches] = img_patches.reshape((n_patches, p_h, p_w))
        else:
            patches[ii * n_patches:(ii + 1) * n_patches] = img_patches
</DeepExtract>
return patches
|
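# Usage sketch for the transform above, assuming the enclosing class is
# sklearn.feature_extraction.image.PatchExtractor: max_patches patches are
# sampled per image, so two images yield 2 * max_patches patches.
import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

images = np.arange(2 * 8 * 8, dtype=float).reshape(2, 8, 8)  # two 8x8 grayscale images
pe = PatchExtractor(patch_size=(3, 3), max_patches=4, random_state=0)
patches = pe.fit(images).transform(images)
print(patches.shape)  # (8, 3, 3)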
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score):
is_multilabel = type_of_target(y_true).startswith('multilabel')
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
(n_samples, n_classes) = y_true_binarize.shape
label_measure = metric(y_true, y_pred, average=None)
assert_allclose(label_measure, [metric(y_true_binarize[:, i], y_pred_binarize[:, i]) for i in range(n_classes)])
micro_measure = metric(y_true, y_pred, average='micro')
assert_allclose(micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))
macro_measure = metric(y_true, y_pred, average='macro')
assert_allclose(macro_measure, np.mean(label_measure))
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average='weighted')
assert_allclose(weighted_measure, np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average='weighted')
assert_allclose(weighted_measure, 0)
if is_multilabel:
sample_measure = metric(y_true, y_pred, average='samples')
assert_allclose(sample_measure, np.mean([metric(y_true_binarize[i], y_pred_binarize[i]) for i in range(n_samples)]))
with pytest.raises(ValueError):
metric(y_true, y_pred, average='unknown')
with pytest.raises(ValueError):
metric(y_true, y_pred, average='garbage')
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
(n_samples, n_classes) = y_true_binarize.shape
label_measure = metric(y_true, y_score, average=None)
assert_allclose(label_measure, [metric(y_true_binarize[:, i], y_score[:, i]) for i in range(n_classes)])
micro_measure = metric(y_true, y_score, average='micro')
assert_allclose(micro_measure, metric(y_true_binarize.ravel(), y_score.ravel()))
macro_measure = metric(y_true, y_score, average='macro')
assert_allclose(macro_measure, np.mean(label_measure))
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_score, average='weighted')
assert_allclose(weighted_measure, np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_score, average='weighted')
assert_allclose(weighted_measure, 0)
if is_multilabel:
sample_measure = metric(y_true, y_score, average='samples')
assert_allclose(sample_measure, np.mean([metric(y_true_binarize[i], y_score[i]) for i in range(n_samples)]))
with pytest.raises(ValueError):
metric(y_true, y_score, average='unknown')
with pytest.raises(ValueError):
metric(y_true, y_score, average='garbage')
else:
raise ValueError('Metric is not recorded as having an average option')
|
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score):
is_multilabel = type_of_target(y_true).startswith('multilabel')
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
<DeepExtract>
(n_samples, n_classes) = y_true_binarize.shape
label_measure = metric(y_true, y_pred, average=None)
assert_allclose(label_measure, [metric(y_true_binarize[:, i], y_pred_binarize[:, i]) for i in range(n_classes)])
micro_measure = metric(y_true, y_pred, average='micro')
assert_allclose(micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))
macro_measure = metric(y_true, y_pred, average='macro')
assert_allclose(macro_measure, np.mean(label_measure))
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average='weighted')
assert_allclose(weighted_measure, np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average='weighted')
assert_allclose(weighted_measure, 0)
if is_multilabel:
sample_measure = metric(y_true, y_pred, average='samples')
assert_allclose(sample_measure, np.mean([metric(y_true_binarize[i], y_pred_binarize[i]) for i in range(n_samples)]))
with pytest.raises(ValueError):
metric(y_true, y_pred, average='unknown')
with pytest.raises(ValueError):
metric(y_true, y_pred, average='garbage')
</DeepExtract>
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
<DeepExtract>
(n_samples, n_classes) = y_true_binarize.shape
label_measure = metric(y_true, y_score, average=None)
assert_allclose(label_measure, [metric(y_true_binarize[:, i], y_score[:, i]) for i in range(n_classes)])
micro_measure = metric(y_true, y_score, average='micro')
assert_allclose(micro_measure, metric(y_true_binarize.ravel(), y_score.ravel()))
macro_measure = metric(y_true, y_score, average='macro')
assert_allclose(macro_measure, np.mean(label_measure))
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_score, average='weighted')
assert_allclose(weighted_measure, np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_score, average='weighted')
assert_allclose(weighted_measure, 0)
if is_multilabel:
sample_measure = metric(y_true, y_score, average='samples')
assert_allclose(sample_measure, np.mean([metric(y_true_binarize[i], y_score[i]) for i in range(n_samples)]))
with pytest.raises(ValueError):
metric(y_true, y_score, average='unknown')
with pytest.raises(ValueError):
metric(y_true, y_score, average='garbage')
</DeepExtract>
else:
raise ValueError('Metric is not recorded as having an average option')
|
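# Worked example of the averaging identities exercised above, using f1_score
# as a representative metric with an `average` option.
import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 2, 1, 0, 0, 1])
per_label = f1_score(y_true, y_pred, average=None)
assert np.isclose(f1_score(y_true, y_pred, average='macro'), per_label.mean())
label_counts = np.bincount(y_true)
assert np.isclose(f1_score(y_true, y_pred, average='weighted'),
                  np.average(per_label, weights=label_counts))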
def test_base_chain_fit_and_predict():
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
chains = [RegressorChain(Ridge()), ClassifierChain(LogisticRegression())]
for chain in chains:
chain.fit(X, Y)
Y_pred = chain.predict(X)
assert Y_pred.shape == Y.shape
assert [c.coef_.size for c in chain.estimators_] == list(range(X.shape[1], X.shape[1] + Y.shape[1]))
Y_prob = chains[1].predict_proba(X)
Y_binary = Y_prob >= 0.5
assert_array_equal(Y_binary, Y_pred)
assert isinstance(chains[1], ClassifierMixin)
|
def test_base_chain_fit_and_predict():
<DeepExtract>
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
</DeepExtract>
chains = [RegressorChain(Ridge()), ClassifierChain(LogisticRegression())]
for chain in chains:
chain.fit(X, Y)
Y_pred = chain.predict(X)
assert Y_pred.shape == Y.shape
assert [c.coef_.size for c in chain.estimators_] == list(range(X.shape[1], X.shape[1] + Y.shape[1]))
Y_prob = chains[1].predict_proba(X)
Y_binary = Y_prob >= 0.5
assert_array_equal(Y_binary, Y_pred)
assert isinstance(chains[1], ClassifierMixin)
|
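# Minimal ClassifierChain sketch mirroring the assertion above: estimator k in
# the chain sees the original features plus the k previous targets, so
# coef_.size grows by one per chain position.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import ClassifierChain

X, y = make_classification(n_samples=200, n_features=10, n_classes=4,
                           n_informative=6, random_state=0)
Y = np.array([[int(b) for b in format(v, '02b')] for v in y])  # 2 binary targets
chain = ClassifierChain(LogisticRegression(max_iter=1000)).fit(X, Y)
print([est.coef_.size for est in chain.estimators_])  # [10, 11]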
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, 'vocabulary_'):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for (i, t) in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = 'Duplicate term in vocabulary: %r' % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(vocabulary.values())
if len(indices) != len(vocabulary):
raise ValueError('Vocabulary contains repeated indices.')
for i in range(len(vocabulary)):
if i not in indices:
msg = "Vocabulary of size %d doesn't contain index %d." % (len(vocabulary), i)
raise ValueError(msg)
if not vocabulary:
raise ValueError('empty vocabulary passed to fit')
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
if not self.fixed_vocabulary_:
raise NotFittedError('Vocabulary not fitted or provided')
if len(self.vocabulary_) == 0:
raise ValueError('Vocabulary is empty')
|
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, 'vocabulary_'):
<DeepExtract>
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for (i, t) in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = 'Duplicate term in vocabulary: %r' % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(vocabulary.values())
if len(indices) != len(vocabulary):
raise ValueError('Vocabulary contains repeated indices.')
for i in range(len(vocabulary)):
if i not in indices:
msg = "Vocabulary of size %d doesn't contain index %d." % (len(vocabulary), i)
raise ValueError(msg)
if not vocabulary:
raise ValueError('empty vocabulary passed to fit')
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
</DeepExtract>
if not self.fixed_vocabulary_:
raise NotFittedError('Vocabulary not fitted or provided')
if len(self.vocabulary_) == 0:
raise ValueError('Vocabulary is empty')
|
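# Sketch of the vocabulary checks above via CountVectorizer: a gap in the
# index mapping is rejected when the vocabulary is validated at fit time.
from sklearn.feature_extraction.text import CountVectorizer

CountVectorizer(vocabulary={'cat': 0, 'dog': 1}).fit(['cat dog'])      # valid mapping
try:
    CountVectorizer(vocabulary={'cat': 0, 'dog': 2}).fit(['cat dog'])  # index 1 missing
except ValueError as exc:
    print(exc)  # "Vocabulary of size 2 doesn't contain index 1."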
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
(person_names, file_paths) = ([], [])
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError('min_faces_per_person=%d is too restrictive' % min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
try:
from PIL import Image
except ImportError:
raise ImportError('The Python Imaging Library (PIL) is required to load data from jpeg files. Please refer to https://pillow.readthedocs.io/en/stable/installation.html for installing PIL.')
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple((s or ds for (s, ds) in zip(slice_, default_slice)))
(h_slice, w_slice) = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
for (i, file_path) in enumerate(file_paths):
if i % 1000 == 0:
logger.debug('Loading face #%05d / %05d', i + 1, n_faces)
pil_img = Image.open(file_path)
pil_img = pil_img.crop((w_slice.start, h_slice.start, w_slice.stop, h_slice.stop))
if resize is not None:
pil_img = pil_img.resize((w, h))
face = np.asarray(pil_img, dtype=np.float32)
if face.ndim == 0:
raise RuntimeError('Failed to read the image file %s, Please make sure that libjpeg is installed' % file_path)
face /= 255.0
if not color:
face = face.mean(axis=2)
faces[i, ...] = face
faces = faces
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
(faces, target) = (faces[indices], target[indices])
return (faces, target, target_names)
|
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
(person_names, file_paths) = ([], [])
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError('min_faces_per_person=%d is too restrictive' % min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
<DeepExtract>
try:
from PIL import Image
except ImportError:
raise ImportError('The Python Imaging Library (PIL) is required to load data from jpeg files. Please refer to https://pillow.readthedocs.io/en/stable/installation.html for installing PIL.')
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple((s or ds for (s, ds) in zip(slice_, default_slice)))
(h_slice, w_slice) = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
for (i, file_path) in enumerate(file_paths):
if i % 1000 == 0:
logger.debug('Loading face #%05d / %05d', i + 1, n_faces)
pil_img = Image.open(file_path)
pil_img = pil_img.crop((w_slice.start, h_slice.start, w_slice.stop, h_slice.stop))
if resize is not None:
pil_img = pil_img.resize((w, h))
face = np.asarray(pil_img, dtype=np.float32)
if face.ndim == 0:
raise RuntimeError('Failed to read the image file %s, Please make sure that libjpeg is installed' % file_path)
face /= 255.0
if not color:
face = face.mean(axis=2)
faces[i, ...] = face
faces = faces
</DeepExtract>
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
(faces, target) = (faces[indices], target[indices])
return (faces, target, target_names)
|
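# The public fetch_lfw_people loader wraps the function above (this sketch
# assumes the LFW archive can be downloaded or is already cached);
# min_faces_per_person drops people with too few pictures.
from sklearn.datasets import fetch_lfw_people

lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
print(lfw.images.shape)   # (n_faces, h, w), grayscale values scaled to [0, 1]
print(lfw.target_names)   # unique person names, sorted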
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._validate_params()
if self.dtype not in FLOAT_DTYPES:
warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will be converted to np.float64.".format(FLOAT_DTYPES, self.dtype), UserWarning)
if self.tokenizer is not None and self.token_pattern is not None:
warnings.warn("The parameter 'token_pattern' will not be used since 'tokenizer' is not None'")
if self.preprocessor is not None and callable(self.analyzer):
warnings.warn("The parameter 'preprocessor' will not be used since 'analyzer' is callable'")
if self.ngram_range != (1, 1) and self.ngram_range is not None and callable(self.analyzer):
warnings.warn("The parameter 'ngram_range' will not be used since 'analyzer' is callable'")
if self.analyzer != 'word' or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn("The parameter 'stop_words' will not be used since 'analyzer' != 'word'")
if self.token_pattern is not None and self.token_pattern != '(?u)\\b\\w\\w+\\b':
warnings.warn("The parameter 'token_pattern' will not be used since 'analyzer' != 'word'")
if self.tokenizer is not None:
warnings.warn("The parameter 'tokenizer' will not be used since 'analyzer' != 'word'")
self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
|
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._validate_params()
<DeepExtract>
if self.dtype not in FLOAT_DTYPES:
warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will be converted to np.float64.".format(FLOAT_DTYPES, self.dtype), UserWarning)
</DeepExtract>
<DeepExtract>
if self.tokenizer is not None and self.token_pattern is not None:
warnings.warn("The parameter 'token_pattern' will not be used since 'tokenizer' is not None'")
if self.preprocessor is not None and callable(self.analyzer):
warnings.warn("The parameter 'preprocessor' will not be used since 'analyzer' is callable'")
if self.ngram_range != (1, 1) and self.ngram_range is not None and callable(self.analyzer):
warnings.warn("The parameter 'ngram_range' will not be used since 'analyzer' is callable'")
if self.analyzer != 'word' or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn("The parameter 'stop_words' will not be used since 'analyzer' != 'word'")
if self.token_pattern is not None and self.token_pattern != '(?u)\\b\\w\\w+\\b':
warnings.warn("The parameter 'token_pattern' will not be used since 'analyzer' != 'word'")
if self.tokenizer is not None:
warnings.warn("The parameter 'tokenizer' will not be used since 'analyzer' != 'word'")
</DeepExtract>
self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
|
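# Usage sketch for the fit above, assuming the enclosing class is
# sklearn.feature_extraction.text.TfidfVectorizer.
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ['the cat sat', 'the dog sat', 'the dog barked']
vec = TfidfVectorizer().fit(docs)
print(sorted(vec.vocabulary_))    # ['barked', 'cat', 'dog', 'sat', 'the']
print(vec.transform(docs).shape)  # (3, 5) sparse tf-idf matrix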
def test_path_parameters():
    n_samples, n_features, n_informative_features, n_targets = 50, 200, 10, 1  # assumed defaults of the inlined build_dataset helper
    random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=0.001, max_iter=max_iter, l1_ratio=0.5, tol=0.001)
clf.fit(X, y)
assert_almost_equal(0.5, clf.l1_ratio)
assert 50 == clf.n_alphas
assert 50 == len(clf.alphas_)
|
def test_path_parameters():
<DeepExtract>
    n_samples, n_features, n_informative_features, n_targets = 50, 200, 10, 1  # assumed defaults of the inlined build_dataset helper
    random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=0.001, max_iter=max_iter, l1_ratio=0.5, tol=0.001)
clf.fit(X, y)
assert_almost_equal(0.5, clf.l1_ratio)
assert 50 == clf.n_alphas
assert 50 == len(clf.alphas_)
|
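# Quick demonstration of the path parameters checked above: n_alphas sets the
# size of the regularization grid explored by ElasticNetCV.
import numpy as np
from sklearn.linear_model import ElasticNetCV

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
y = X @ rng.randn(10)
model = ElasticNetCV(n_alphas=20, l1_ratio=0.5).fit(X, y)
print(len(model.alphas_))   # 20
print(model.alpha_)         # alpha selected by cross-validation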
@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('data_id, params, err_type, err_msg', [(40675, {'name': 'glass2'}, ValueError, 'No active dataset glass2 found'), (61, {'data_id': 61, 'target_column': ['sepalwidth', 'class']}, ValueError, 'Can only handle homogeneous multi-target datasets'), (40945, {'data_id': 40945, 'as_frame': False}, ValueError, 'STRING attributes are not supported for array representation. Try as_frame=True'), (2, {'data_id': 2, 'target_column': 'family', 'as_frame': True}, ValueError, "Target column 'family'"), (2, {'data_id': 2, 'target_column': 'family', 'as_frame': False}, ValueError, "Target column 'family'"), (61, {'data_id': 61, 'target_column': 'undefined'}, KeyError, "Could not find target_column='undefined'"), (61, {'data_id': 61, 'target_column': ['undefined', 'class']}, KeyError, "Could not find target_column='undefined'")])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
def test_fetch_openml_error(monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser):
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
    test_offline = True  # assumed default of the inlined monkey-patch helper
    if test_offline:
        monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
if params.get('as_frame', True) or parser == 'pandas':
pytest.importorskip('pandas')
with pytest.raises(err_type, match=err_msg):
fetch_openml(cache=False, parser=parser, **params)
|
@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('data_id, params, err_type, err_msg', [(40675, {'name': 'glass2'}, ValueError, 'No active dataset glass2 found'), (61, {'data_id': 61, 'target_column': ['sepalwidth', 'class']}, ValueError, 'Can only handle homogeneous multi-target datasets'), (40945, {'data_id': 40945, 'as_frame': False}, ValueError, 'STRING attributes are not supported for array representation. Try as_frame=True'), (2, {'data_id': 2, 'target_column': 'family', 'as_frame': True}, ValueError, "Target column 'family'"), (2, {'data_id': 2, 'target_column': 'family', 'as_frame': False}, ValueError, "Target column 'family'"), (61, {'data_id': 61, 'target_column': 'undefined'}, KeyError, "Could not find target_column='undefined'"), (61, {'data_id': 61, 'target_column': ['undefined', 'class']}, KeyError, "Could not find target_column='undefined'")])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
def test_fetch_openml_error(monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser):
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
    test_offline = True  # assumed default of the inlined monkey-patch helper
    if test_offline:
        monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
if params.get('as_frame', True) or parser == 'pandas':
pytest.importorskip('pandas')
with pytest.raises(err_type, match=err_msg):
fetch_openml(cache=False, parser=parser, **params)
|
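# The parametrized error cases above correspond to real fetch_openml misuse;
# for instance (requires network access or a cached copy of dataset 61):
from sklearn.datasets import fetch_openml

try:
    fetch_openml(data_id=61, target_column='undefined', parser='liac-arff')
except KeyError as exc:
    print(exc)  # "Could not find target_column='undefined'"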
def radius_neighbors_graph(X, radius, *, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=False, n_jobs=None):
"""Compute the (weighted) graph of Neighbors for points in X.
    Neighborhoods are restricted to the points at a distance lower than
    radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that connects
i to j. The matrix is of CSR format.
See Also
--------
kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
... include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
params = zip(['metric', 'p', 'metric_params'], [metric, p, metric_params])
est_params = X.get_params()
for (param_name, func_param) in params:
if func_param != est_params[param_name]:
raise ValueError('Got %s for %s, while the estimator has %s for the same parameter.' % (func_param, param_name, est_params[param_name]))
    if include_self == 'auto':
        include_self = mode == 'connectivity'
    if not include_self:
        # do not include each sample as its own neighbor
        query = None
    else:
        query = X._fit_X
return X.radius_neighbors_graph(query, radius, mode)
|
def radius_neighbors_graph(X, radius, *, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=False, n_jobs=None):
"""Compute the (weighted) graph of Neighbors for points in X.
    Neighborhoods are restricted to the points at a distance lower than
    radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that connects
i to j. The matrix is of CSR format.
See Also
--------
kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
... include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
<DeepExtract>
params = zip(['metric', 'p', 'metric_params'], [metric, p, metric_params])
est_params = X.get_params()
for (param_name, func_param) in params:
if func_param != est_params[param_name]:
raise ValueError('Got %s for %s, while the estimator has %s for the same parameter.' % (func_param, param_name, est_params[param_name]))
</DeepExtract>
<DeepExtract>
    if include_self == 'auto':
        include_self = mode == 'connectivity'
    if not include_self:
        # do not include each sample as its own neighbor
        query = None
    else:
        query = X._fit_X
</DeepExtract>
return X.radius_neighbors_graph(query, radius, mode)
|
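# Complement to the docstring example: with mode='distance' the nonzero
# entries hold the metric values, and include_self defaults to False so the
# diagonal stays empty.
from sklearn.neighbors import radius_neighbors_graph

X = [[0], [3], [1]]
A = radius_neighbors_graph(X, 1.5, mode='distance')
print(A.toarray())
# [[0. 0. 1.]
#  [0. 0. 0.]
#  [1. 0. 0.]]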
@property
def powers_(self):
"""Exponent for each of the inputs in the output."""
check_is_fitted(self)
comb = combinations if self.interaction_only else combinations_w_r
start = max(1, self._min_degree)
iter = chain.from_iterable((comb(range(self.n_features_in_), i) for i in range(start, self._max_degree + 1)))
if self.include_bias:
iter = chain(comb(range(self.n_features_in_), 0), iter)
combinations = iter
return np.vstack([np.bincount(c, minlength=self.n_features_in_) for c in combinations])
|
@property
def powers_(self):
"""Exponent for each of the inputs in the output."""
check_is_fitted(self)
<DeepExtract>
comb = combinations if self.interaction_only else combinations_w_r
start = max(1, self._min_degree)
iter = chain.from_iterable((comb(range(self.n_features_in_), i) for i in range(start, self._max_degree + 1)))
if self.include_bias:
iter = chain(comb(range(self.n_features_in_), 0), iter)
combinations = iter
</DeepExtract>
return np.vstack([np.bincount(c, minlength=self.n_features_in_) for c in combinations])
|
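# powers_ in action, assuming the property above belongs to
# sklearn.preprocessing.PolynomialFeatures: row k holds the exponent of each
# input feature in output column k.
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=2).fit(np.zeros((1, 2)))
print(poly.powers_)
# [[0 0]   bias
#  [1 0]   x0
#  [0 1]   x1
#  [2 0]   x0^2
#  [1 1]   x0*x1
#  [0 2]]  x1^2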
def test_too_many_samples_to_find_a_safe_embedding():
    random_state = 0  # assumed seed for the inlined data helper; not otherwise defined in this scope
    rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix((rng.randn(1000), (rng.randint(1000, size=1000), rng.randint(100, size=1000))), shape=(1000, 100))
(data, _) = (data_coo.toarray(), data_coo.tocsr())
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = 'eps=0.100000 and n_samples=1000 lead to a target dimension of 5920 which is larger than the original space with n_features=100'
with pytest.raises(ValueError, match=expected_msg):
rp.fit(data)
|
def test_too_many_samples_to_find_a_safe_embedding():
<DeepExtract>
    random_state = 0  # assumed seed for the inlined data helper; not otherwise defined in this scope
    rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix((rng.randn(1000), (rng.randint(1000, size=1000), rng.randint(100, size=1000))), shape=(1000, 100))
(data, _) = (data_coo.toarray(), data_coo.tocsr())
</DeepExtract>
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = 'eps=0.100000 and n_samples=1000 lead to a target dimension of 5920 which is larger than the original space with n_features=100'
with pytest.raises(ValueError, match=expected_msg):
rp.fit(data)
|
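# The 5920 in the expected message follows from the Johnson-Lindenstrauss
# bound; johnson_lindenstrauss_min_dim reproduces it for eps=0.1, n=1000.
from sklearn.random_projection import johnson_lindenstrauss_min_dim

print(johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1))  # 5920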
def pairwise_kernels(X, Y=None, metric='linear', *, filter_params=False, n_jobs=None, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == 'precomputed':
        dtype, accept_sparse, force_all_finite, copy = None, 'csr', True, False  # assumed defaults of the inlined check_pairwise_arrays helper
        (X, Y, dtype_float) = _return_float_dtype(X, Y)
        estimator = 'check_pairwise_arrays'
        if dtype is None:
            dtype = dtype_float
        if Y is X or Y is None:
            X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
        else:
            X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
            Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
        # precomputed=True at this call site, so only the (n_queries, n_indexed) shape check applies
        if X.shape[1] != Y.shape[0]:
            raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
        (X, _) = (X, Y)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError('Unknown kernel %r' % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
def pairwise_kernels(X, Y=None, metric='linear', *, filter_params=False, n_jobs=None, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == 'precomputed':
<DeepExtract>
        dtype, accept_sparse, force_all_finite, copy = None, 'csr', True, False  # assumed defaults of the inlined check_pairwise_arrays helper
        (X, Y, dtype_float) = _return_float_dtype(X, Y)
        estimator = 'check_pairwise_arrays'
        if dtype is None:
            dtype = dtype_float
        if Y is X or Y is None:
            X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
        else:
            X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
            Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
        # precomputed=True at this call site, so only the (n_queries, n_indexed) shape check applies
        if X.shape[1] != Y.shape[0]:
            raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
        (X, _) = (X, Y)
</DeepExtract>
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError('Unknown kernel %r' % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
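
# A minimal usage sketch for pairwise_kernels, on hypothetical data: a
# built-in string metric (extra kwds such as `gamma` are forwarded to the
# kernel function) versus a callable that is applied to one pair of rows at
# a time and must return a single scalar.
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

X_demo = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
K_rbf = pairwise_kernels(X_demo, metric='rbf', gamma=0.5)

def rowwise_linear(x, y):
    # Receives single samples (1d rows) and returns one scalar.
    return float(np.dot(x, y))

K_lin = pairwise_kernels(X_demo, metric=rowwise_linear)
assert K_rbf.shape == K_lin.shape == (3, 3)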
|
def test_hidden_constraint():
"""Check that internal constraints are not exposed in the error message."""
@validate_params({'param': [Hidden(list), dict]})
def f(param):
pass
pass
pass
with pytest.raises(InvalidParameterError, match="The 'param' parameter") as exc_info:
pass
err_msg = str(exc_info.value)
assert "an instance of 'dict'" in err_msg
assert "an instance of 'list'" not in err_msg
|
def test_hidden_constraint():
"""Check that internal constraints are not exposed in the error message."""
@validate_params({'param': [Hidden(list), dict]})
def f(param):
pass
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
pass
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'param' parameter") as exc_info:
<DeepExtract>
pass
</DeepExtract>
err_msg = str(exc_info.value)
assert "an instance of 'dict'" in err_msg
assert "an instance of 'list'" not in err_msg
|
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
check_is_fitted(self)
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
(n_estimators, n_classes) = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
|
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
<DeepExtract>
check_is_fitted(self)
</DeepExtract>
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
(n_estimators, n_classes) = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
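
# Usage sketch for the method above, assuming a fitted gradient boosting
# classifier on hypothetical data: one leaf index per sample per boosting
# stage, with a single class axis for binary problems.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X_demo, y_demo = make_classification(n_samples=40, n_features=4, random_state=0)
clf_demo = GradientBoostingClassifier(n_estimators=5, random_state=0).fit(X_demo, y_demo)
leaves_demo = clf_demo.apply(X_demo)
assert leaves_demo.shape == (40, 5, 1)  # (n_samples, n_estimators, n_classes=1)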
|
def _compute_score_samples(self, X, subsample_features):
"""
    Compute the score of each sample in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order='f')
[self._max_samples] = check_array([self._max_samples], ensure_2d=False)
n_samples_leaf_shape = [self._max_samples].shape
[self._max_samples] = [self._max_samples].reshape((1, -1))
average_path_length = np.zeros([self._max_samples].shape)
mask_1 = [self._max_samples] <= 1
mask_2 = [self._max_samples] == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.0
average_path_length[mask_2] = 1.0
average_path_length[not_mask] = 2.0 * (np.log([self._max_samples][not_mask] - 1.0) + np.euler_gamma) - 2.0 * ([self._max_samples][not_mask] - 1.0) / [self._max_samples][not_mask]
average_path_length_max_samples = average_path_length.reshape(n_samples_leaf_shape)
for (tree_idx, (tree, features)) in enumerate(zip(self.estimators_, self.estimators_features_)):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset, check_input=False)
depths += self._decision_path_lengths[tree_idx][leaves_index] + self._average_path_length_per_tree[tree_idx][leaves_index] - 1.0
denominator = len(self.estimators_) * average_path_length_max_samples
scores = 2 ** (-np.divide(depths, denominator, out=np.ones_like(depths), where=denominator != 0))
return scores
|
def _compute_score_samples(self, X, subsample_features):
"""
    Compute the score of each sample in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order='f')
<DeepExtract>
[self._max_samples] = check_array([self._max_samples], ensure_2d=False)
n_samples_leaf_shape = [self._max_samples].shape
[self._max_samples] = [self._max_samples].reshape((1, -1))
average_path_length = np.zeros([self._max_samples].shape)
mask_1 = [self._max_samples] <= 1
mask_2 = [self._max_samples] == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.0
average_path_length[mask_2] = 1.0
average_path_length[not_mask] = 2.0 * (np.log([self._max_samples][not_mask] - 1.0) + np.euler_gamma) - 2.0 * ([self._max_samples][not_mask] - 1.0) / [self._max_samples][not_mask]
average_path_length_max_samples = average_path_length.reshape(n_samples_leaf_shape)
</DeepExtract>
for (tree_idx, (tree, features)) in enumerate(zip(self.estimators_, self.estimators_features_)):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset, check_input=False)
depths += self._decision_path_lengths[tree_idx][leaves_index] + self._average_path_length_per_tree[tree_idx][leaves_index] - 1.0
denominator = len(self.estimators_) * average_path_length_max_samples
scores = 2 ** (-np.divide(depths, denominator, out=np.ones_like(depths), where=denominator != 0))
return scores
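
# A worked sketch of the normalization used above: for n > 2 samples, the
# expected average path length of an unsuccessful binary-search-tree lookup
# is c(n) = 2 * (ln(n - 1) + euler_gamma) - 2 * (n - 1) / n, and a sample
# isolated at mean depth d receives the anomaly score 2 ** (-d / c(n)).
import numpy as np

def average_path_length(n):
    if n <= 1:
        return 0.0
    if n == 2:
        return 1.0
    return 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n

c = average_path_length(256)    # ~10.24
assert 2.0 ** (-c / c) == 0.5   # depth equal to c(n) -> borderline score 0.5
assert 2.0 ** (-4.0 / c) > 0.5  # shallow isolation -> closer to 1 (anomalous)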
|
def decision_function(self, X):
"""Confidence score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
decision : ndarray of shape (n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence score.
"""
if self.methods_to_check == 'all' or 'decision_function' in self.methods_to_check:
if should_be_fitted:
check_is_fitted(self)
if self.check_X is not None:
params = {} if self.check_X_params is None else self.check_X_params
checked_X = self.check_X(X, **params)
if isinstance(checked_X, (bool, np.bool_)):
assert checked_X
else:
X = checked_X
if y is not None and self.check_y is not None:
params = {} if self.check_y_params is None else self.check_y_params
checked_y = self.check_y(y, **params)
if isinstance(checked_y, (bool, np.bool_)):
assert checked_y
else:
y = checked_y
(X, y) = (X, y)
if len(self.classes_) == 2:
return np.zeros(_num_samples(X))
else:
decision = np.zeros((_num_samples(X), len(self.classes_)))
decision[:, 0] = 1
return decision
|
def decision_function(self, X):
"""Confidence score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
decision : ndarray of shape (n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence score.
"""
if self.methods_to_check == 'all' or 'decision_function' in self.methods_to_check:
<DeepExtract>
if should_be_fitted:
check_is_fitted(self)
if self.check_X is not None:
params = {} if self.check_X_params is None else self.check_X_params
checked_X = self.check_X(X, **params)
if isinstance(checked_X, (bool, np.bool_)):
assert checked_X
else:
X = checked_X
if y is not None and self.check_y is not None:
params = {} if self.check_y_params is None else self.check_y_params
checked_y = self.check_y(y, **params)
if isinstance(checked_y, (bool, np.bool_)):
assert checked_y
else:
y = checked_y
(X, y) = (X, y)
</DeepExtract>
if len(self.classes_) == 2:
return np.zeros(_num_samples(X))
else:
decision = np.zeros((_num_samples(X), len(self.classes_)))
decision[:, 0] = 1
return decision
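
# The shape convention this mock mirrors, shown with a real estimator on
# hypothetical data: binary classifiers return a 1d decision array, while
# multiclass classifiers return one column per class.
import numpy as np
from sklearn.linear_model import LogisticRegression

X_demo = np.array([[0.0], [1.0], [2.0], [3.0]])
d_bin = LogisticRegression().fit(X_demo, [0, 0, 1, 1]).decision_function(X_demo)
d_multi = LogisticRegression().fit(X_demo, [0, 1, 2, 2]).decision_function(X_demo)
assert d_bin.shape == (4,) and d_multi.shape == (4, 3)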
|
def gradient(self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1, raw_prediction=None):
"""Computes the gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_features, n_classes) = (X.shape[1], self.base_loss.n_classes)
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
(weights, intercept) = self.weight_intercept(coef)
if not self.base_loss.is_multiclass:
raw_prediction = X @ weights + intercept
else:
raw_prediction = X @ weights.T + intercept
(weights, intercept, raw_prediction) = (weights, intercept, raw_prediction)
else:
if not self.base_loss.is_multiclass:
if self.fit_intercept:
intercept = coef[-1]
weights = coef[:-1]
else:
intercept = 0.0
weights = coef
else:
if coef.ndim == 1:
weights = coef.reshape((self.base_loss.n_classes, -1), order='F')
else:
weights = coef
if self.fit_intercept:
intercept = weights[:, -1]
weights = weights[:, :-1]
else:
intercept = 0.0
(weights, intercept) = (weights, intercept)
grad_pointwise = self.base_loss.gradient(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads)
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
return grad
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order='F')
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
return grad.ravel(order='F')
else:
return grad
|
def gradient(self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1, raw_prediction=None):
"""Computes the gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_features, n_classes) = (X.shape[1], self.base_loss.n_classes)
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
<DeepExtract>
(weights, intercept) = self.weight_intercept(coef)
if not self.base_loss.is_multiclass:
raw_prediction = X @ weights + intercept
else:
raw_prediction = X @ weights.T + intercept
(weights, intercept, raw_prediction) = (weights, intercept, raw_prediction)
</DeepExtract>
else:
<DeepExtract>
if not self.base_loss.is_multiclass:
if self.fit_intercept:
intercept = coef[-1]
weights = coef[:-1]
else:
intercept = 0.0
weights = coef
else:
if coef.ndim == 1:
weights = coef.reshape((self.base_loss.n_classes, -1), order='F')
else:
weights = coef
if self.fit_intercept:
intercept = weights[:, -1]
weights = weights[:, :-1]
else:
intercept = 0.0
(weights, intercept) = (weights, intercept)
</DeepExtract>
grad_pointwise = self.base_loss.gradient(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads)
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
return grad
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order='F')
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
return grad.ravel(order='F')
else:
return grad
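
# A numpy sketch of the single-output branch above, assuming a squared-error
# loss: grad = X.T @ grad_pointwise + l2 * w with pointwise gradient
# (X @ w - y) / n_samples, cross-checked against central finite differences.
import numpy as np

rng = np.random.default_rng(0)
X_demo, y_demo = rng.normal(size=(20, 3)), rng.normal(size=20)
w_demo, l2, eps = rng.normal(size=3), 0.1, 1e-6

def loss(w):
    return 0.5 * np.mean((X_demo @ w - y_demo) ** 2) + 0.5 * l2 * w @ w

grad = X_demo.T @ ((X_demo @ w_demo - y_demo) / len(y_demo)) + l2 * w_demo
numeric = np.array([(loss(w_demo + eps * e) - loss(w_demo - eps * e)) / (2 * eps)
                    for e in np.eye(3)])
assert np.allclose(grad, numeric, atol=1e-5)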
|
def fit(self, X, y=None):
"""Create a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
SpectralBiclustering instance.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=np.float64)
(normalized_data, row_diag, col_diag) = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
(u, v) = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
(_, labels) = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c for c in range(self.n_clusters)])
return self
|
def fit(self, X, y=None):
"""Create a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
SpectralBiclustering instance.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=np.float64)
<DeepExtract>
</DeepExtract>
<DeepExtract>
(normalized_data, row_diag, col_diag) = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
(u, v) = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
(_, labels) = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c for c in range(self.n_clusters)])
</DeepExtract>
return self
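
# Usage sketch for the fit above, on hypothetical data: recovering a planted
# checkerboard structure with the public SpectralBiclustering API.
from sklearn.cluster import SpectralBiclustering
from sklearn.datasets import make_checkerboard

data, _, _ = make_checkerboard(shape=(30, 30), n_clusters=(3, 3), noise=0.5,
                               random_state=0)
model = SpectralBiclustering(n_clusters=3, random_state=0).fit(data)
assert model.row_labels_.shape == (30,) and model.column_labels_.shape == (30,)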
|
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
(n_samples, _) = X.shape
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {'full': _estimate_gaussian_covariances_full, 'tied': _estimate_gaussian_covariances_tied, 'diag': _estimate_gaussian_covariances_diag, 'spherical': _estimate_gaussian_covariances_spherical}[self.covariance_type](resp, X, nk, means, self.reg_covar)
(weights, means, covariances) = (nk, means, covariances)
weights /= n_samples
self.weights_ = weights if self.weights_init is None else self.weights_init
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
estimate_precision_error_message = 'Fitting the mixture model failed because some components have ill-defined empirical covariance (for instance caused by singleton or collapsed samples). Try to decrease the number of components, or increase reg_covar.'
if self.covariance_type == 'full':
(n_components, n_features, _) = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for (k, covariance) in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
elif self.covariance_type == 'tied':
(_, n_features) = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(covariances)
self.precisions_cholesky_ = precisions_chol
elif self.covariance_type == 'full':
self.precisions_cholesky_ = np.array([linalg.cholesky(prec_init, lower=True) for prec_init in self.precisions_init])
elif self.covariance_type == 'tied':
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init, lower=True)
else:
self.precisions_cholesky_ = np.sqrt(self.precisions_init)
|
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
(n_samples, _) = X.shape
<DeepExtract>
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {'full': _estimate_gaussian_covariances_full, 'tied': _estimate_gaussian_covariances_tied, 'diag': _estimate_gaussian_covariances_diag, 'spherical': _estimate_gaussian_covariances_spherical}[self.covariance_type](resp, X, nk, means, self.reg_covar)
(weights, means, covariances) = (nk, means, covariances)
</DeepExtract>
weights /= n_samples
self.weights_ = weights if self.weights_init is None else self.weights_init
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
<DeepExtract>
estimate_precision_error_message = 'Fitting the mixture model failed because some components have ill-defined empirical covariance (for instance caused by singleton or collapsed samples). Try to decrease the number of components, or increase reg_covar.'
if self.covariance_type == 'full':
(n_components, n_features, _) = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for (k, covariance) in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
elif self.covariance_type == 'tied':
(_, n_features) = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(covariances)
self.precisions_cholesky_ = precisions_chol
</DeepExtract>
elif self.covariance_type == 'full':
self.precisions_cholesky_ = np.array([linalg.cholesky(prec_init, lower=True) for prec_init in self.precisions_init])
elif self.covariance_type == 'tied':
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init, lower=True)
else:
self.precisions_cholesky_ = np.sqrt(self.precisions_init)
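
# A sketch of the 'full'-covariance branch above: the Cholesky factor of the
# precision matrix is the transposed triangular solve of the covariance's
# lower Cholesky factor, so prec_chol @ prec_chol.T inverts the covariance.
import numpy as np
from scipy import linalg

cov = np.array([[2.0, 0.3], [0.3, 1.0]])
cov_chol = linalg.cholesky(cov, lower=True)
prec_chol = linalg.solve_triangular(cov_chol, np.eye(2), lower=True).T
assert np.allclose(prec_chol @ prec_chol.T, np.linalg.inv(cov))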
|
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = 0.1
n_samples = 50
n_iter = 20
tol = 1e-05
fit_intercept = True
(X, y) = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1)
if True:
step_size = 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha)
else:
step_size = 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: 0.45, -1: 0.55}
clf1 = LogisticRegression(solver='sag', C=1.0 / alpha / n_samples, max_iter=n_iter, tol=tol, random_state=77, fit_intercept=fit_intercept, multi_class='ovr', class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y)
sample_weight = class_weight_[le.fit_transform(y)]
if step_size * alpha == 1.0:
raise ZeroDivisionError('Sparse sag does not handle the case step_size * alpha == 1')
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=int)
gradient_memory = np.zeros(n_samples)
rng = check_random_state(random_state)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
if sparse:
decay = 0.01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter
p = wscale * np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - gradient_memory[idx] * entry
sum_gradient += gradient_correction
if saga:
for j in range(n_features):
weights[j] -= gradient_correction[j] * step_size * (1 - 1.0 / len(seen)) / wscale
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if saga:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
gradient_memory[idx] = gradient
wscale *= 1.0 - alpha * step_size
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))
if counter >= 1 and wscale < 1e-09:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
weights *= wscale
(spweights, spintercept) = (weights, intercept)
if step_size * alpha == 1.0:
raise ZeroDivisionError('Sparse sag does not handle the case step_size * alpha == 1')
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=int)
gradient_memory = np.zeros(n_samples)
rng = check_random_state(random_state)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
if True:
decay = 0.01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter
p = wscale * np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - gradient_memory[idx] * entry
sum_gradient += gradient_correction
if saga:
for j in range(n_features):
weights[j] -= gradient_correction[j] * step_size * (1 - 1.0 / len(seen)) / wscale
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if saga:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
gradient_memory[idx] = gradient
wscale *= 1.0 - alpha * step_size
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))
if counter >= 1 and wscale < 1e-09:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
weights *= wscale
(spweights2, spintercept2) = (weights, intercept)
assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
|
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = 0.1
n_samples = 50
n_iter = 20
tol = 1e-05
fit_intercept = True
(X, y) = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1)
<DeepExtract>
if True:
step_size = 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha)
else:
step_size = 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
</DeepExtract>
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: 0.45, -1: 0.55}
clf1 = LogisticRegression(solver='sag', C=1.0 / alpha / n_samples, max_iter=n_iter, tol=tol, random_state=77, fit_intercept=fit_intercept, multi_class='ovr', class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y)
sample_weight = class_weight_[le.fit_transform(y)]
<DeepExtract>
if step_size * alpha == 1.0:
raise ZeroDivisionError('Sparse sag does not handle the case step_size * alpha == 1')
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=int)
gradient_memory = np.zeros(n_samples)
rng = check_random_state(random_state)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
if sparse:
decay = 0.01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter
p = wscale * np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - gradient_memory[idx] * entry
sum_gradient += gradient_correction
if saga:
for j in range(n_features):
weights[j] -= gradient_correction[j] * step_size * (1 - 1.0 / len(seen)) / wscale
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if saga:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
gradient_memory[idx] = gradient
wscale *= 1.0 - alpha * step_size
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))
if counter >= 1 and wscale < 1e-09:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
weights *= wscale
(spweights, spintercept) = (weights, intercept)
</DeepExtract>
<DeepExtract>
if step_size * alpha == 1.0:
raise ZeroDivisionError('Sparse sag does not handle the case step_size * alpha == 1')
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=int)
gradient_memory = np.zeros(n_samples)
rng = check_random_state(random_state)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
if True:
decay = 0.01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter
p = wscale * np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - gradient_memory[idx] * entry
sum_gradient += gradient_correction
if saga:
for j in range(n_features):
weights[j] -= gradient_correction[j] * step_size * (1 - 1.0 / len(seen)) / wscale
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if saga:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
gradient_memory[idx] = gradient
wscale *= 1.0 - alpha * step_size
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))
if counter >= 1 and wscale < 1e-09:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
weights *= wscale
(spweights2, spintercept2) = (weights, intercept)
</DeepExtract>
assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
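
# The step size reproduced inline above (twice) is the SAG step for log loss:
# 4 / (max_i ||x_i||^2 + fit_intercept + 4 * alpha), where the squared-norm
# term bounds the per-sample gradient's Lipschitz constant. A minimal sketch
# on hypothetical data:
import numpy as np

def sag_log_loss_step_size(X, alpha, fit_intercept=True):
    max_squared_sum = np.max(np.sum(X * X, axis=1))
    return 4.0 / (max_squared_sum + int(fit_intercept) + 4.0 * alpha)

X_demo = np.array([[3.0, 4.0], [1.0, 0.0]])  # max ||x||^2 = 25
assert np.isclose(sag_log_loss_step_size(X_demo, alpha=0.1), 4.0 / 26.4)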
|
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for, zero_division='warn'):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements equal to
    0, 1 or np.nan (according to ``zero_division``). Plus, if
    ``zero_division == "warn"``, a warning is raised.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1
result = numerator / denominator
if not np.any(mask):
return result
if isinstance(zero_division, str) and zero_division == 'warn':
zero_division_value = np.float64(0.0)
elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
zero_division_value = np.float64(zero_division)
else:
zero_division_value = np.nan
result[mask] = zero_division_value
if zero_division != 'warn' or metric not in warn_for:
return result
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
(axis0, axis1) = ('sample', 'label')
if average == 'samples':
(axis0, axis1) = (axis1, axis0)
msg = '{0} ill-defined and being set to 0.0 {{0}} no {1} {2}s. Use `zero_division` parameter to control this behavior.'.format(msg_start, modifier, axis0)
if len(result) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
|
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for, zero_division='warn'):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements equal to
    0, 1 or np.nan (according to ``zero_division``). Plus, if
    ``zero_division == "warn"``, a warning is raised.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1
result = numerator / denominator
if not np.any(mask):
return result
<DeepExtract>
if isinstance(zero_division, str) and zero_division == 'warn':
zero_division_value = np.float64(0.0)
elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
zero_division_value = np.float64(zero_division)
else:
zero_division_value = np.nan
</DeepExtract>
result[mask] = zero_division_value
if zero_division != 'warn' or metric not in warn_for:
return result
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
<DeepExtract>
(axis0, axis1) = ('sample', 'label')
if average == 'samples':
(axis0, axis1) = (axis1, axis0)
msg = '{0} ill-defined and being set to 0.0 {{0}} no {1} {2}s. Use `zero_division` parameter to control this behavior.'.format(msg_start, modifier, axis0)
if len(result) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
</DeepExtract>
return result
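
# A minimal sketch of the masked-division pattern used above: zero
# denominators are replaced by 1 before dividing, then the affected entries
# are overwritten with the chosen zero-division value.
import numpy as np

def safe_divide(numerator, denominator, zero_value=0.0):
    denominator = denominator.astype(float)  # astype returns a copy
    mask = denominator == 0.0
    denominator[mask] = 1.0
    result = numerator / denominator
    result[mask] = zero_value
    return result

assert np.allclose(safe_divide(np.array([2.0, 3.0]), np.array([4.0, 0.0])), [0.5, 0.0])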
|
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
        The points transformed into the embedding space.
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs).
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=False)
X = check_array(X, dtype=FLOAT_DTYPES)
self.nbrs_._fit_X = check_array(self.nbrs_._fit_X, dtype=FLOAT_DTYPES)
ind = check_array(ind, dtype=int)
(n_samples, n_neighbors) = ind.shape
assert X.shape[0] == n_samples
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
for (i, ind) in enumerate(ind):
A = self.nbrs_._fit_X[ind]
C = A - X[i]
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = self.reg * trace
else:
R = self.reg
G.flat[::n_neighbors + 1] += R
w = solve(G, v, assume_a='pos')
B[i, :] = w / np.sum(w)
weights = B
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
|
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
        The points transformed into the embedding space.
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs).
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=False)
<DeepExtract>
X = check_array(X, dtype=FLOAT_DTYPES)
self.nbrs_._fit_X = check_array(self.nbrs_._fit_X, dtype=FLOAT_DTYPES)
ind = check_array(ind, dtype=int)
(n_samples, n_neighbors) = ind.shape
assert X.shape[0] == n_samples
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
for (i, ind) in enumerate(ind):
A = self.nbrs_._fit_X[ind]
C = A - X[i]
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = self.reg * trace
else:
R = self.reg
G.flat[::n_neighbors + 1] += R
w = solve(G, v, assume_a='pos')
B[i, :] = w / np.sum(w)
weights = B
</DeepExtract>
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
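
# A sketch of the barycenter-weight solve inlined above, for one query point
# and a hypothetical neighborhood: solve G w = 1 on the regularized local
# Gram matrix, then normalize so the reconstruction weights sum to one.
import numpy as np
from scipy.linalg import solve

x_query = np.array([0.5, 0.5])
nbrs = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
C = nbrs - x_query
G = C @ C.T
G.flat[::len(G) + 1] += 1e-3 * np.trace(G)  # plays the role of `reg`
w = solve(G, np.ones(len(G)), assume_a='pos')
w /= w.sum()
assert np.isclose(w.sum(), 1.0)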
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns a fitted instance of self.
"""
if not hasattr(self, 'coef_'):
self._validate_params()
if self.early_stopping and True:
raise ValueError('early_stopping should be False with partial_fit')
if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0:
raise ValueError('eta0 must be > 0')
if self.learning_rate == 'optimal' and self.alpha == 0:
raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.")
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
alpha = self.nu / 2
return self._partial_fit(X, alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, offset_init=None)
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns a fitted instance of self.
"""
if not hasattr(self, 'coef_'):
self._validate_params()
<DeepExtract>
if self.early_stopping and True:
raise ValueError('early_stopping should be False with partial_fit')
if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0:
raise ValueError('eta0 must be > 0')
if self.learning_rate == 'optimal' and self.alpha == 0:
raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.")
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
</DeepExtract>
alpha = self.nu / 2
return self._partial_fit(X, alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, offset_init=None)
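
# Usage sketch for the method above, on hypothetical data: streaming two
# mini-batches through SGDOneClassSVM.partial_fit before predicting
# inlier (+1) / outlier (-1) labels.
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.default_rng(0)
est = SGDOneClassSVM(nu=0.1, random_state=0)
for batch in np.array_split(rng.normal(size=(100, 2)), 2):
    est.partial_fit(batch)
labels = est.predict(np.array([[0.0, 0.0], [8.0, 8.0]]))
assert set(labels) <= {-1, 1}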
|
@ignore_warnings(category=FutureWarning)
def check_transformer_data_not_an_array(name, transformer):
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
if '1darray' in _safe_tags(transformer, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer, key='pairwise'):
X = kernel(X, X)
X = X
this_X = _NotAnArray(X)
this_y = _NotAnArray(np.asarray(y))
(n_samples, n_features) = np.asarray(this_X).shape
transformer = clone(transformer)
set_random_state(transformer)
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(this_y), np.asarray(this_y)]
y_[::2, 1] *= 2
if isinstance(this_X, _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = this_y
transformer.fit(this_X, y_)
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(this_X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
assert X_pred.shape[0] == n_samples
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(this_X, y_)
X_pred3 = transformer.fit_transform(this_X, y=y_)
else:
X_pred2 = transformer.transform(this_X)
X_pred3 = transformer.fit_transform(this_X, y=y_)
if _safe_tags(transformer, key='non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for (x_pred, x_pred2, x_pred3) in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(x_pred, x_pred2, atol=0.01, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer)
assert_allclose_dense_sparse(x_pred, x_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
else:
assert_allclose_dense_sparse(X_pred, X_pred2, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer, atol=0.01)
assert_allclose_dense_sparse(X_pred, X_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
if hasattr(this_X, 'shape') and (not _safe_tags(transformer, key='stateless')) and (this_X.ndim == 2) and (this_X.shape[1] > 1):
with raises(ValueError, err_msg=f'The transformer {name} does not raise an error when the number of features in transform is different from the number of features in fit.'):
transformer.transform(this_X[:, :-1])
(n_samples, n_features) = np.asarray(X.tolist()).shape
transformer = clone(transformer)
set_random_state(transformer)
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y.tolist()), np.asarray(y.tolist())]
y_[::2, 1] *= 2
if isinstance(X.tolist(), _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = y.tolist()
transformer.fit(X.tolist(), y_)
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X.tolist(), y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
assert X_pred.shape[0] == n_samples
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X.tolist(), y_)
X_pred3 = transformer.fit_transform(X.tolist(), y=y_)
else:
X_pred2 = transformer.transform(X.tolist())
X_pred3 = transformer.fit_transform(X.tolist(), y=y_)
if _safe_tags(transformer, key='non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for (x_pred, x_pred2, x_pred3) in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(x_pred, x_pred2, atol=0.01, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer)
assert_allclose_dense_sparse(x_pred, x_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
else:
assert_allclose_dense_sparse(X_pred, X_pred2, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer, atol=0.01)
assert_allclose_dense_sparse(X_pred, X_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
if hasattr(X.tolist(), 'shape') and (not _safe_tags(transformer, key='stateless')) and (X.tolist().ndim == 2) and (X.tolist().shape[1] > 1):
with raises(ValueError, err_msg=f'The transformer {name} does not raise an error when the number of features in transform is different from the number of features in fit.'):
transformer.transform(X.tolist()[:, :-1])
|
@ignore_warnings(category=FutureWarning)
def check_transformer_data_not_an_array(name, transformer):
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
<DeepExtract>
if '1darray' in _safe_tags(transformer, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
this_X = _NotAnArray(X)
this_y = _NotAnArray(np.asarray(y))
<DeepExtract>
(n_samples, n_features) = np.asarray(this_X).shape
transformer = clone(transformer)
set_random_state(transformer)
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(this_y), np.asarray(this_y)]
y_[::2, 1] *= 2
if isinstance(this_X, _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = this_y
transformer.fit(this_X, y_)
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(this_X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
assert X_pred.shape[0] == n_samples
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(this_X, y_)
X_pred3 = transformer.fit_transform(this_X, y=y_)
else:
X_pred2 = transformer.transform(this_X)
X_pred3 = transformer.fit_transform(this_X, y=y_)
if _safe_tags(transformer, key='non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for (x_pred, x_pred2, x_pred3) in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(x_pred, x_pred2, atol=0.01, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer)
assert_allclose_dense_sparse(x_pred, x_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
else:
assert_allclose_dense_sparse(X_pred, X_pred2, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer, atol=0.01)
assert_allclose_dense_sparse(X_pred, X_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
if hasattr(this_X, 'shape') and (not _safe_tags(transformer, key='stateless')) and (this_X.ndim == 2) and (this_X.shape[1] > 1):
with raises(ValueError, err_msg=f'The transformer {name} does not raise an error when the number of features in transform is different from the number of features in fit.'):
transformer.transform(this_X[:, :-1])
</DeepExtract>
<DeepExtract>
(n_samples, n_features) = np.asarray(X.tolist()).shape
transformer = clone(transformer)
set_random_state(transformer)
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y.tolist()), np.asarray(y.tolist())]
y_[::2, 1] *= 2
if isinstance(X.tolist(), _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = y.tolist()
transformer.fit(X.tolist(), y_)
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X.tolist(), y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
assert X_pred.shape[0] == n_samples
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X.tolist(), y_)
X_pred3 = transformer.fit_transform(X.tolist(), y=y_)
else:
X_pred2 = transformer.transform(X.tolist())
X_pred3 = transformer.fit_transform(X.tolist(), y=y_)
if _safe_tags(transformer, key='non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for (x_pred, x_pred2, x_pred3) in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(x_pred, x_pred2, atol=0.01, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer)
assert_allclose_dense_sparse(x_pred, x_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
else:
assert_allclose_dense_sparse(X_pred, X_pred2, err_msg='fit_transform and transform outcomes not consistent in %s' % transformer, atol=0.01)
assert_allclose_dense_sparse(X_pred, X_pred3, atol=0.01, err_msg='consecutive fit_transform outcomes not consistent in %s' % transformer)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
if hasattr(X.tolist(), 'shape') and (not _safe_tags(transformer, key='stateless')) and (X.tolist().ndim == 2) and (X.tolist().shape[1] > 1):
with raises(ValueError, err_msg=f'The transformer {name} does not raise an error when the number of features in transform is different from the number of features in fit.'):
transformer.transform(X.tolist()[:, :-1])
</DeepExtract>
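
# A standalone sketch of the wrapper idea exercised above: an object that is
# not a numpy array but converts into one through __array__, which is how
# "array-like" inputs behave (not the test-suite class itself).
import numpy as np

class NotAnArrayDemo:
    def __init__(self, data):
        self.data = np.asarray(data)

    def __array__(self, dtype=None):
        return self.data if dtype is None else self.data.astype(dtype)

wrapped = NotAnArrayDemo([[1.0, 2.0], [3.0, 4.0]])
assert not isinstance(wrapped, np.ndarray)
assert np.asarray(wrapped).shape == (2, 2)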
|
@pytest.mark.filterwarnings('ignore:WMinkowskiDistance:FutureWarning:sklearn')
@pytest.mark.parametrize('metric', sorted(set(neighbors.VALID_METRICS['brute']) - set(['precomputed'])))
def test_radius_neighbors_brute_backend(metric, n_samples=2000, n_features=30, n_query_pts=100, n_neighbors=5, radius=1.0):
X_train = rng.rand(n_samples, n_features)
X_test = rng.rand(n_query_pts, n_features)
if metric == 'haversine':
feature_sl = slice(None, 2)
X_train = np.ascontiguousarray(X_train[:, feature_sl])
X_test = np.ascontiguousarray(X_test[:, feature_sl])
rng = np.random.RandomState(1)
weights = rng.random_sample(n_features)
if metric == 'minkowski':
minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)]
if sp_version >= parse_version('1.8.0.dev0'):
minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = minkowski_kwargs
if metric == 'wminkowski':
weights /= weights.sum()
wminkowski_kwargs = [dict(p=1.5, w=weights)]
if sp_version < parse_version('1.8.0.dev0'):
wminkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = wminkowski_kwargs
if metric == 'seuclidean':
metric_params_list = [dict(V=rng.rand(n_features))]
if metric == 'mahalanobis':
A = rng.rand(n_features, n_features)
VI = A + A.T + 3 * np.eye(n_features)
metric_params_list = [dict(VI=VI)]
metric_params_list = [{}]
warn_context_manager = nullcontext()
if metric == 'wminkowski' and sp_version >= parse_version('1.6.0'):
warn_context_manager = pytest.warns((FutureWarning, DeprecationWarning))
for metric_params in metric_params_list:
p = metric_params.pop('p', 2)
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, radius=radius, algorithm='brute', metric=metric, p=p, metric_params=metric_params)
neigh.fit(X_train)
with warn_context_manager:
with config_context(enable_cython_pairwise_dist=False):
(legacy_brute_dst, legacy_brute_idx) = neigh.radius_neighbors(X_test, return_distance=True)
with config_context(enable_cython_pairwise_dist=True):
(pdr_brute_dst, pdr_brute_idx) = neigh.radius_neighbors(X_test, return_distance=True)
assert_radius_neighbors_results_equality(legacy_brute_dst, pdr_brute_dst, legacy_brute_idx, pdr_brute_idx, radius=radius)
|
@pytest.mark.filterwarnings('ignore:WMinkowskiDistance:FutureWarning:sklearn')
@pytest.mark.parametrize('metric', sorted(set(neighbors.VALID_METRICS['brute']) - set(['precomputed'])))
def test_radius_neighbors_brute_backend(metric, n_samples=2000, n_features=30, n_query_pts=100, n_neighbors=5, radius=1.0):
X_train = rng.rand(n_samples, n_features)
X_test = rng.rand(n_query_pts, n_features)
if metric == 'haversine':
feature_sl = slice(None, 2)
X_train = np.ascontiguousarray(X_train[:, feature_sl])
X_test = np.ascontiguousarray(X_test[:, feature_sl])
<DeepExtract>
rng = np.random.RandomState(1)
weights = rng.random_sample(n_features)
if metric == 'minkowski':
minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)]
if sp_version >= parse_version('1.8.0.dev0'):
minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = minkowski_kwargs
if metric == 'wminkowski':
weights /= weights.sum()
wminkowski_kwargs = [dict(p=1.5, w=weights)]
if sp_version < parse_version('1.8.0.dev0'):
wminkowski_kwargs.append(dict(p=3, w=rng.rand(n_features)))
metric_params_list = wminkowski_kwargs
if metric == 'seuclidean':
metric_params_list = [dict(V=rng.rand(n_features))]
if metric == 'mahalanobis':
A = rng.rand(n_features, n_features)
VI = A + A.T + 3 * np.eye(n_features)
metric_params_list = [dict(VI=VI)]
metric_params_list = [{}]
</DeepExtract>
warn_context_manager = nullcontext()
if metric == 'wminkowski' and sp_version >= parse_version('1.6.0'):
warn_context_manager = pytest.warns((FutureWarning, DeprecationWarning))
for metric_params in metric_params_list:
p = metric_params.pop('p', 2)
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, radius=radius, algorithm='brute', metric=metric, p=p, metric_params=metric_params)
neigh.fit(X_train)
with warn_context_manager:
with config_context(enable_cython_pairwise_dist=False):
(legacy_brute_dst, legacy_brute_idx) = neigh.radius_neighbors(X_test, return_distance=True)
with config_context(enable_cython_pairwise_dist=True):
(pdr_brute_dst, pdr_brute_idx) = neigh.radius_neighbors(X_test, return_distance=True)
assert_radius_neighbors_results_equality(legacy_brute_dst, pdr_brute_dst, legacy_brute_idx, pdr_brute_idx, radius=radius)
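
# Usage sketch of the query compared above, on hypothetical data:
# radius_neighbors returns, per query point, the distances and indices of
# all training points within `radius`.
import numpy as np
from sklearn.neighbors import NearestNeighbors

X_train_demo = np.array([[0.0], [1.0], [5.0]])
nn = NearestNeighbors(radius=1.5, algorithm='brute').fit(X_train_demo)
dist, ind = nn.radius_neighbors(np.array([[0.2]]), return_distance=True)
assert sorted(ind[0]) == [0, 1]  # only the points at 0.0 and 1.0 are in range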
|
@pytest.mark.parametrize('with_sample_weight', [True, False])
def test_ridge_fit_intercept_sparse_sag(with_sample_weight, global_random_seed):
(X, y, c) = make_regression(n_samples=20, n_features=5, n_informative=n_informative, n_targets=n_targets, bias=bias, noise=noise, shuffle=shuffle, coef=True, random_state=global_random_seed)
if 5 == 1:
c = np.asarray([c])
X += 5.0
mask = np.random.RandomState(global_random_seed).binomial(1, proportion_nonzero, X.shape) > 0
removed_X = X.copy()
X[~mask] = 0.0
removed_X[mask] = 0.0
y -= removed_X.dot(c)
if positive:
y += X.dot(np.abs(c) + 1 - c)
c = np.abs(c) + 1
if 5 == 1:
c = c[0]
if coef:
(X, y) = (X, y, c)
(X, y) = (X, y)
if with_sample_weight:
rng = np.random.RandomState(global_random_seed)
sample_weight = 1.0 + rng.uniform(size=X.shape[0])
else:
sample_weight = None
X_csr = sp.csr_matrix(X)
params = dict(alpha=1.0, solver='sag', fit_intercept=True, tol=1e-10, max_iter=100000)
dense_ridge = Ridge(**params)
sparse_ridge = Ridge(**params)
dense_ridge.fit(X, y, sample_weight=sample_weight)
with warnings.catch_warnings():
warnings.simplefilter('error', UserWarning)
sparse_ridge.fit(X_csr, y, sample_weight=sample_weight)
assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=0.0001)
assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=0.0001)
with pytest.warns(UserWarning, match='"sag" solver requires.*'):
Ridge(solver='sag', fit_intercept=True, tol=0.001, max_iter=None).fit(X_csr, y)
|
@pytest.mark.parametrize('with_sample_weight', [True, False])
def test_ridge_fit_intercept_sparse_sag(with_sample_weight, global_random_seed):
<DeepExtract>
# NOTE: the inlined data helper left several names unbound; the values below
# are assumed to be its defaults (n_informative=10, n_targets=1, bias=13.0,
# shuffle=False, proportion_nonzero=0.5), with noise=1.0 and X_offset=5.0
# passed by this test.
(X, y, c) = make_regression(n_samples=20, n_features=5, n_informative=10, n_targets=1, bias=13.0, noise=1.0, shuffle=False, coef=True, random_state=global_random_seed)
X += 5.0
mask = np.random.RandomState(global_random_seed).binomial(1, 0.5, X.shape) > 0
removed_X = X.copy()
X[~mask] = 0.0
removed_X[mask] = 0.0
y -= removed_X.dot(c)
</DeepExtract>
if with_sample_weight:
rng = np.random.RandomState(global_random_seed)
sample_weight = 1.0 + rng.uniform(size=X.shape[0])
else:
sample_weight = None
X_csr = sp.csr_matrix(X)
params = dict(alpha=1.0, solver='sag', fit_intercept=True, tol=1e-10, max_iter=100000)
dense_ridge = Ridge(**params)
sparse_ridge = Ridge(**params)
dense_ridge.fit(X, y, sample_weight=sample_weight)
with warnings.catch_warnings():
warnings.simplefilter('error', UserWarning)
sparse_ridge.fit(X_csr, y, sample_weight=sample_weight)
assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=0.0001)
assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=0.0001)
with pytest.warns(UserWarning, match='"sag" solver requires.*'):
Ridge(solver='sag', fit_intercept=True, tol=0.001, max_iter=None).fit(X_csr, y)
|
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_perplexity(method):
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
lda_1 = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method=method, total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components, max_iter=10, learning_method=method, total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert perp_1 >= perp_2
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert perp_1_subsampling >= perp_2_subsampling
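
# Hedged usage sketch of the behavior asserted above: perplexity typically
# decreases (improves) with more EM passes. Toy bag-of-words counts only.
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

X_toy = np.random.RandomState(0).randint(0, 5, size=(20, 10))
lda_short = LatentDirichletAllocation(n_components=3, max_iter=1, random_state=0).fit(X_toy)
lda_long = LatentDirichletAllocation(n_components=3, max_iter=10, random_state=0).fit(X_toy)
print(lda_short.perplexity(X_toy), lda_long.perplexity(X_toy))  # typically decreasing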
|
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_perplexity(method):
<DeepExtract>
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
</DeepExtract>
lda_1 = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method=method, total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components, max_iter=10, learning_method=method, total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert perp_1 >= perp_2
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert perp_1_subsampling >= perp_2_subsampling
|
def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis.
Center to the mean and component-wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : int, default=0
Axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : bool, default=True
If True, center the data before scaling.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
StandardScaler : Performs scaling to unit variance using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if they expect the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.StandardScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, estimator='the scale function', dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
if with_mean:
raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
if axis != 0:
raise ValueError('Can only scale sparse matrix on axis=0, got axis=%d' % axis)
if with_std:
(_, var) = mean_variance_axis(X, axis=0)
if np.isscalar(var):
    if var == 0.0:
        var = 1.0
elif isinstance(var, np.ndarray):
    # detect near-constant features and leave their variance at 1 so that
    # dividing by sqrt(var) is a no-op for them
    constant_mask = var < 10 * np.finfo(var.dtype).eps
    var[constant_mask] = 1.0
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
if not np.allclose(mean_1, 0):
warnings.warn('Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. You may need to prescale your features.')
Xr -= mean_1
if with_std:
if np.isscalar(scale_):
    if scale_ == 0.0:
        scale_ = 1.0
elif isinstance(scale_, np.ndarray):
    # leave near-constant features untouched by the scaling step
    constant_mask = scale_ < 10 * np.finfo(scale_.dtype).eps
    scale_[constant_mask] = 1.0
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
if not np.allclose(mean_2, 0):
warnings.warn('Numerical issues were encountered when scaling the data and might not be solved. The standard deviation of the data is probably very close to 0. ')
Xr -= mean_2
return X
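
# Usage sketch of the leakage warning in the docstring above: keep the scaler
# inside a Pipeline so the test split never influences the training statistics.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X_all, y_all = make_classification(random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_all, y_all, random_state=0)
pipe = make_pipeline(StandardScaler(), LogisticRegression())
pipe.fit(X_tr, y_tr)  # scaling statistics are computed from X_tr only
print(pipe.score(X_te, y_te))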
|
def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis.
Center to the mean and component-wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : int, default=0
Axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : bool, default=True
If True, center the data before scaling.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
StandardScaler : Performs scaling to unit variance using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if they expect the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.StandardScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, estimator='the scale function', dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
if with_mean:
raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
if axis != 0:
raise ValueError('Can only scale sparse matrix on axis=0, got axis=%d' % axis)
if with_std:
(_, var) = mean_variance_axis(X, axis=0)
<DeepExtract>
if np.isscalar(var):
    if var == 0.0:
        var = 1.0
elif isinstance(var, np.ndarray):
    # detect near-constant features and leave their variance at 1 so that
    # dividing by sqrt(var) is a no-op for them
    constant_mask = var < 10 * np.finfo(var.dtype).eps
    var[constant_mask] = 1.0
</DeepExtract>
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
if not np.allclose(mean_1, 0):
warnings.warn('Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. You may need to prescale your features.')
Xr -= mean_1
if with_std:
<DeepExtract>
if np.isscalar(scale_):
    if scale_ == 0.0:
        scale_ = 1.0
elif isinstance(scale_, np.ndarray):
    # leave near-constant features untouched by the scaling step
    constant_mask = scale_ < 10 * np.finfo(scale_.dtype).eps
    scale_[constant_mask] = 1.0
</DeepExtract>
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
if not np.allclose(mean_2, 0):
warnings.warn('Numerical issues were encountered when scaling the data and might not be solved. The standard deviation of the data is probably very close to 0. ')
Xr -= mean_2
return X
|
@validate_params({'y_true': ['array-like'], 'y_score': ['array-like'], 'pos_label': [Real, str, 'boolean', None], 'sample_weight': ['array-like', None], 'drop_intermediate': ['boolean']})
def roc_curve(y_true, y_score, *, pos_label=None, sample_weight=None, drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC).
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : array-like of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int, float, bool or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : bool, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : ndarray of shape (>2,)
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= `thresholds[i]`.
tpr : ndarray of shape (>2,)
Increasing true positive rates such that element `i` is the true
positive rate of predictions with score >= `thresholds[i]`.
thresholds : ndarray of shape = (n_thresholds,)
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See Also
--------
RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
(ROC) curve given an estimator and some data.
RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
(ROC) curve given the true and predicted values.
det_curve: Compute error rates for different probability thresholds.
roc_auc_score : Compute the area under the ROC curve.
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition
Letters, 2006, 27(8):861-874.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([0. , 0. , 0.5, 0.5, 1. ])
>>> tpr
array([0. , 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([1.8 , 0.8 , 0.4 , 0.35, 0.1 ])
"""
y_type = type_of_target(y_true, input_name='y_true')
if not (y_type == 'binary' or (y_type == 'multiclass' and pos_label is not None)):
raise ValueError('{0} format is not supported'.format(y_type))
check_consistent_length(y_true, y_score, sample_weight)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = _check_sample_weight(sample_weight, y_true)
nonzero_weight_mask = sample_weight != 0
y_true = y_true[nonzero_weight_mask]
y_score = y_score[nonzero_weight_mask]
sample_weight = sample_weight[nonzero_weight_mask]
pos_label = _check_pos_label_consistency(pos_label, y_true)
y_true = y_true == pos_label
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.0
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
thresholds = y_score[threshold_idxs]
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn('No negative samples in y_true, false positive value should be meaningless', UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn('No positive samples in y_true, true positive value should be meaningless', UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return (fpr, tpr, thresholds)
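
# Small demonstration of `drop_intermediate`: suboptimal thresholds are
# dropped, so the returned arrays can be much shorter while the plotted curve
# is unchanged. Toy scores only.
import numpy as np
from sklearn.metrics import roc_curve

rng_demo = np.random.RandomState(0)
y_demo = rng_demo.randint(0, 2, size=200)
scores_demo = rng_demo.rand(200)
_, _, thr_full = roc_curve(y_demo, scores_demo, drop_intermediate=False)
_, _, thr_lite = roc_curve(y_demo, scores_demo, drop_intermediate=True)
assert len(thr_lite) <= len(thr_full)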
|
@validate_params({'y_true': ['array-like'], 'y_score': ['array-like'], 'pos_label': [Real, str, 'boolean', None], 'sample_weight': ['array-like', None], 'drop_intermediate': ['boolean']})
def roc_curve(y_true, y_score, *, pos_label=None, sample_weight=None, drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC).
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : array-like of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int, float, bool or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : bool, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : ndarray of shape (>2,)
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= `thresholds[i]`.
tpr : ndarray of shape (>2,)
Increasing true positive rates such that element `i` is the true
positive rate of predictions with score >= `thresholds[i]`.
thresholds : ndarray of shape = (n_thresholds,)
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See Also
--------
RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
(ROC) curve given an estimator and some data.
RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
(ROC) curve given the true and predicted values.
det_curve: Compute error rates for different probability thresholds.
roc_auc_score : Compute the area under the ROC curve.
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition
Letters, 2006, 27(8):861-874.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([0. , 0. , 0.5, 0.5, 1. ])
>>> tpr
array([0. , 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([1.8 , 0.8 , 0.4 , 0.35, 0.1 ])
"""
<DeepExtract>
y_type = type_of_target(y_true, input_name='y_true')
if not (y_type == 'binary' or (y_type == 'multiclass' and pos_label is not None)):
raise ValueError('{0} format is not supported'.format(y_type))
check_consistent_length(y_true, y_score, sample_weight)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = _check_sample_weight(sample_weight, y_true)
nonzero_weight_mask = sample_weight != 0
y_true = y_true[nonzero_weight_mask]
y_score = y_score[nonzero_weight_mask]
sample_weight = sample_weight[nonzero_weight_mask]
pos_label = _check_pos_label_consistency(pos_label, y_true)
y_true = y_true == pos_label
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.0
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
thresholds = y_score[threshold_idxs]
</DeepExtract>
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn('No negative samples in y_true, false positive value should be meaningless', UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn('No positive samples in y_true, true positive value should be meaningless', UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return (fpr, tpr, thresholds)
|
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == value_to_mask.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
value_to_mask : {int, float}
The value which is to be masked in X.
Returns
-------
X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
Missing mask.
"""
if not sp.issparse(X):
return _get_dense_mask(X, value_to_mask)
Xt = None
with suppress(ImportError, AttributeError):
    # older pandas versions do not have `pandas.NA`
    import pandas
    if value_to_mask is pandas.NA:
        Xt = pandas.isna(X.data)
if Xt is None:
    if is_scalar_nan(value_to_mask):
        if X.data.dtype.kind == 'f':
            Xt = np.isnan(X.data)
        elif X.data.dtype.kind in ('i', 'u'):
            # NaN cannot be represented in integer data: nothing is masked
            Xt = np.zeros(X.data.shape, dtype=bool)
        else:
            Xt = _object_dtype_isnan(X.data)
    else:
        Xt = X.data == value_to_mask
sparse_constructor = sp.csr_matrix if X.format == 'csr' else sp.csc_matrix
Xt_sparse = sparse_constructor((Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool)
return Xt_sparse
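
# The NaN branch above mirrors plain numpy semantics: `x == np.nan` is always
# False, so missing values must be detected with `np.isnan`. A tiny check:
import numpy as np

x = np.array([1.0, np.nan, 3.0])
print(x == np.nan)   # [False False False] -- equality never matches NaN
print(np.isnan(x))   # [False  True False] -- what the helper relies on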
|
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == value_to_mask.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
value_to_mask : {int, float}
The value which is to be masked in X.
Returns
-------
X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
Missing mask.
"""
if not sp.issparse(X):
return _get_dense_mask(X, value_to_mask)
<DeepExtract>
Xt = None
with suppress(ImportError, AttributeError):
    # older pandas versions do not have `pandas.NA`
    import pandas
    if value_to_mask is pandas.NA:
        Xt = pandas.isna(X.data)
if Xt is None:
    if is_scalar_nan(value_to_mask):
        if X.data.dtype.kind == 'f':
            Xt = np.isnan(X.data)
        elif X.data.dtype.kind in ('i', 'u'):
            # NaN cannot be represented in integer data: nothing is masked
            Xt = np.zeros(X.data.shape, dtype=bool)
        else:
            Xt = _object_dtype_isnan(X.data)
    else:
        Xt = X.data == value_to_mask
</DeepExtract>
sparse_constructor = sp.csr_matrix if X.format == 'csr' else sp.csc_matrix
Xt_sparse = sparse_constructor((Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool)
return Xt_sparse
|
@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
    X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
    X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
    X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
    X = X - X.min()
if _is_pairwise_metric(estimator_orig):
    X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
    # linear_kernel (sklearn.metrics.pairwise) is assumed to be the default
    # kernel of the tag-enforcement helper that was inlined here
    X = linear_kernel(X, X)
y = np.arange(n_samples) % 3
if _safe_tags(estimator_orig, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator_orig, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator_orig, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
estimator = clone(estimator_orig)
set_random_state(estimator)
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DataConversionWarning)
warnings.simplefilter('ignore', RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = 'expected 1 DataConversionWarning, got: %s' % ', '.join([str(w_x) for w_x in w])
if not tags['multioutput']:
assert len(w) > 0, msg
assert "DataConversionWarning('A column-vector y was passed when a 1d array was expected" in msg
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
|
@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
<DeepExtract>
X = rnd.uniform(size=(n_samples, 3))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
    X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
    X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
    X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
    X = X - X.min()
if _is_pairwise_metric(estimator_orig):
    X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
    # linear_kernel (sklearn.metrics.pairwise) is assumed to be the default
    # kernel of the tag-enforcement helper that was inlined here
    X = linear_kernel(X, X)
</DeepExtract>
y = np.arange(n_samples) % 3
<DeepExtract>
if _safe_tags(estimator_orig, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator_orig, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator_orig, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
</DeepExtract>
estimator = clone(estimator_orig)
set_random_state(estimator)
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DataConversionWarning)
warnings.simplefilter('ignore', RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = 'expected 1 DataConversionWarning, got: %s' % ', '.join([str(w_x) for w_x in w])
if not tags['multioutput']:
assert len(w) > 0, msg
assert "DataConversionWarning('A column-vector y was passed when a 1d array was expected" in msg
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X = self._validate_data(X, dtype=[np.float64, np.float32], order='C', copy=False)
self._n_components = self.n_components
if self._n_components is None:
self._n_components = X.shape[1]
_check_positive_coding(self.fit_algorithm, self.positive_code)
self._fit_algorithm = 'lasso_' + self.fit_algorithm
self._batch_size = min(self.batch_size, X.shape[0])
if self.n_iter != 'deprecated':
warnings.warn("'n_iter' is deprecated in version 1.1 and will be removed in version 1.4. Use 'max_iter' and let 'n_iter' to its default value instead. 'n_iter' is also ignored if 'max_iter' is specified.", FutureWarning)
n_iter = self.n_iter
self._random_state = check_random_state(self.random_state)
if self.dict_init is not None:
dictionary = self.dict_init
else:
(_, S, dictionary) = randomized_svd(X, self._n_components, random_state=self._random_state)
dictionary = S[:, np.newaxis] * dictionary
if self._n_components <= len(dictionary):
dictionary = dictionary[:self._n_components, :]
else:
dictionary = np.concatenate((dictionary, np.zeros((self._n_components - len(dictionary), dictionary.shape[1]), dtype=dictionary.dtype)))
dictionary = check_array(dictionary, order='F', dtype=X.dtype, copy=False)
dictionary = np.require(dictionary, requirements='W')
old_dict = dictionary.copy()
if self.shuffle:
X_train = X.copy()
self._random_state.shuffle(X_train)
else:
X_train = X
(n_samples, n_features) = X_train.shape
if self.verbose:
print('[dict_learning]')
self._A = np.zeros((self._n_components, self._n_components), dtype=X_train.dtype)
self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
if self.max_iter is not None:
self._ewa_cost = None
self._ewa_cost_min = None
self._no_improvement = 0
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
n_steps = self.max_iter * n_steps_per_iter
i = -1
for (i, batch) in zip(range(n_steps), batches):
X_batch = X_train[batch]
batch_size = X_batch.shape[0]
code = _sparse_encode(X_batch, dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)
batch_cost = (0.5 * ((X_batch - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
self._update_inner_stats(X_batch, code, batch_size, i)
_update_dict(dictionary, X_batch, code, self._A, self._B, verbose=self.verbose, random_state=self._random_state, positive=self.positive_dict)
if self._check_convergence(X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps):
break
if self.callback is not None:
self.callback(locals())
old_dict[:] = dictionary
self.n_steps_ = i + 1
self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
else:
n_iter = 1000 if self.n_iter == 'deprecated' else self.n_iter
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
for (i, batch) in zip(range(n_iter), batches):
batch_size = X_train[batch].shape[0]
code = _sparse_encode(X_train[batch], dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)
batch_cost = (0.5 * ((X_train[batch] - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
self._update_inner_stats(X_train[batch], code, batch_size, i)
_update_dict(dictionary, X_train[batch], code, self._A, self._B, verbose=self.verbose, random_state=self._random_state, positive=self.positive_dict)
trigger_verbose = self.verbose and i % ceil(100.0 / self.verbose) == 0
if self.verbose > 10 or trigger_verbose:
print(f'{i} batches processed.')
if self.callback is not None:
self.callback(locals())
self.n_steps_ = n_iter
self.n_iter_ = np.ceil(n_iter / int(np.ceil(n_samples / self._batch_size)))
self.components_ = dictionary
return self
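
# Hedged usage sketch for the estimator whose `fit` is shown above
# (MiniBatchDictionaryLearning from sklearn.decomposition); toy data only.
import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

X_demo = np.random.RandomState(0).randn(100, 8)
dico = MiniBatchDictionaryLearning(n_components=5, batch_size=10, max_iter=20, random_state=0)
dico.fit(X_demo)
print(dico.components_.shape)  # (5, 8): one dictionary atom per row
code = dico.transform(X_demo)  # sparse codes, array of shape (100, 5)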
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X = self._validate_data(X, dtype=[np.float64, np.float32], order='C', copy=False)
<DeepExtract>
self._n_components = self.n_components
if self._n_components is None:
self._n_components = X.shape[1]
_check_positive_coding(self.fit_algorithm, self.positive_code)
self._fit_algorithm = 'lasso_' + self.fit_algorithm
self._batch_size = min(self.batch_size, X.shape[0])
</DeepExtract>
if self.n_iter != 'deprecated':
warnings.warn("'n_iter' is deprecated in version 1.1 and will be removed in version 1.4. Use 'max_iter' and let 'n_iter' to its default value instead. 'n_iter' is also ignored if 'max_iter' is specified.", FutureWarning)
n_iter = self.n_iter
self._random_state = check_random_state(self.random_state)
<DeepExtract>
if self.dict_init is not None:
dictionary = self.dict_init
else:
(_, S, dictionary) = randomized_svd(X, self._n_components, random_state=self._random_state)
dictionary = S[:, np.newaxis] * dictionary
if self._n_components <= len(dictionary):
dictionary = dictionary[:self._n_components, :]
else:
dictionary = np.concatenate((dictionary, np.zeros((self._n_components - len(dictionary), dictionary.shape[1]), dtype=dictionary.dtype)))
dictionary = check_array(dictionary, order='F', dtype=X.dtype, copy=False)
dictionary = np.require(dictionary, requirements='W')
</DeepExtract>
old_dict = dictionary.copy()
if self.shuffle:
X_train = X.copy()
self._random_state.shuffle(X_train)
else:
X_train = X
(n_samples, n_features) = X_train.shape
if self.verbose:
print('[dict_learning]')
self._A = np.zeros((self._n_components, self._n_components), dtype=X_train.dtype)
self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
if self.max_iter is not None:
self._ewa_cost = None
self._ewa_cost_min = None
self._no_improvement = 0
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
n_steps = self.max_iter * n_steps_per_iter
i = -1
for (i, batch) in zip(range(n_steps), batches):
X_batch = X_train[batch]
<DeepExtract>
batch_size = X_batch.shape[0]
code = _sparse_encode(X_batch, dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)
batch_cost = (0.5 * ((X_batch - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
self._update_inner_stats(X_batch, code, batch_size, i)
_update_dict(dictionary, X_batch, code, self._A, self._B, verbose=self.verbose, random_state=self._random_state, positive=self.positive_dict)
</DeepExtract>
if self._check_convergence(X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps):
break
if self.callback is not None:
self.callback(locals())
old_dict[:] = dictionary
self.n_steps_ = i + 1
self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
else:
n_iter = 1000 if self.n_iter == 'deprecated' else self.n_iter
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
for (i, batch) in zip(range(n_iter), batches):
<DeepExtract>
batch_size = X_train[batch].shape[0]
code = _sparse_encode(X_train[batch], dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)
batch_cost = (0.5 * ((X_train[batch] - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size
self._update_inner_stats(X_train[batch], code, batch_size, i)
_update_dict(dictionary, X_train[batch], code, self._A, self._B, verbose=self.verbose, random_state=self._random_state, positive=self.positive_dict)
</DeepExtract>
trigger_verbose = self.verbose and i % ceil(100.0 / self.verbose) == 0
if self.verbose > 10 or trigger_verbose:
print(f'{i} batches processed.')
if self.callback is not None:
self.callback(locals())
self.n_steps_ = n_iter
self.n_iter_ = np.ceil(n_iter / int(np.ceil(n_samples / self._batch_size)))
self.components_ = dictionary
return self
|
def test_enet_path_positive():
random_state = np.random.RandomState(0)
w = random_state.randn(50, 2)  # n_targets=2 for this test
w[10:] = 0.0  # assumed helper default: 10 informative features
X = random_state.randn(50, 50)
y = np.dot(X, w)
X_test = random_state.randn(50, 50)
y_test = np.dot(X_test, w)
(X, Y, _, _) = (X, y, X_test, y_test)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, Y[:, 0], positive=True)[1]
assert np.all(pos_path_coef >= 0)
for path in [enet_path, lasso_path]:
with pytest.raises(ValueError):
path(X, Y, positive=True)
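
# Standalone sketch of the `positive=True` constraint checked above; the
# helper names are illustrative.
import numpy as np
from sklearn.linear_model import enet_path

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.randn(50, 10)
y_demo = X_demo @ rng_demo.rand(10)
alphas, coefs, _ = enet_path(X_demo, y_demo, positive=True)
assert np.all(coefs >= 0)  # nonnegative along the entire regularization path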
|
def test_enet_path_positive():
<DeepExtract>
random_state = np.random.RandomState(0)
w = random_state.randn(50, 2)  # n_targets=2 for this test
w[10:] = 0.0  # assumed helper default: 10 informative features
X = random_state.randn(50, 50)
y = np.dot(X, w)
X_test = random_state.randn(50, 50)
y_test = np.dot(X_test, w)
(X, Y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, Y[:, 0], positive=True)[1]
assert np.all(pos_path_coef >= 0)
for path in [enet_path, lasso_path]:
with pytest.raises(ValueError):
path(X, Y, positive=True)
|
def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):
"""Wrap output with container based on an estimator's or global config.
Parameters
----------
method : {"transform"}
Estimator's method to get container output for.
data_to_wrap : {ndarray, dataframe}
Data to wrap with container.
original_input : {ndarray, dataframe}
Original input of function.
estimator : estimator instance
Estimator with to get the output configuration from.
Returns
-------
output : {ndarray, dataframe}
If the output config is "default" or the estimator is not configured
for wrapping return `data_to_wrap` unchanged.
If the output config is "pandas", return `data_to_wrap` as a pandas
DataFrame.
"""
est_sklearn_output_config = getattr(estimator, '_sklearn_output_config', {})
if method in est_sklearn_output_config:
dense_config = est_sklearn_output_config[method]
else:
dense_config = get_config()[f'{method}_output']
if dense_config not in {'default', 'pandas'}:
raise ValueError(f"output config must be 'default' or 'pandas' got {dense_config}")
output_config = {'dense': dense_config}
if output_config['dense'] == 'default' or not _auto_wrap_is_configured(estimator):
return data_to_wrap
return _wrap_in_pandas_container(data_to_wrap=data_to_wrap, index=getattr(original_input, 'index', None), columns=estimator.get_feature_names_out)
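
# The configuration read above is the one set through the `set_output` API;
# a minimal sketch of how a user triggers the pandas wrapping path:
import pandas as pd
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 5.0, 6.0]})
scaler = StandardScaler().set_output(transform='pandas')
out = scaler.fit_transform(df)
print(type(out).__name__)   # DataFrame
print(list(out.columns))    # ['a', 'b'] -- feature names are preserved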
|
def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):
"""Wrap output with container based on an estimator's or global config.
Parameters
----------
method : {"transform"}
Estimator's method to get container output for.
data_to_wrap : {ndarray, dataframe}
Data to wrap with container.
original_input : {ndarray, dataframe}
Original input of function.
estimator : estimator instance
Estimator with to get the output configuration from.
Returns
-------
output : {ndarray, dataframe}
If the output config is "default" or the estimator is not configured
for wrapping return `data_to_wrap` unchanged.
If the output config is "pandas", return `data_to_wrap` as a pandas
DataFrame.
"""
<DeepExtract>
est_sklearn_output_config = getattr(estimator, '_sklearn_output_config', {})
if method in est_sklearn_output_config:
dense_config = est_sklearn_output_config[method]
else:
dense_config = get_config()[f'{method}_output']
if dense_config not in {'default', 'pandas'}:
raise ValueError(f"output config must be 'default' or 'pandas' got {dense_config}")
output_config = {'dense': dense_config}
</DeepExtract>
if output_config['dense'] == 'default' or not _auto_wrap_is_configured(estimator):
return data_to_wrap
return _wrap_in_pandas_container(data_to_wrap=data_to_wrap, index=getattr(original_input, 'index', None), columns=estimator.get_feature_names_out)
|
def fit(self, X, y, sample_weight=None):
"""
Build a forest of trees from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if issparse(y):
raise ValueError('sparse multilabel-indicator for y is not supported.')
(X, y) = self._validate_data(X, y, multi_output=True, accept_sparse='csc', dtype=DTYPE)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if issparse(X):
X.sort_indices()
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn('A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().', DataConversionWarning, stacklevel=2)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
if self.criterion == 'poisson':
if np.any(y < 0):
raise ValueError('Some value(s) of y are negative which is not allowed for Poisson regression.')
if np.sum(y) <= 0:
raise ValueError('Sum of y is not strictly positive which is necessary for Poisson regression.')
self.n_outputs_ = y.shape[1]
expanded_class_weight = None
if getattr(y, 'dtype', None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
if not self.bootstrap and self.max_samples is not None:
raise ValueError('`max_sample` cannot be set if `bootstrap=False`. Either switch to `bootstrap=True` or set `max_sample=None`.')
elif self.bootstrap:
if self.max_samples is None:
    n_samples_bootstrap = X.shape[0]
elif isinstance(self.max_samples, Integral):
    if self.max_samples > X.shape[0]:
        msg = '`max_samples` must be <= n_samples={} but got value {}'
        raise ValueError(msg.format(X.shape[0], self.max_samples))
    n_samples_bootstrap = self.max_samples
else:
    # a Real in (0, 1]: interpret as a fraction of the dataset. The elif
    # above is required because every Integral is also a Real.
    n_samples_bootstrap = max(round(X.shape[0] * self.max_samples), 1)
else:
n_samples_bootstrap = None
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError('Out of bag estimation only available if bootstrap=True')
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, 'estimators_'):
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn('Warm-start fitting without increasing n_estimators does not fit new trees.')
else:
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = [self._make_estimator(append=False, random_state=random_state) for i in range(n_more_estimators)]
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='threads')((delayed(_parallel_build_trees)(t, self.bootstrap, X, y, sample_weight, i, len(trees), verbose=self.verbose, class_weight=self.class_weight, n_samples_bootstrap=n_samples_bootstrap) for (i, t) in enumerate(trees)))
self.estimators_.extend(trees)
if self.oob_score:
y_type = type_of_target(y)
if y_type in ('multiclass-multioutput', 'unknown'):
raise ValueError(f'The type of target cannot be used to compute OOB estimates. Got {y_type} while only the following are supported: continuous, continuous-multioutput, binary, multiclass, multilabel-indicator.')
if callable(self.oob_score):
    # the elided helper computes OOB predictions and scores them; the
    # `scoring_function` keyword follows the callable-oob_score API
    # (an assumption, since the original body was elided here)
    self._set_oob_score_and_attributes(X, y, scoring_function=self.oob_score)
else:
    self._set_oob_score_and_attributes(X, y)
if hasattr(self, 'classes_') and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
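
# Usage sketch of the bootstrap / `max_samples` / OOB logic implemented above;
# toy data, illustrative only.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X_demo, y_demo = make_classification(n_samples=200, random_state=0)
clf = RandomForestClassifier(n_estimators=50, bootstrap=True, max_samples=0.5, oob_score=True, random_state=0)
clf.fit(X_demo, y_demo)  # each tree sees a bootstrap sample of 100 rows
print(clf.oob_score_)    # out-of-bag accuracy; requires bootstrap=True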
|
def fit(self, X, y, sample_weight=None):
"""
Build a forest of trees from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if issparse(y):
raise ValueError('sparse multilabel-indicator for y is not supported.')
(X, y) = self._validate_data(X, y, multi_output=True, accept_sparse='csc', dtype=DTYPE)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if issparse(X):
X.sort_indices()
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn('A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().', DataConversionWarning, stacklevel=2)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
if self.criterion == 'poisson':
if np.any(y < 0):
raise ValueError('Some value(s) of y are negative which is not allowed for Poisson regression.')
if np.sum(y) <= 0:
raise ValueError('Sum of y is not strictly positive which is necessary for Poisson regression.')
self.n_outputs_ = y.shape[1]
<DeepExtract>
expanded_class_weight = None
</DeepExtract>
if getattr(y, 'dtype', None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
if not self.bootstrap and self.max_samples is not None:
raise ValueError('`max_sample` cannot be set if `bootstrap=False`. Either switch to `bootstrap=True` or set `max_sample=None`.')
elif self.bootstrap:
<DeepExtract>
if self.max_samples is None:
    n_samples_bootstrap = X.shape[0]
elif isinstance(self.max_samples, Integral):
    if self.max_samples > X.shape[0]:
        msg = '`max_samples` must be <= n_samples={} but got value {}'
        raise ValueError(msg.format(X.shape[0], self.max_samples))
    n_samples_bootstrap = self.max_samples
else:
    # a Real in (0, 1]: interpret as a fraction of the dataset. The elif
    # above is required because every Integral is also a Real.
    n_samples_bootstrap = max(round(X.shape[0] * self.max_samples), 1)
</DeepExtract>
else:
n_samples_bootstrap = None
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError('Out of bag estimation only available if bootstrap=True')
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, 'estimators_'):
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn('Warm-start fitting without increasing n_estimators does not fit new trees.')
else:
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = [self._make_estimator(append=False, random_state=random_state) for i in range(n_more_estimators)]
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='threads')((delayed(_parallel_build_trees)(t, self.bootstrap, X, y, sample_weight, i, len(trees), verbose=self.verbose, class_weight=self.class_weight, n_samples_bootstrap=n_samples_bootstrap) for (i, t) in enumerate(trees)))
self.estimators_.extend(trees)
if self.oob_score:
y_type = type_of_target(y)
if y_type in ('multiclass-multioutput', 'unknown'):
raise ValueError(f'The type of target cannot be used to compute OOB estimates. Got {y_type} while only the following are supported: continuous, continuous-multioutput, binary, multiclass, multilabel-indicator.')
if callable(self.oob_score):
<DeepExtract>
</DeepExtract>
else:
<DeepExtract>
</DeepExtract>
if hasattr(self, 'classes_') and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
|
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_no_target(monkeypatch, gzip_response):
"""Check that we can get a dataset without target."""
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
# `test_offline` is a module-level flag in the original test file; assume
# offline testing here so urlopen is always mocked
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
data = fetch_openml(data_id=data_id, target_column=target_column, cache=False, as_frame=False, parser='liac-arff')
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
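
# The same call without the mocking machinery (hits openml.org when run for
# real, so this is an illustrative sketch only):
from sklearn.datasets import fetch_openml

bunch = fetch_openml(data_id=61, target_column=None, cache=False, as_frame=False, parser='liac-arff')
print(bunch.data.shape)  # (150, 5): every column, including 'class', is a feature
print(bunch.target)      # None, since no target column was requested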
|
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_no_target(monkeypatch, gzip_response):
"""Check that we can get a dataset without target."""
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
    test_offline = True  # default of the extracted mocking helper; always True in these tests
    if test_offline:
        monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
data = fetch_openml(data_id=data_id, target_column=target_column, cache=False, as_frame=False, parser='liac-arff')
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
|
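The dispatcher in the mocked urlopen above is a small URL router. A minimal sketch of that pattern (hypothetical helper name, not part of the test suite): map URL prefixes to local, file-backed handlers so fetch_openml never touches the network.

def make_mock_urlopen(handlers):
    def _mock_urlopen(request, *args, **kwargs):
        url = request.get_full_url()
        for prefix, handler in handlers.items():
            if url.startswith(prefix):
                return handler(url)
        raise ValueError('Unknown mocking URL pattern: %s' % url)
    return _mock_urlopen

# usage with pytest's monkeypatch fixture:
# monkeypatch.setattr(sklearn.datasets._openml, 'urlopen',
#                     make_mock_urlopen({'https://openml.org/data/v1/': download_handler}))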
@pytest.mark.parametrize('gcv_mode', ['svd', 'eigen'])
@pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)])
@pytest.mark.parametrize('fit_intercept', [True, False])
@pytest.mark.parametrize('y_shape, noise', [((11,), 1.0), ((11, 1), 30.0), ((11, 3), 150.0)])
def test_ridge_gcv_vs_ridge_loo_cv(gcv_mode, X_constructor, X_shape, y_shape, fit_intercept, noise):
(n_samples, n_features) = X_shape
n_targets = y_shape[-1] if len(y_shape) == 2 else 1
    # Defaults of the extracted helper (assumed: _make_sparse_offset_regression);
    # the inlining dropped these bindings, leaving the names undefined.
    (bias, X_offset, proportion_nonzero, positive, coef) = (13.0, 30.0, 0.5, False, False)
    (X, y, c) = make_regression(n_samples=n_samples, n_features=n_features, n_informative=5, n_targets=n_targets, bias=bias, noise=noise, shuffle=False, coef=True, random_state=0)
    if n_features == 1:
        c = np.asarray([c])
    X += X_offset
    mask = np.random.RandomState(0).binomial(1, proportion_nonzero, X.shape) > 0
    removed_X = X.copy()
    X[~mask] = 0.0
    removed_X[mask] = 0.0
    y -= removed_X.dot(c)
    if positive:
        y += X.dot(np.abs(c) + 1 - c)
        c = np.abs(c) + 1
    if n_features == 1:
        c = c[0]
    # coef is False for this call, so the true coefficients c are discarded here.
y = y.reshape(y_shape)
alphas = [0.001, 0.1, 1.0, 10.0, 1000.0]
loo_ridge = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, alphas=alphas, scoring='neg_mean_squared_error')
gcv_ridge = RidgeCV(gcv_mode=gcv_mode, fit_intercept=fit_intercept, alphas=alphas)
loo_ridge.fit(X, y)
X_gcv = X_constructor(X)
gcv_ridge.fit(X_gcv, y)
assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_)
assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=0.001)
assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=0.001)
|
@pytest.mark.parametrize('gcv_mode', ['svd', 'eigen'])
@pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)])
@pytest.mark.parametrize('fit_intercept', [True, False])
@pytest.mark.parametrize('y_shape, noise', [((11,), 1.0), ((11, 1), 30.0), ((11, 3), 150.0)])
def test_ridge_gcv_vs_ridge_loo_cv(gcv_mode, X_constructor, X_shape, y_shape, fit_intercept, noise):
(n_samples, n_features) = X_shape
n_targets = y_shape[-1] if len(y_shape) == 2 else 1
<DeepExtract>
    # Defaults of the extracted helper (assumed: _make_sparse_offset_regression);
    # the inlining dropped these bindings, leaving the names undefined.
    (bias, X_offset, proportion_nonzero, positive, coef) = (13.0, 30.0, 0.5, False, False)
    (X, y, c) = make_regression(n_samples=n_samples, n_features=n_features, n_informative=5, n_targets=n_targets, bias=bias, noise=noise, shuffle=False, coef=True, random_state=0)
    if n_features == 1:
        c = np.asarray([c])
    X += X_offset
    mask = np.random.RandomState(0).binomial(1, proportion_nonzero, X.shape) > 0
    removed_X = X.copy()
    X[~mask] = 0.0
    removed_X[mask] = 0.0
    y -= removed_X.dot(c)
    if positive:
        y += X.dot(np.abs(c) + 1 - c)
        c = np.abs(c) + 1
    if n_features == 1:
        c = c[0]
    # coef is False for this call, so the true coefficients c are discarded here.
</DeepExtract>
y = y.reshape(y_shape)
alphas = [0.001, 0.1, 1.0, 10.0, 1000.0]
loo_ridge = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, alphas=alphas, scoring='neg_mean_squared_error')
gcv_ridge = RidgeCV(gcv_mode=gcv_mode, fit_intercept=fit_intercept, alphas=alphas)
loo_ridge.fit(X, y)
X_gcv = X_constructor(X)
gcv_ridge.fit(X_gcv, y)
assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_)
assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=0.001)
assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=0.001)
|
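What the pair above asserts, as a runnable sketch: with cv=n_samples, RidgeCV performs explicit leave-one-out cross-validation, while the default cv=None path selects alpha via the closed-form leave-one-out (GCV) shortcut; both should agree.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import RidgeCV

X, y = make_regression(n_samples=20, n_features=5, noise=1.0, random_state=0)
alphas = [0.1, 1.0, 10.0]
loo = RidgeCV(alphas=alphas, cv=20, scoring='neg_mean_squared_error').fit(X, y)  # explicit LOO
gcv = RidgeCV(alphas=alphas).fit(X, y)  # efficient closed-form LOO (the default)
print(loo.alpha_, gcv.alpha_)  # expected to agree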
def fit(self, X, y=None):
"""Only validates estimator's parameters.
    This method allows one to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
self._validate_params()
if isinstance(X, str):
raise ValueError('Iterable over raw text documents expected, string object received.')
    if self.tokenizer is not None and self.token_pattern is not None:
        warnings.warn("The parameter 'token_pattern' will not be used since 'tokenizer' is not None")
    if self.preprocessor is not None and callable(self.analyzer):
        warnings.warn("The parameter 'preprocessor' will not be used since 'analyzer' is callable")
    if self.ngram_range != (1, 1) and self.ngram_range is not None and callable(self.analyzer):
        warnings.warn("The parameter 'ngram_range' will not be used since 'analyzer' is callable")
if self.analyzer != 'word' or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn("The parameter 'stop_words' will not be used since 'analyzer' != 'word'")
if self.token_pattern is not None and self.token_pattern != '(?u)\\b\\w\\w+\\b':
warnings.warn("The parameter 'token_pattern' will not be used since 'analyzer' != 'word'")
if self.tokenizer is not None:
warnings.warn("The parameter 'tokenizer' will not be used since 'analyzer' != 'word'")
(min_n, max_m) = self.ngram_range
if min_n > max_m:
raise ValueError('Invalid value for ngram_range=%s lower boundary larger than the upper boundary.' % str(self.ngram_range))
self._get_hasher().fit(X, y=y)
return self
|
def fit(self, X, y=None):
"""Only validates estimator's parameters.
    This method allows one to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
self._validate_params()
if isinstance(X, str):
raise ValueError('Iterable over raw text documents expected, string object received.')
<DeepExtract>
    if self.tokenizer is not None and self.token_pattern is not None:
        warnings.warn("The parameter 'token_pattern' will not be used since 'tokenizer' is not None")
    if self.preprocessor is not None and callable(self.analyzer):
        warnings.warn("The parameter 'preprocessor' will not be used since 'analyzer' is callable")
    if self.ngram_range != (1, 1) and self.ngram_range is not None and callable(self.analyzer):
        warnings.warn("The parameter 'ngram_range' will not be used since 'analyzer' is callable")
if self.analyzer != 'word' or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn("The parameter 'stop_words' will not be used since 'analyzer' != 'word'")
if self.token_pattern is not None and self.token_pattern != '(?u)\\b\\w\\w+\\b':
warnings.warn("The parameter 'token_pattern' will not be used since 'analyzer' != 'word'")
if self.tokenizer is not None:
warnings.warn("The parameter 'tokenizer' will not be used since 'analyzer' != 'word'")
</DeepExtract>
<DeepExtract>
(min_n, max_m) = self.ngram_range
if min_n > max_m:
raise ValueError('Invalid value for ngram_range=%s lower boundary larger than the upper boundary.' % str(self.ngram_range))
</DeepExtract>
self._get_hasher().fit(X, y=y)
return self
|
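Because the hashing trick is stateless, the fit above only validates parameters; transform works immediately. A short usage sketch:

from sklearn.feature_extraction.text import HashingVectorizer

docs = ['the quick brown fox', 'jumped over the lazy dog']
vec = HashingVectorizer(n_features=2 ** 8).fit(docs)  # no vocabulary is learned
X = vec.transform(docs)
print(X.shape)  # (2, 256), a sparse matrix of hashed token features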
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
(xp, is_array_api) = get_namespace(X)
decision = super().decision_function(X)
if self.classes_.size == 2:
proba = _expit(decision)
return xp.stack([1 - proba, proba], axis=1)
else:
return softmax(decision)
|
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
(xp, is_array_api) = get_namespace(X)
<DeepExtract>
decision = super().decision_function(X)
</DeepExtract>
if self.classes_.size == 2:
proba = _expit(decision)
return xp.stack([1 - proba, proba], axis=1)
else:
return softmax(decision)
|
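The binary branch above maps one decision value per sample to a two-column probability matrix through the logistic sigmoid; the same computation in plain numpy:

import numpy as np
from scipy.special import expit

decision = np.array([-2.0, 0.0, 3.0])         # decision_function output
proba = expit(decision)                       # P(positive class)
probs = np.stack([1 - proba, proba], axis=1)  # shape (n_samples, 2)
print(probs.sum(axis=1))                      # each row sums to 1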
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except Exception:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
    n_samples = X.shape[0]  # needed here; the inlined loss code below re-binds it too late
    try:
        C = 1.0 / clf.alpha / n_samples
except Exception:
C = clf.C
try:
intercept = clf.intercept_
except Exception:
intercept = 0.0
n_samples = X.shape[0]
clf.coef_ = clf.coef_.ravel()
p = np.mean(np.log(1.0 + np.exp(-y * (X.dot(clf.coef_) + intercept))))
print('%f + %f' % (p, clf.coef_.dot(clf.coef_) / 2.0 / C / n_samples))
p += clf.coef_.dot(clf.coef_) / 2.0 / C / n_samples
train_loss = p
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return (train_loss, train_score, test_score, duration)
|
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except Exception:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
    n_samples = X.shape[0]  # needed here; the extracted loss code below re-binds it too late
    try:
        C = 1.0 / clf.alpha / n_samples
except Exception:
C = clf.C
try:
intercept = clf.intercept_
except Exception:
intercept = 0.0
<DeepExtract>
n_samples = X.shape[0]
clf.coef_ = clf.coef_.ravel()
p = np.mean(np.log(1.0 + np.exp(-y * (X.dot(clf.coef_) + intercept))))
print('%f + %f' % (p, clf.coef_.dot(clf.coef_) / 2.0 / C / n_samples))
p += clf.coef_.dot(clf.coef_) / 2.0 / C / n_samples
train_loss = p
</DeepExtract>
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return (train_loss, train_score, test_score, duration)
|
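The train loss computed inside bench_one is the mean logistic loss plus an L2 penalty in scikit-learn's C parameterization. A standalone, overflow-safe version of the same objective (assuming labels y in {-1, +1}):

import numpy as np

def logistic_objective(w, intercept, X, y, C):
    """Mean logistic loss + ||w||^2 / (2 * C * n_samples)."""
    n_samples = X.shape[0]
    margins = y * (X @ w + intercept)
    loss = np.mean(np.logaddexp(0.0, -margins))  # log(1 + exp(-m)) without overflow
    return loss + w @ w / (2.0 * C * n_samples)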
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
            testString = _ustr(testString).expandtabs()
try:
(loc, tokens) = self._parse(_ustr(testString), 0)
if parseAll:
loc = self.preParse(_ustr(testString), loc)
se = Empty() + StringEnd()
se._parse(_ustr(testString), loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
else:
return tokens
return True
except ParseBaseException:
return False
|
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
<DeepExtract>
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
            testString = _ustr(testString).expandtabs()
try:
(loc, tokens) = self._parse(_ustr(testString), 0)
if parseAll:
loc = self.preParse(_ustr(testString), loc)
se = Empty() + StringEnd()
se._parse(_ustr(testString), loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
else:
return tokens
</DeepExtract>
return True
except ParseBaseException:
return False
|
@pytest.mark.parametrize('mean', [0, 10000000.0, -10000000.0])
@pytest.mark.parametrize('var', [1, 1e-08, 100000.0])
@pytest.mark.parametrize('weight_loc, weight_scale', [(0, 1), (0, 1e-08), (1, 1e-08), (10, 1), (10000000.0, 1)])
def test_incremental_weighted_mean_and_variance(mean, var, weight_loc, weight_scale, rng):
def _assert(X, sample_weight, expected_mean, expected_var):
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
(last_mean, last_weight_sum, last_var) = (0, 0, 0)
for batch in gen_batches(n, chunk_size):
(last_mean, last_var, last_weight_sum) = _incremental_mean_and_var(X[batch], last_mean, last_var, last_weight_sum, sample_weight=sample_weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-06)
size = (100, 20)
weight = rng.normal(loc=weight_loc, scale=weight_scale, size=size[0])
X = rng.normal(loc=mean, scale=var, size=size)
expected_mean = _safe_accumulator_op(np.average, X, weights=weight, axis=0)
expected_var = _safe_accumulator_op(np.average, (X - expected_mean) ** 2, weights=weight, axis=0)
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
(last_mean, last_weight_sum, last_var) = (0, 0, 0)
for batch in gen_batches(n, chunk_size):
(last_mean, last_var, last_weight_sum) = _incremental_mean_and_var(X[batch], last_mean, last_var, last_weight_sum, sample_weight=weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-06)
X = rng.normal(loc=mean, scale=var, size=size)
ones_weight = np.ones(size[0])
expected_mean = _safe_accumulator_op(np.mean, X, axis=0)
expected_var = _safe_accumulator_op(np.var, X, axis=0)
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
(last_mean, last_weight_sum, last_var) = (0, 0, 0)
for batch in gen_batches(n, chunk_size):
(last_mean, last_var, last_weight_sum) = _incremental_mean_and_var(X[batch], last_mean, last_var, last_weight_sum, sample_weight=ones_weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-06)
|
@pytest.mark.parametrize('mean', [0, 10000000.0, -10000000.0])
@pytest.mark.parametrize('var', [1, 1e-08, 100000.0])
@pytest.mark.parametrize('weight_loc, weight_scale', [(0, 1), (0, 1e-08), (1, 1e-08), (10, 1), (10000000.0, 1)])
def test_incremental_weighted_mean_and_variance(mean, var, weight_loc, weight_scale, rng):
def _assert(X, sample_weight, expected_mean, expected_var):
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
(last_mean, last_weight_sum, last_var) = (0, 0, 0)
for batch in gen_batches(n, chunk_size):
(last_mean, last_var, last_weight_sum) = _incremental_mean_and_var(X[batch], last_mean, last_var, last_weight_sum, sample_weight=sample_weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-06)
size = (100, 20)
weight = rng.normal(loc=weight_loc, scale=weight_scale, size=size[0])
X = rng.normal(loc=mean, scale=var, size=size)
expected_mean = _safe_accumulator_op(np.average, X, weights=weight, axis=0)
expected_var = _safe_accumulator_op(np.average, (X - expected_mean) ** 2, weights=weight, axis=0)
<DeepExtract>
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
(last_mean, last_weight_sum, last_var) = (0, 0, 0)
for batch in gen_batches(n, chunk_size):
(last_mean, last_var, last_weight_sum) = _incremental_mean_and_var(X[batch], last_mean, last_var, last_weight_sum, sample_weight=weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-06)
</DeepExtract>
X = rng.normal(loc=mean, scale=var, size=size)
ones_weight = np.ones(size[0])
expected_mean = _safe_accumulator_op(np.mean, X, axis=0)
expected_var = _safe_accumulator_op(np.var, X, axis=0)
<DeepExtract>
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
(last_mean, last_weight_sum, last_var) = (0, 0, 0)
for batch in gen_batches(n, chunk_size):
(last_mean, last_var, last_weight_sum) = _incremental_mean_and_var(X[batch], last_mean, last_var, last_weight_sum, sample_weight=ones_weight[batch])
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-06)
</DeepExtract>
|
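The batching pattern the test exercises, on a toy array: streaming chunks through _incremental_mean_and_var (a private sklearn helper, with the signature used in the test above) reproduces the full-batch statistics.

import numpy as np
from sklearn.utils import gen_batches
from sklearn.utils.extmath import _incremental_mean_and_var

X = np.random.RandomState(0).normal(size=(100, 3))
mean, var, count = 0, 0, 0
for batch in gen_batches(X.shape[0], 25):
    mean, var, count = _incremental_mean_and_var(X[batch], mean, var, count)
print(np.allclose(mean, X.mean(axis=0)), np.allclose(var, X.var(axis=0)))  # True True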
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
self._validate_params()
dtype = [np.float64, np.float32] if True else self.components_.dtype
X = self._validate_data(X, reset=True, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.fit')
X = X
(n_samples, n_features) = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1.0 / self.n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1.0 / self.n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.0
init_var = 1.0 / init_gamma
self.components_ = self.random_state_.gamma(init_gamma, init_var, (self.n_components, n_features)).astype(X.dtype, copy=False)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
last_bound = None
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in range(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
(_, suff_stats) = self._e_step(X[idx_slice, :], cal_sstats=True, random_init=True, parallel=parallel)
if False:
self.components_ = self.topic_word_prior_ + suff_stats
else:
weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)
doc_ratio = float(n_samples) / X[idx_slice, :].shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
                    # end of inlined _em_step; the stray bare "return" produced by the
                    # inlining would wrongly exit fit after the first mini-batch
else:
(_, suff_stats) = self._e_step(X, cal_sstats=True, random_init=True, parallel=parallel)
if True:
self.components_ = self.topic_word_prior_ + suff_stats
else:
weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)
doc_ratio = float(n_samples) / X.shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
                # end of inlined _em_step; the stray bare "return" produced by the
                # inlining would wrongly exit fit after the first EM iteration
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
random_state = self.random_state_ if False else None
n_jobs = effective_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, False, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)))
(doc_topics, sstats_list) = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if False:
suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
(doc_topics_distr, _) = (doc_topic_distr, suff_stats)
if doc_topics_distr is None:
doc_topics_distr = self._unnormalized_transform(X)
else:
(n_samples, n_components) = doc_topics_distr.shape
if n_samples != X.shape[0]:
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if n_components != self.n_components:
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topics_distr, False)
if False:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
bound = np.exp(-1.0 * perword_bound)
if self.verbose:
print('iteration: %d of max_iter: %d, perplexity: %.4f' % (i + 1, max_iter, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
elif self.verbose:
print('iteration: %d of max_iter: %d' % (i + 1, max_iter))
self.n_iter_ += 1
random_state = self.random_state_ if False else None
n_jobs = effective_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, False, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)))
(doc_topics, sstats_list) = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if False:
suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
(doc_topics_distr, _) = (doc_topic_distr, suff_stats)
if doc_topics_distr is None:
doc_topics_distr = self._unnormalized_transform(X)
else:
(n_samples, n_components) = doc_topics_distr.shape
if n_samples != X.shape[0]:
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if n_components != self.n_components:
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topics_distr, False)
if False:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
self.bound_ = np.exp(-1.0 * perword_bound)
return self
|
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
self._validate_params()
<DeepExtract>
dtype = [np.float64, np.float32] if True else self.components_.dtype
X = self._validate_data(X, reset=True, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.fit')
X = X
</DeepExtract>
(n_samples, n_features) = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
<DeepExtract>
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1.0 / self.n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1.0 / self.n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.0
init_var = 1.0 / init_gamma
self.components_ = self.random_state_.gamma(init_gamma, init_var, (self.n_components, n_features)).astype(X.dtype, copy=False)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
</DeepExtract>
last_bound = None
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in range(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
<DeepExtract>
(_, suff_stats) = self._e_step(X[idx_slice, :], cal_sstats=True, random_init=True, parallel=parallel)
if False:
self.components_ = self.topic_word_prior_ + suff_stats
else:
weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)
doc_ratio = float(n_samples) / X[idx_slice, :].shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
                    # end of inlined _em_step; the stray bare "return" produced by the
                    # inlining would wrongly exit fit after the first mini-batch
</DeepExtract>
else:
<DeepExtract>
(_, suff_stats) = self._e_step(X, cal_sstats=True, random_init=True, parallel=parallel)
if True:
self.components_ = self.topic_word_prior_ + suff_stats
else:
weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)
doc_ratio = float(n_samples) / X.shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
                # end of inlined _em_step; the stray bare "return" produced by the
                # inlining would wrongly exit fit after the first EM iteration
</DeepExtract>
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
<DeepExtract>
random_state = self.random_state_ if False else None
n_jobs = effective_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, False, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)))
(doc_topics, sstats_list) = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if False:
suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
(doc_topics_distr, _) = (doc_topic_distr, suff_stats)
</DeepExtract>
<DeepExtract>
if doc_topics_distr is None:
doc_topics_distr = self._unnormalized_transform(X)
else:
(n_samples, n_components) = doc_topics_distr.shape
if n_samples != X.shape[0]:
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if n_components != self.n_components:
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topics_distr, False)
if False:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
bound = np.exp(-1.0 * perword_bound)
</DeepExtract>
if self.verbose:
print('iteration: %d of max_iter: %d, perplexity: %.4f' % (i + 1, max_iter, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
elif self.verbose:
print('iteration: %d of max_iter: %d' % (i + 1, max_iter))
self.n_iter_ += 1
<DeepExtract>
random_state = self.random_state_ if False else None
n_jobs = effective_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, False, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)))
(doc_topics, sstats_list) = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if False:
suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
(doc_topics_distr, _) = (doc_topic_distr, suff_stats)
</DeepExtract>
<DeepExtract>
if doc_topics_distr is None:
doc_topics_distr = self._unnormalized_transform(X)
else:
(n_samples, n_components) = doc_topics_distr.shape
if n_samples != X.shape[0]:
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if n_components != self.n_components:
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topics_distr, False)
if False:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
self.bound_ = np.exp(-1.0 * perword_bound)
</DeepExtract>
return self
|
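A small end-to-end run of the fit loop above on a toy count matrix, using the online (mini-batch) update path:

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

X = np.random.RandomState(0).randint(0, 5, size=(50, 20))  # document-word counts
lda = LatentDirichletAllocation(n_components=3, learning_method='online',
                                batch_size=16, max_iter=5, random_state=0).fit(X)
print(lda.components_.shape)  # (3, 20): unnormalized topic-word distributions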
def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
params = X_embedded.ravel()
    opt_args = {
        'it': 0,
        'n_iter_check': self._N_ITER_CHECK,
        'min_grad_norm': self.min_grad_norm,
        'learning_rate': self.learning_rate_,
        'verbose': self.verbose,
        'kwargs': dict(skip_num_points=skip_num_points),
        'args': [P, degrees_of_freedom, n_samples, self.n_components],
        'n_iter_without_progress': self._EXPLORATION_N_ITER,
        'n_iter': self._EXPLORATION_N_ITER,
        'momentum': 0.5,
    }
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
opt_args['kwargs']['num_threads'] = _openmp_effective_n_threads()
else:
obj_func = _kl_divergence
P *= self.early_exaggeration
    # Bind the names the extracted _gradient_descent received via **opt_args;
    # the inlining dropped this unpacking (min_gain is that function's default).
    (it, n_iter, n_iter_check) = (opt_args['it'], opt_args['n_iter'], opt_args['n_iter_check'])
    (min_grad_norm, learning_rate, verbose) = (opt_args['min_grad_norm'], opt_args['learning_rate'], opt_args['verbose'])
    (momentum, n_iter_without_progress) = (opt_args['momentum'], opt_args['n_iter_without_progress'])
    (args, kwargs, min_gain) = (opt_args['args'], opt_args['kwargs'], 0.01)
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
p = params.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
check_convergence = (i + 1) % n_iter_check == 0
kwargs['compute_error'] = check_convergence or i == n_iter - 1
(error, grad) = obj_func(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print('[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f (%s iterations in %0.3fs)' % (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print('[t-SNE] Iteration %d: did not make any progress during the last %d episodes. Finished.' % (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print('[t-SNE] Iteration %d: gradient norm %f. Finished.' % (i + 1, grad_norm))
break
(params, kl_divergence, it) = (p, error, i)
if self.verbose:
print('[t-SNE] KL divergence after %d iterations with early exaggeration: %f' % (it + 1, kl_divergence))
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
        # Re-bind from the updated opt_args (min_gain again at the extracted default).
        (it, n_iter, n_iter_check) = (opt_args['it'], opt_args['n_iter'], opt_args['n_iter_check'])
        (min_grad_norm, learning_rate, verbose) = (opt_args['min_grad_norm'], opt_args['learning_rate'], opt_args['verbose'])
        (momentum, n_iter_without_progress) = (opt_args['momentum'], opt_args['n_iter_without_progress'])
        (args, kwargs, min_gain) = (opt_args['args'], opt_args['kwargs'], 0.01)
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
p = params.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
check_convergence = (i + 1) % n_iter_check == 0
kwargs['compute_error'] = check_convergence or i == n_iter - 1
(error, grad) = obj_func(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print('[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f (%s iterations in %0.3fs)' % (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print('[t-SNE] Iteration %d: did not make any progress during the last %d episodes. Finished.' % (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print('[t-SNE] Iteration %d: gradient norm %f. Finished.' % (i + 1, grad_norm))
break
(params, kl_divergence, it) = (p, error, i)
self.n_iter_ = it
if self.verbose:
print('[t-SNE] KL divergence after %d iterations: %f' % (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
|
def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
params = X_embedded.ravel()
    opt_args = {
        'it': 0,
        'n_iter_check': self._N_ITER_CHECK,
        'min_grad_norm': self.min_grad_norm,
        'learning_rate': self.learning_rate_,
        'verbose': self.verbose,
        'kwargs': dict(skip_num_points=skip_num_points),
        'args': [P, degrees_of_freedom, n_samples, self.n_components],
        'n_iter_without_progress': self._EXPLORATION_N_ITER,
        'n_iter': self._EXPLORATION_N_ITER,
        'momentum': 0.5,
    }
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
opt_args['kwargs']['num_threads'] = _openmp_effective_n_threads()
else:
obj_func = _kl_divergence
P *= self.early_exaggeration
<DeepExtract>
    # Bind the names the extracted _gradient_descent received via **opt_args;
    # the inlining dropped this unpacking (min_gain is that function's default).
    (it, n_iter, n_iter_check) = (opt_args['it'], opt_args['n_iter'], opt_args['n_iter_check'])
    (min_grad_norm, learning_rate, verbose) = (opt_args['min_grad_norm'], opt_args['learning_rate'], opt_args['verbose'])
    (momentum, n_iter_without_progress) = (opt_args['momentum'], opt_args['n_iter_without_progress'])
    (args, kwargs, min_gain) = (opt_args['args'], opt_args['kwargs'], 0.01)
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
p = params.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
check_convergence = (i + 1) % n_iter_check == 0
kwargs['compute_error'] = check_convergence or i == n_iter - 1
(error, grad) = obj_func(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print('[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f (%s iterations in %0.3fs)' % (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print('[t-SNE] Iteration %d: did not make any progress during the last %d episodes. Finished.' % (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print('[t-SNE] Iteration %d: gradient norm %f. Finished.' % (i + 1, grad_norm))
break
(params, kl_divergence, it) = (p, error, i)
</DeepExtract>
if self.verbose:
print('[t-SNE] KL divergence after %d iterations with early exaggeration: %f' % (it + 1, kl_divergence))
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
<DeepExtract>
        # Re-bind from the updated opt_args (min_gain again at the extracted default).
        (it, n_iter, n_iter_check) = (opt_args['it'], opt_args['n_iter'], opt_args['n_iter_check'])
        (min_grad_norm, learning_rate, verbose) = (opt_args['min_grad_norm'], opt_args['learning_rate'], opt_args['verbose'])
        (momentum, n_iter_without_progress) = (opt_args['momentum'], opt_args['n_iter_without_progress'])
        (args, kwargs, min_gain) = (opt_args['args'], opt_args['kwargs'], 0.01)
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
p = params.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
check_convergence = (i + 1) % n_iter_check == 0
kwargs['compute_error'] = check_convergence or i == n_iter - 1
(error, grad) = obj_func(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print('[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f (%s iterations in %0.3fs)' % (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print('[t-SNE] Iteration %d: did not make any progress during the last %d episodes. Finished.' % (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print('[t-SNE] Iteration %d: gradient norm %f. Finished.' % (i + 1, grad_norm))
break
(params, kl_divergence, it) = (p, error, i)
</DeepExtract>
self.n_iter_ = it
if self.verbose:
print('[t-SNE] KL divergence after %d iterations: %f' % (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
|
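The optimizer inlined above is gradient descent with momentum and per-parameter adaptive "gains". A minimal self-contained sketch of just that update rule, applied to a quadratic rather than the t-SNE objective:

import numpy as np

def descend(grad_fn, p, n_iter=100, learning_rate=0.05, momentum=0.5, min_gain=0.01):
    update, gains = np.zeros_like(p), np.ones_like(p)
    for _ in range(n_iter):
        grad = grad_fn(p)
        inc = update * grad < 0.0   # same descent direction as before: grow the gain
        gains[inc] += 0.2
        gains[~inc] *= 0.8          # direction flipped: shrink it
        np.clip(gains, min_gain, np.inf, out=gains)
        update = momentum * update - learning_rate * gains * grad
        p = p + update
    return p

p = descend(lambda p: 2 * p, np.array([5.0, -3.0]))  # minimize ||p||^2
print(np.abs(p).max())  # should have shrunk toward 0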
def test_roc_curve_multi():
    dataset = None  # default of the extracted helper; falls through to load_iris below
    if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if False:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if False:
y_score = y_score[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, _, y_score) = (y_true, y_pred, y_score)
with pytest.raises(ValueError):
roc_curve(y_true, y_score)
|
def test_roc_curve_multi():
<DeepExtract>
    dataset = None  # default of the extracted helper; falls through to load_iris below
    if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if False:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if False:
y_score = y_score[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, _, y_score) = (y_true, y_pred, y_score)
</DeepExtract>
with pytest.raises(ValueError):
roc_curve(y_true, y_score)
|
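The behavior under test, in isolation: roc_curve is defined for binary targets only, so a three-class y_true raises.

import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 1, 2, 1, 0])
y_score = np.random.RandomState(0).rand(5)
try:
    roc_curve(y_true, y_score)
except ValueError as exc:
    print(exc)  # multiclass format is not supported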
def test_fastica_convergence_fail():
rng = np.random.RandomState(0)
n_samples = 1000
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
    s = np.rollaxis(s, -1)  # axis=-1 was the extracted helper's default
s -= s.mean(axis=0)
s /= s.std(axis=0)
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
warn_msg = 'FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.'
with pytest.warns(ConvergenceWarning, match=warn_msg):
ica = FastICA(algorithm='parallel', n_components=2, random_state=rng, max_iter=2, tol=0.0)
ica.fit(m.T)
|
def test_fastica_convergence_fail():
rng = np.random.RandomState(0)
n_samples = 1000
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
<DeepExtract>
    s = np.rollaxis(s, -1)  # axis=-1 was the extracted helper's default
s -= s.mean(axis=0)
s /= s.std(axis=0)
</DeepExtract>
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
warn_msg = 'FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.'
with pytest.warns(ConvergenceWarning, match=warn_msg):
ica = FastICA(algorithm='parallel', n_components=2, random_state=rng, max_iter=2, tol=0.0)
ica.fit(m.T)
|
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]['X']
X_sparse = DATASETS[dataset]['X_sparse']
y = DATASETS[dataset]['y']
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y)
        assert s.tree_.node_count == d.tree_.node_count, '{0}: unequal number of nodes ({1} != {2})'.format('{0} with dense and sparse format gave different trees'.format(tree), s.tree_.node_count, d.tree_.node_count)
        assert_array_equal(d.tree_.children_right, s.tree_.children_right, '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal children_right')
        assert_array_equal(d.tree_.children_left, s.tree_.children_left, '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal children_left')
        external = d.tree_.children_right == TREE_LEAF
        internal = np.logical_not(external)
        assert_array_equal(d.tree_.feature[internal], s.tree_.feature[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal features')
        assert_array_equal(d.tree_.threshold[internal], s.tree_.threshold[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal threshold')
        assert_array_equal(d.tree_.n_node_samples.sum(), s.tree_.n_node_samples.sum(), '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal sum(n_node_samples)')
        assert_array_equal(d.tree_.n_node_samples, s.tree_.n_node_samples, '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal n_node_samples')
        assert_almost_equal(d.tree_.impurity, s.tree_.impurity, err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': unequal impurity')
        assert_array_almost_equal(d.tree_.value[external], s.tree_.value[external], err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': unequal value')
assert_array_almost_equal(s.predict(X), d.predict(X))
|
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]['X']
X_sparse = DATASETS[dataset]['X_sparse']
y = DATASETS[dataset]['y']
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y)
<DeepExtract>
        assert s.tree_.node_count == d.tree_.node_count, '{0}: unequal number of nodes ({1} != {2})'.format('{0} with dense and sparse format gave different trees'.format(tree), s.tree_.node_count, d.tree_.node_count)
        assert_array_equal(d.tree_.children_right, s.tree_.children_right, '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal children_right')
        assert_array_equal(d.tree_.children_left, s.tree_.children_left, '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal children_left')
        external = d.tree_.children_right == TREE_LEAF
        internal = np.logical_not(external)
        assert_array_equal(d.tree_.feature[internal], s.tree_.feature[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal features')
        assert_array_equal(d.tree_.threshold[internal], s.tree_.threshold[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal threshold')
        assert_array_equal(d.tree_.n_node_samples.sum(), s.tree_.n_node_samples.sum(), '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal sum(n_node_samples)')
        assert_array_equal(d.tree_.n_node_samples, s.tree_.n_node_samples, '{0} with dense and sparse format gave different trees'.format(tree) + ': unequal n_node_samples')
        assert_almost_equal(d.tree_.impurity, s.tree_.impurity, err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': unequal impurity')
        assert_array_almost_equal(d.tree_.value[external], s.tree_.value[external], err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': unequal value')
</DeepExtract>
assert_array_almost_equal(s.predict(X), d.predict(X))
|
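The invariant checked above, in miniature: a tree fit on dense data and on its CSR counterpart should produce identical structures and predictions.

import numpy as np
from scipy import sparse
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.binomial(1, 0.3, size=(60, 8)).astype(np.float64)
y = (X[:, 0] + X[:, 3] > 0).astype(int)
d = DecisionTreeClassifier(random_state=0, max_depth=3).fit(X, y)
s = DecisionTreeClassifier(random_state=0, max_depth=3).fit(sparse.csr_matrix(X), y)
print(d.tree_.node_count == s.tree_.node_count, np.array_equal(d.predict(X), s.predict(X)))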
def fit_predict(self, X, y=None, sample_weight=None):
"""Compute clusters from a data or distance matrix and predict labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels. Noisy samples are given the label -1.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.metric == 'precomputed' and sparse.issparse(X):
with warnings.catch_warnings():
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal())
neighbors_model = NearestNeighbors(radius=self.eps, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs)
neighbors_model.fit(X)
neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods])
labels = np.full(X.shape[0], -1, dtype=np.intp)
core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
self.components_ = X[self.core_sample_indices_].copy()
else:
self.components_ = np.empty((0, X.shape[1]))
    # end of inlined fit; its original "return self" must not short-circuit fit_predict
return self.labels_
|
def fit_predict(self, X, y=None, sample_weight=None):
"""Compute clusters from a data or distance matrix and predict labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels. Noisy samples are given the label -1.
"""
<DeepExtract>
self._validate_params()
X = self._validate_data(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.metric == 'precomputed' and sparse.issparse(X):
with warnings.catch_warnings():
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal())
neighbors_model = NearestNeighbors(radius=self.eps, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs)
neighbors_model.fit(X)
neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods])
labels = np.full(X.shape[0], -1, dtype=np.intp)
core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
self.components_ = X[self.core_sample_indices_].copy()
else:
self.components_ = np.empty((0, X.shape[1]))
    # end of inlined fit; its original "return self" must not short-circuit fit_predict
</DeepExtract>
return self.labels_
|
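fit_predict above is just fit followed by returning labels_; a tiny run on two well-separated blobs:

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.2, (20, 2)), rng.normal(5.0, 0.2, (20, 2))])
labels = DBSCAN(eps=0.5, min_samples=5).fit_predict(X)
print(np.unique(labels))  # expected [0 1]; a -1 entry would mark noise points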
def load_sample_image(image_name):
"""Load the numpy array of a single sample image.
Read more in the :ref:`User Guide <sample_images>`.
Parameters
----------
image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded.
Returns
-------
img : 3D array
The image as a numpy array: height x width x color.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
try:
from PIL import Image
except ImportError:
raise ImportError('The Python Imaging Library (PIL) is required to load data from jpeg files. Please refer to https://pillow.readthedocs.io/en/stable/installation.html for installing PIL.')
descr = load_descr('README.txt', descr_module=IMAGES_MODULE)
(filenames, images) = ([], [])
for filename in sorted(_contents(IMAGES_MODULE)):
if filename.endswith('.jpg'):
filenames.append(filename)
with _open_binary(IMAGES_MODULE, filename) as image_file:
pil_image = Image.open(image_file)
image = np.asarray(pil_image)
images.append(image)
images = Bunch(images=images, filenames=filenames, DESCR=descr)
index = None
for (i, filename) in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError('Cannot find sample image: %s' % image_name)
return images.images[index]
|
def load_sample_image(image_name):
"""Load the numpy array of a single sample image.
Read more in the :ref:`User Guide <sample_images>`.
Parameters
----------
image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded.
Returns
-------
img : 3D array
The image as a numpy array: height x width x color.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
<DeepExtract>
try:
from PIL import Image
except ImportError:
raise ImportError('The Python Imaging Library (PIL) is required to load data from jpeg files. Please refer to https://pillow.readthedocs.io/en/stable/installation.html for installing PIL.')
descr = load_descr('README.txt', descr_module=IMAGES_MODULE)
(filenames, images) = ([], [])
for filename in sorted(_contents(IMAGES_MODULE)):
if filename.endswith('.jpg'):
filenames.append(filename)
with _open_binary(IMAGES_MODULE, filename) as image_file:
pil_image = Image.open(image_file)
image = np.asarray(pil_image)
images.append(image)
images = Bunch(images=images, filenames=filenames, DESCR=descr)
</DeepExtract>
index = None
for (i, filename) in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError('Cannot find sample image: %s' % image_name)
return images.images[index]
|
def partial_fit(self, X, y, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data.
y : numpy array of shape (n_samples,)
Subset of target values.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
if not hasattr(self, 'coef_'):
self._validate_params()
if self.early_stopping and True:
raise ValueError('early_stopping should be False with partial_fit')
if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0:
raise ValueError('eta0 must be > 0')
if self.learning_rate == 'optimal' and self.alpha == 0:
raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.")
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None)
|
def partial_fit(self, X, y, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data.
y : numpy array of shape (n_samples,)
Subset of target values.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
if not hasattr(self, 'coef_'):
self._validate_params()
<DeepExtract>
if self.early_stopping and True:
raise ValueError('early_stopping should be False with partial_fit')
if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0:
raise ValueError('eta0 must be > 0')
if self.learning_rate == 'optimal' and self.alpha == 0:
raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.")
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
</DeepExtract>
return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None)
|
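The one-epoch contract documented above, in practice: repeated partial_fit calls replace fit's internal loop, and convergence handling is left to the caller.

import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.normal(size=200)
reg = SGDRegressor(random_state=0)
for _ in range(20):          # 20 manual epochs, one pass over the data each
    reg.partial_fit(X, y)
print(reg.coef_.round(2))    # close to [1.0, -2.0, 0.5]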
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
if False:
print('... computing %s norm ...' % 2)
if 2 == 2:
v0 = _init_arpack_v0(min(X.shape), 0)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord=2)
else:
value = sp.linalg.norm(X, ord=2)
X_spectral_norm = value
all_frobenius = defaultdict(list)
if False:
print('... computing %s norm ...' % 'fro')
if 'fro' == 2:
v0 = _init_arpack_v0(min(X.shape), random_state)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord='fro')
else:
value = sp.linalg.norm(X, ord='fro')
X_fro_norm = value
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print('n_iter = %d on sklearn - %s' % (pi, pm))
print('... running SVD ...')
            method = None  # default of the extracted timing helper: the sklearn code path
            if method != 'fbpca':
gc.collect()
t0 = time()
(U, mu, V) = randomized_svd(X, n_comps, n_oversamples=n_oversamples, n_iter=pi, power_iteration_normalizer=pm, random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
(U, mu, V) = fbpca.pca(X, n_comps, raw=True, n_iter=pi, l=n_oversamples + n_comps)
call_time = time() - t0
            (U, s, V, svd_time) = (U, mu, V, call_time)  # renamed: binding "time" would shadow time()
label = 'sklearn - %s' % pm
            all_time[label].append(svd_time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm)
if not sp.sparse.issparse(X) or X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
f = norm_diff(A, norm='fro')
print('... computing fro norm by batches...')
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = 0.0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
f = np.sqrt(cum_norm)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print('n_iter = %d on fbca' % pi)
print('... running SVD ...')
if 'fbpca' != 'fbpca':
gc.collect()
t0 = time()
(U, mu, V) = randomized_svd(X, n_comps, n_oversamples=n_oversamples, n_iter=pi, power_iteration_normalizer=pm, random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
(U, mu, V) = fbpca.pca(X, n_comps, raw=True, n_iter=pi, l=n_oversamples + n_comps)
call_time = time() - t0
            (U, s, V, svd_time) = (U, mu, V, call_time)  # renamed: binding "time" would shadow time()
label = 'fbpca'
            all_time[label].append(svd_time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm)
if not sp.sparse.issparse(X) or X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
f = norm_diff(A, norm='fro')
print('... computing fro norm by batches...')
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = 0.0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
f = np.sqrt(cum_norm)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = '%s: spectral norm diff vs running time' % dataset_name
plt.figure()
colors = ['g', 'b', 'y']
for (i, l) in enumerate(sorted(all_spectral.keys())):
if l != 'fbpca':
plt.plot(all_time[l], all_spectral[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(all_time[l], all_spectral[l], label=l, marker='^', c='red')
for (label, x, y) in zip(power_iter, list(all_time[l]), list(all_spectral[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20), textcoords='offset points', ha='right', va='bottom')
plt.legend(loc='upper right')
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('running time [s]')
title = '%s: Frobenius norm diff vs running time' % dataset_name
plt.figure()
colors = ['g', 'b', 'y']
for (i, l) in enumerate(sorted(all_frobenius.keys())):
if l != 'fbpca':
plt.plot(all_time[l], all_frobenius[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(all_time[l], all_frobenius[l], label=l, marker='^', c='red')
for (label, x, y) in zip(power_iter, list(all_time[l]), list(all_frobenius[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20), textcoords='offset points', ha='right', va='bottom')
plt.legend(loc='upper right')
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('running time [s]')
|
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
<DeepExtract>
if False:
print('... computing %s norm ...' % 2)
if 2 == 2:
v0 = _init_arpack_v0(min(X.shape), 0)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord=2)
else:
value = sp.linalg.norm(X, ord=2)
X_spectral_norm = value
</DeepExtract>
all_frobenius = defaultdict(list)
<DeepExtract>
if False:
print('... computing %s norm ...' % 'fro')
if 'fro' == 2:
            v0 = _init_arpack_v0(min(X.shape), None)
value = sp.sparse.linalg.svds(X, k=1, return_singular_vectors=False, v0=v0)
elif sp.sparse.issparse(X):
value = sp.sparse.linalg.norm(X, ord='fro')
else:
value = sp.linalg.norm(X, ord='fro')
X_fro_norm = value
</DeepExtract>
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print('n_iter = %d on sklearn - %s' % (pi, pm))
<DeepExtract>
print('... running SVD ...')
            if None != 'fbpca':
gc.collect()
t0 = time()
(U, mu, V) = randomized_svd(X, n_comps, n_oversamples=n_oversamples, n_iter=pi, power_iteration_normalizer=pm, random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
(U, mu, V) = fbpca.pca(X, n_comps, raw=True, n_iter=pi, l=n_oversamples + n_comps)
call_time = time() - t0
            (U, s, V, svd_time) = (U, mu, V, call_time)  # avoid shadowing time() from the time module
            </DeepExtract>
            label = 'sklearn - %s' % pm
            all_time[label].append(svd_time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm)
<DeepExtract>
            if not sp.sparse.issparse(X) or X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY:
                A = X - U.dot(np.diag(s).dot(V))
                f = norm_diff(A, norm='fro')
            else:
                print('... computing fro norm by batches...')
                batch_size = 1000
                Vhat = np.diag(s).dot(V)
                cum_norm = 0.0
                for batch in gen_batches(X.shape[0], batch_size):
                    M = X[batch, :] - U[batch, :].dot(Vhat)
                    cum_norm += norm_diff(M, norm='fro', msg=False)
                f = np.sqrt(cum_norm)
</DeepExtract>
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
            print('n_iter = %d on fbpca' % pi)
<DeepExtract>
print('... running SVD ...')
if 'fbpca' != 'fbpca':
gc.collect()
t0 = time()
(U, mu, V) = randomized_svd(X, n_comps, n_oversamples=n_oversamples, n_iter=pi, power_iteration_normalizer=pm, random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
(U, mu, V) = fbpca.pca(X, n_comps, raw=True, n_iter=pi, l=n_oversamples + n_comps)
call_time = time() - t0
            (U, s, V, svd_time) = (U, mu, V, call_time)  # avoid shadowing time() from the time module
            </DeepExtract>
            label = 'fbpca'
            all_time[label].append(svd_time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm)
<DeepExtract>
            if not sp.sparse.issparse(X) or X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY:
                A = X - U.dot(np.diag(s).dot(V))
                f = norm_diff(A, norm='fro')
            else:
                print('... computing fro norm by batches...')
                batch_size = 1000
                Vhat = np.diag(s).dot(V)
                cum_norm = 0.0
                for batch in gen_batches(X.shape[0], batch_size):
                    M = X[batch, :] - U[batch, :].dot(Vhat)
                    cum_norm += norm_diff(M, norm='fro', msg=False)
                f = np.sqrt(cum_norm)
</DeepExtract>
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = '%s: spectral norm diff vs running time' % dataset_name
<DeepExtract>
plt.figure()
colors = ['g', 'b', 'y']
for (i, l) in enumerate(sorted(all_spectral.keys())):
if l != 'fbpca':
plt.plot(all_time[l], all_spectral[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(all_time[l], all_spectral[l], label=l, marker='^', c='red')
for (label, x, y) in zip(power_iter, list(all_time[l]), list(all_spectral[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20), textcoords='offset points', ha='right', va='bottom')
plt.legend(loc='upper right')
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('running time [s]')
</DeepExtract>
title = '%s: Frobenius norm diff vs running time' % dataset_name
<DeepExtract>
plt.figure()
colors = ['g', 'b', 'y']
for (i, l) in enumerate(sorted(all_frobenius.keys())):
if l != 'fbpca':
plt.plot(all_time[l], all_frobenius[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(all_time[l], all_frobenius[l], label=l, marker='^', c='red')
for (label, x, y) in zip(power_iter, list(all_time[l]), list(all_frobenius[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20), textcoords='offset points', ha='right', va='bottom')
plt.legend(loc='upper right')
plt.suptitle(title)
plt.ylabel('norm discrepancy')
plt.xlabel('running time [s]')
</DeepExtract>
|
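A minimal standalone sketch of the call the benchmark above is timing. It uses only the public randomized_svd from sklearn.utils.extmath; the matrix size and parameter values are illustrative assumptions, not the benchmark's datasets.

# Hedged sketch: the randomized_svd call exercised by the benchmark loop.
import numpy as np
from sklearn.utils.extmath import randomized_svd

X = np.random.RandomState(0).randn(200, 50)
# 'QR' is the most numerically robust of the three power-iteration
# normalizers ('none', 'LU', 'QR') compared above.
U, s, Vt = randomized_svd(X, n_components=5, n_oversamples=10, n_iter=4,
                          power_iteration_normalizer='QR', random_state=0)
print(s[:3])  # leading singular values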
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
"""Binary roc auc score."""
if len(np.unique(y_true)) != 2:
raise ValueError('Only one class present in y_true. ROC AUC score is not defined in that case.')
    (fps, tps, thresholds) = _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=sample_weight)
    if True and len(fps) > 2:
optimal_idxs = np.where(np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn('No negative samples in y_true, false positive value should be meaningless', UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn('No positive samples in y_true, true positive value should be meaningless', UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
(fpr, tpr, _) = (fpr, tpr, thresholds)
if max_fpr is None or max_fpr == 1:
return auc(fpr, tpr)
if max_fpr <= 0 or max_fpr > 1:
raise ValueError('Expected max_fpr in range (0, 1], got: %r' % max_fpr)
stop = np.searchsorted(fpr, max_fpr, 'right')
x_interp = [fpr[stop - 1], fpr[stop]]
y_interp = [tpr[stop - 1], tpr[stop]]
tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
fpr = np.append(fpr[:stop], max_fpr)
check_consistent_length(fpr, tpr)
fpr = column_or_1d(fpr)
tpr = column_or_1d(tpr)
if fpr.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute area under curve, but x.shape = %s' % fpr.shape)
direction = 1
dx = np.diff(fpr)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError('x is neither increasing nor decreasing : {}.'.format(fpr))
area = direction * np.trapz(tpr, fpr)
if isinstance(area, np.memmap):
area = area.dtype.type(area)
partial_auc = area
min_area = 0.5 * max_fpr ** 2
max_area = max_fpr
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
|
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
"""Binary roc auc score."""
if len(np.unique(y_true)) != 2:
raise ValueError('Only one class present in y_true. ROC AUC score is not defined in that case.')
<DeepExtract>
    (fps, tps, thresholds) = _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=sample_weight)
    if True and len(fps) > 2:
optimal_idxs = np.where(np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn('No negative samples in y_true, false positive value should be meaningless', UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn('No positive samples in y_true, true positive value should be meaningless', UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
(fpr, tpr, _) = (fpr, tpr, thresholds)
</DeepExtract>
if max_fpr is None or max_fpr == 1:
return auc(fpr, tpr)
if max_fpr <= 0 or max_fpr > 1:
raise ValueError('Expected max_fpr in range (0, 1], got: %r' % max_fpr)
stop = np.searchsorted(fpr, max_fpr, 'right')
x_interp = [fpr[stop - 1], fpr[stop]]
y_interp = [tpr[stop - 1], tpr[stop]]
tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
fpr = np.append(fpr[:stop], max_fpr)
<DeepExtract>
check_consistent_length(fpr, tpr)
fpr = column_or_1d(fpr)
tpr = column_or_1d(tpr)
if fpr.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute area under curve, but x.shape = %s' % fpr.shape)
direction = 1
dx = np.diff(fpr)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError('x is neither increasing nor decreasing : {}.'.format(fpr))
area = direction * np.trapz(tpr, fpr)
if isinstance(area, np.memmap):
area = area.dtype.type(area)
partial_auc = area
</DeepExtract>
min_area = 0.5 * max_fpr ** 2
max_area = max_fpr
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
|
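A short usage sketch of the max_fpr branch above: when max_fpr is set, the area over [0, max_fpr] is rescaled (McClish correction) so that 0.5 still marks chance level. roc_auc_score is the public entry point that reaches this helper; the toy data are illustrative.

import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
print(roc_auc_score(y_true, y_score))               # full AUC
print(roc_auc_score(y_true, y_score, max_fpr=0.5))  # standardized partial AUC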
def graphical_lasso(emp_cov, alpha, *, cov_init=None, mode='cd', tol=0.0001, enet_tol=0.0001, max_iter=100, verbose=False, return_costs=False, eps=np.finfo(np.float64).eps, return_n_iter=False):
"""L1-penalized covariance estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
.. versionchanged:: v0.20
graph_lasso has been renamed to graphical_lasso
Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
Range is (0, inf].
cov_init : array of shape (n_features, n_features), default=None
The initial guess for the covariance. If None, then the empirical
covariance is used.
mode : {'cd', 'lars'}, default='cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : float, default=1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped. Range is (0, inf].
enet_tol : float, default=1e-4
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'. Range is (0, inf].
max_iter : int, default=100
The maximum number of iterations.
verbose : bool, default=False
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : bool, default=False
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, default=eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Default is `np.finfo(np.float64).eps`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The estimated covariance matrix.
precision : ndarray of shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
GraphicalLassoCV : Sparse inverse covariance with
cross-validated choice of the l1 penalty.
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
(_, n_features) = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = -2.0 * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return (emp_cov, precision_, (cost, d_gap), 0)
else:
return (emp_cov, precision_, (cost, d_gap))
elif return_n_iter:
return (emp_cov, linalg.inv(emp_cov), 0)
else:
return (emp_cov, linalg.inv(emp_cov))
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = linalg.pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
d_gap = np.inf
sub_covariance = np.copy(covariance_[1:, 1:], order='C')
for i in range(max_iter):
for idx in range(n_features):
if idx > 0:
di = idx - 1
sub_covariance[di] = covariance_[di][indices != idx]
sub_covariance[:, di] = covariance_[:, di][indices != idx]
else:
sub_covariance[:] = covariance_[1:, 1:]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
coefs = -(precision_[indices != idx, idx] / (precision_[idx, idx] + 1000 * eps))
(coefs, _, _, _) = cd_fast.enet_coordinate_descent_gram(coefs, alpha, 0, sub_covariance, row, row, max_iter, enet_tol, check_random_state(None), False)
else:
(_, _, coefs) = lars_path_gram(Xy=row, Gram=sub_covariance, n_samples=row.size, alpha_min=alpha / (n_features - 1), copy_Gram=True, eps=eps, method='lars', return_path=False)
precision_[idx, idx] = 1.0 / (covariance_[idx, idx] - np.dot(covariance_[indices != idx, idx], coefs))
precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
if not np.isfinite(precision_.sum()):
raise FloatingPointError('The system is too ill-conditioned for this solver')
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
d_gap = gap
p = precision_.shape[0]
cost = -2.0 * log_likelihood(emp_cov, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
cost = cost
if verbose:
print('[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e' % (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is too ill-conditioned for this solver')
else:
            warnings.warn('graphical_lasso: did not converge after %i iterations: dual gap: %.3e' % (max_iter, d_gap), ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0] + '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return (covariance_, precision_, costs, i + 1)
else:
return (covariance_, precision_, costs)
elif return_n_iter:
return (covariance_, precision_, i + 1)
else:
return (covariance_, precision_)
|
def graphical_lasso(emp_cov, alpha, *, cov_init=None, mode='cd', tol=0.0001, enet_tol=0.0001, max_iter=100, verbose=False, return_costs=False, eps=np.finfo(np.float64).eps, return_n_iter=False):
"""L1-penalized covariance estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
.. versionchanged:: v0.20
graph_lasso has been renamed to graphical_lasso
Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
Range is (0, inf].
cov_init : array of shape (n_features, n_features), default=None
The initial guess for the covariance. If None, then the empirical
covariance is used.
mode : {'cd', 'lars'}, default='cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : float, default=1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped. Range is (0, inf].
enet_tol : float, default=1e-4
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'. Range is (0, inf].
max_iter : int, default=100
The maximum number of iterations.
verbose : bool, default=False
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : bool, default=False
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, default=eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Default is `np.finfo(np.float64).eps`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The estimated covariance matrix.
precision : ndarray of shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
GraphicalLassoCV : Sparse inverse covariance with
cross-validated choice of the l1 penalty.
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
(_, n_features) = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = -2.0 * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return (emp_cov, precision_, (cost, d_gap), 0)
else:
return (emp_cov, precision_, (cost, d_gap))
elif return_n_iter:
return (emp_cov, linalg.inv(emp_cov), 0)
else:
return (emp_cov, linalg.inv(emp_cov))
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = linalg.pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
d_gap = np.inf
sub_covariance = np.copy(covariance_[1:, 1:], order='C')
for i in range(max_iter):
for idx in range(n_features):
if idx > 0:
di = idx - 1
sub_covariance[di] = covariance_[di][indices != idx]
sub_covariance[:, di] = covariance_[:, di][indices != idx]
else:
sub_covariance[:] = covariance_[1:, 1:]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
coefs = -(precision_[indices != idx, idx] / (precision_[idx, idx] + 1000 * eps))
(coefs, _, _, _) = cd_fast.enet_coordinate_descent_gram(coefs, alpha, 0, sub_covariance, row, row, max_iter, enet_tol, check_random_state(None), False)
else:
(_, _, coefs) = lars_path_gram(Xy=row, Gram=sub_covariance, n_samples=row.size, alpha_min=alpha / (n_features - 1), copy_Gram=True, eps=eps, method='lars', return_path=False)
precision_[idx, idx] = 1.0 / (covariance_[idx, idx] - np.dot(covariance_[indices != idx, idx], coefs))
precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
if not np.isfinite(precision_.sum()):
raise FloatingPointError('The system is too ill-conditioned for this solver')
<DeepExtract>
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
d_gap = gap
</DeepExtract>
<DeepExtract>
p = precision_.shape[0]
cost = -2.0 * log_likelihood(emp_cov, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
cost = cost
</DeepExtract>
if verbose:
print('[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e' % (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is too ill-conditioned for this solver')
else:
            warnings.warn('graphical_lasso: did not converge after %i iterations: dual gap: %.3e' % (max_iter, d_gap), ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0] + '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return (covariance_, precision_, costs, i + 1)
else:
return (covariance_, precision_, costs)
elif return_n_iter:
return (covariance_, precision_, i + 1)
else:
return (covariance_, precision_)
|
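A hedged usage sketch for graphical_lasso: it expects an empirical covariance matrix rather than raw samples, so it is typically paired with empirical_covariance. All names are public scikit-learn API; the data are illustrative.

import numpy as np
from sklearn.covariance import empirical_covariance, graphical_lasso

rng = np.random.RandomState(0)
X = rng.randn(60, 5)
emp_cov = empirical_covariance(X)
covariance, precision = graphical_lasso(emp_cov, alpha=0.2)
# zeros off the diagonal of the precision matrix encode conditional independence
print(np.round(precision, 2))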
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
y : array-like of shape (n_samples,)
Target values (strings or integers in classification, real numbers
        in regression).
For classification, labels must correspond to classes.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, default=None
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
        computing held-out estimates, early stopping, model introspection, and
        snapshotting.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if not self.warm_start:
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'oob_scores_'):
del self.oob_scores_
if hasattr(self, 'oob_score_'):
del self.oob_score_
if hasattr(self, 'init_'):
del self.init_
if hasattr(self, '_rng'):
del self._rng
(X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE, multi_output=True)
sample_weight_is_none = sample_weight is None
sample_weight = _check_sample_weight(sample_weight, X)
y = column_or_1d(y, warn=True)
    if is_classifier(self):
        pass  # inlined body missing from the source extraction
    else:
        pass  # inlined body missing from the source extraction
if self.loss == 'log_loss':
loss_class = _gb_losses.MultinomialDeviance if len(self.classes_) > 2 else _gb_losses.BinomialDeviance
else:
loss_class = _gb_losses.LOSS_FUNCTIONS[self.loss]
if is_classifier(self):
self._loss = loss_class(self.n_classes_)
elif self.loss in ('huber', 'quantile'):
self._loss = loss_class(self.alpha)
else:
self._loss = loss_class()
if isinstance(self.max_features, str):
if self.max_features == 'auto':
if is_classifier(self):
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = self.n_features_in_
elif self.max_features == 'sqrt':
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = max(1, int(np.log2(self.n_features_in_)))
elif self.max_features is None:
max_features = self.n_features_in_
elif isinstance(self.max_features, Integral):
max_features = self.max_features
else:
max_features = max(1, int(self.max_features * self.n_features_in_))
self.max_features_ = max_features
if self.n_iter_no_change is not None:
stratify = y if is_classifier(self) else None
(X, X_val, y, y_val, sample_weight, sample_weight_val) = train_test_split(X, y, sample_weight, random_state=self.random_state, test_size=self.validation_fraction, stratify=stratify)
if is_classifier(self):
if self._n_classes != np.unique(y).shape[0]:
raise ValueError('The training data after the early stopping split is missing some classes. Try using another random seed.')
else:
X_val = y_val = sample_weight_val = None
if not self._is_initialized():
self.init_ = self.init
if self.init_ is None:
self.init_ = self._loss.init_estimator()
self.estimators_ = np.empty((self.n_estimators, self._loss.K), dtype=object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros(self.n_estimators, dtype=np.float64)
self.oob_scores_ = np.zeros(self.n_estimators, dtype=np.float64)
self.oob_score_ = np.nan
if self.init_ == 'zero':
raw_predictions = np.zeros(shape=(X.shape[0], self._loss.K), dtype=np.float64)
else:
if sample_weight_is_none:
self.init_.fit(X, y)
else:
msg = 'The initial estimator {} does not support sample weights.'.format(self.init_.__class__.__name__)
try:
self.init_.fit(X, y, sample_weight=sample_weight)
except TypeError as e:
if "unexpected keyword argument 'sample_weight'" in str(e):
raise ValueError(msg) from e
else:
raise
except ValueError as e:
if 'pass parameters to specific steps of your pipeline using the stepname__parameter' in str(e):
raise ValueError(msg) from e
else:
raise
raw_predictions = self._loss.get_init_raw_predictions(X, self.init_)
begin_at_stage = 0
self._rng = check_random_state(self.random_state)
else:
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to estimators_.shape[0]=%d when warm_start==True' % (self.n_estimators, self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
X = check_array(X, dtype=DTYPE, order='C', accept_sparse='csr', force_all_finite=False)
raw_predictions = self._raw_predict_init(X)
predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
raw_predictions = raw_predictions
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
        raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_.shape[0]))
self.estimators_ = np.resize(self.estimators_, (total_n_estimators, self._loss.K))
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if self.subsample < 1 or hasattr(self, 'oob_improvement_'):
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = np.resize(self.oob_improvement_, total_n_estimators)
self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
self.oob_score_ = np.nan
else:
self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_score_ = np.nan
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples,), dtype=bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self._loss
if self.verbose:
verbose_reporter = VerboseReporter(verbose=self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.n_iter_no_change is not None:
loss_history = np.full(self.n_iter_no_change, np.inf)
y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, self._rng)
if i == 0:
initial_loss = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask])
raw_predictions = self._fit_stage(i, X, y, raw_predictions, sample_weight, sample_mask, self._rng, X_csc, X_csr)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask], raw_predictions[sample_mask], sample_weight[sample_mask])
self.oob_scores_[i] = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask])
previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
self.oob_score_ = self.oob_scores_[-1]
else:
self.train_score_[i] = loss_(y, raw_predictions, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
if self.n_iter_no_change is not None:
validation_loss = loss_(y_val, next(y_val_pred_iter), sample_weight_val)
if np.any(validation_loss + self.tol < loss_history):
loss_history[i % len(loss_history)] = validation_loss
else:
break
n_stages = i + 1
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.oob_scores_ = self.oob_scores_[:n_stages]
self.oob_score_ = self.oob_scores_[-1]
self.n_estimators_ = n_stages
return self
|
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
y : array-like of shape (n_samples,)
Target values (strings or integers in classification, real numbers
        in regression).
For classification, labels must correspond to classes.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, default=None
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
        computing held-out estimates, early stopping, model introspection, and
        snapshotting.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if not self.warm_start:
<DeepExtract>
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'oob_scores_'):
del self.oob_scores_
if hasattr(self, 'oob_score_'):
del self.oob_score_
if hasattr(self, 'init_'):
del self.init_
if hasattr(self, '_rng'):
del self._rng
</DeepExtract>
(X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE, multi_output=True)
sample_weight_is_none = sample_weight is None
sample_weight = _check_sample_weight(sample_weight, X)
y = column_or_1d(y, warn=True)
if is_classifier(self):
        <DeepExtract>
        pass  # inlined body missing from the source extraction
        </DeepExtract>
    else:
        <DeepExtract>
        pass  # inlined body missing from the source extraction
        </DeepExtract>
<DeepExtract>
if self.loss == 'log_loss':
loss_class = _gb_losses.MultinomialDeviance if len(self.classes_) > 2 else _gb_losses.BinomialDeviance
else:
loss_class = _gb_losses.LOSS_FUNCTIONS[self.loss]
if is_classifier(self):
self._loss = loss_class(self.n_classes_)
elif self.loss in ('huber', 'quantile'):
self._loss = loss_class(self.alpha)
else:
self._loss = loss_class()
if isinstance(self.max_features, str):
if self.max_features == 'auto':
if is_classifier(self):
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = self.n_features_in_
elif self.max_features == 'sqrt':
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = max(1, int(np.log2(self.n_features_in_)))
elif self.max_features is None:
max_features = self.n_features_in_
elif isinstance(self.max_features, Integral):
max_features = self.max_features
else:
max_features = max(1, int(self.max_features * self.n_features_in_))
self.max_features_ = max_features
</DeepExtract>
if self.n_iter_no_change is not None:
stratify = y if is_classifier(self) else None
(X, X_val, y, y_val, sample_weight, sample_weight_val) = train_test_split(X, y, sample_weight, random_state=self.random_state, test_size=self.validation_fraction, stratify=stratify)
if is_classifier(self):
if self._n_classes != np.unique(y).shape[0]:
raise ValueError('The training data after the early stopping split is missing some classes. Try using another random seed.')
else:
X_val = y_val = sample_weight_val = None
if not self._is_initialized():
<DeepExtract>
self.init_ = self.init
if self.init_ is None:
self.init_ = self._loss.init_estimator()
self.estimators_ = np.empty((self.n_estimators, self._loss.K), dtype=object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros(self.n_estimators, dtype=np.float64)
self.oob_scores_ = np.zeros(self.n_estimators, dtype=np.float64)
self.oob_score_ = np.nan
</DeepExtract>
if self.init_ == 'zero':
raw_predictions = np.zeros(shape=(X.shape[0], self._loss.K), dtype=np.float64)
else:
if sample_weight_is_none:
self.init_.fit(X, y)
else:
msg = 'The initial estimator {} does not support sample weights.'.format(self.init_.__class__.__name__)
try:
self.init_.fit(X, y, sample_weight=sample_weight)
except TypeError as e:
if "unexpected keyword argument 'sample_weight'" in str(e):
raise ValueError(msg) from e
else:
raise
except ValueError as e:
if 'pass parameters to specific steps of your pipeline using the stepname__parameter' in str(e):
raise ValueError(msg) from e
else:
raise
raw_predictions = self._loss.get_init_raw_predictions(X, self.init_)
begin_at_stage = 0
self._rng = check_random_state(self.random_state)
else:
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to estimators_.shape[0]=%d when warm_start==True' % (self.n_estimators, self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
X = check_array(X, dtype=DTYPE, order='C', accept_sparse='csr', force_all_finite=False)
<DeepExtract>
raw_predictions = self._raw_predict_init(X)
predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
raw_predictions = raw_predictions
</DeepExtract>
<DeepExtract>
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
        raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_.shape[0]))
self.estimators_ = np.resize(self.estimators_, (total_n_estimators, self._loss.K))
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if self.subsample < 1 or hasattr(self, 'oob_improvement_'):
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = np.resize(self.oob_improvement_, total_n_estimators)
self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
self.oob_score_ = np.nan
else:
self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_score_ = np.nan
</DeepExtract>
<DeepExtract>
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples,), dtype=bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self._loss
if self.verbose:
verbose_reporter = VerboseReporter(verbose=self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.n_iter_no_change is not None:
loss_history = np.full(self.n_iter_no_change, np.inf)
y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, self._rng)
if i == 0:
initial_loss = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask])
raw_predictions = self._fit_stage(i, X, y, raw_predictions, sample_weight, sample_mask, self._rng, X_csc, X_csr)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask], raw_predictions[sample_mask], sample_weight[sample_mask])
self.oob_scores_[i] = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask])
previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
self.oob_score_ = self.oob_scores_[-1]
else:
self.train_score_[i] = loss_(y, raw_predictions, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
if self.n_iter_no_change is not None:
validation_loss = loss_(y_val, next(y_val_pred_iter), sample_weight_val)
if np.any(validation_loss + self.tol < loss_history):
loss_history[i % len(loss_history)] = validation_loss
else:
break
n_stages = i + 1
</DeepExtract>
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.oob_scores_ = self.oob_scores_[:n_stages]
self.oob_score_ = self.oob_scores_[-1]
self.n_estimators_ = n_stages
return self
|
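A minimal sketch of the `monitor` hook documented above, on an illustrative dataset: the callback receives (i, self, locals()) after each stage, and fitting stops as soon as it returns True, here once the out-of-bag improvement stalls.

from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

X, y = make_regression(n_samples=200, noise=10.0, random_state=0)

def monitor(i, est, local_vars):
    # stop once the out-of-bag improvement turns non-positive
    return i > 10 and est.oob_improvement_[i] <= 0

gbr = GradientBoostingRegressor(subsample=0.5, n_estimators=500, random_state=0)
gbr.fit(X, y, monitor=monitor)
print(gbr.n_estimators_)  # number of stages actually fitted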
def _pprint_estimator(self, object, stream, indent, allowance, context, level):
stream.write(object.__class__.__name__ + '(')
if self._indent_at_name:
indent += len(object.__class__.__name__)
if self._changed_only:
params = object.get_params(deep=False)
init_func = getattr(object.__init__, 'deprecated_original', object.__init__)
init_params = inspect.signature(init_func).parameters
init_params = {name: param.default for (name, param) in init_params.items()}
        def has_changed(k, v):
            if k not in init_params:
                return True
            if init_params[k] == inspect._empty:
                return True
            if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
                return True
            if repr(v) != repr(init_params[k]) and (not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):
                return True
            return False
        params = {k: v for (k, v) in params.items() if has_changed(k, v)}
else:
params = object.get_params(deep=False)
params = OrderedDict(((name, val) for (name, val) in sorted(params.items())))
    self._format_params_or_dict_items(params.items(), stream, indent, allowance + 1, context, level, is_dict=False)
stream.write(')')
|
def _pprint_estimator(self, object, stream, indent, allowance, context, level):
stream.write(object.__class__.__name__ + '(')
if self._indent_at_name:
indent += len(object.__class__.__name__)
if self._changed_only:
<DeepExtract>
params = object.get_params(deep=False)
init_func = getattr(object.__init__, 'deprecated_original', object.__init__)
init_params = inspect.signature(init_func).parameters
init_params = {name: param.default for (name, param) in init_params.items()}
        def has_changed(k, v):
            if k not in init_params:
                return True
            if init_params[k] == inspect._empty:
                return True
            if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
                return True
            if repr(v) != repr(init_params[k]) and (not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):
                return True
            return False
        params = {k: v for (k, v) in params.items() if has_changed(k, v)}
</DeepExtract>
else:
params = object.get_params(deep=False)
params = OrderedDict(((name, val) for (name, val) in sorted(params.items())))
<DeepExtract>
    self._format_params_or_dict_items(params.items(), stream, indent, allowance + 1, context, level, is_dict=False)
</DeepExtract>
stream.write(')')
|
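A short illustration of the changed-only printing this method implements: with print_changed_only enabled via sklearn.set_config, repr() shows only parameters that differ from their defaults.

from sklearn import set_config
from sklearn.linear_model import LogisticRegression

set_config(print_changed_only=True)
print(LogisticRegression(C=10.0))   # -> LogisticRegression(C=10.0)
set_config(print_changed_only=False)
print(LogisticRegression(C=10.0))   # -> full parameter listing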
def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
.. versionadded:: 0.18
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
    the precision and recall::
        FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negatives** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
sparse : bool, default=False
Compute contingency matrix internally with sparse matrix.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true = check_array(labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None)
labels_pred = check_array(labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None)
type_label = type_of_target(labels_true)
type_pred = type_of_target(labels_pred)
if 'continuous' in (type_pred, type_label):
msg = f'Clustering metrics expects discrete values but received {type_label} values for label, and {type_pred} values for target'
warnings.warn(msg, UserWarning)
if labels_true.ndim != 1:
raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))
check_consistent_length(labels_true, labels_pred)
(labels_true, labels_pred) = (labels_true, labels_pred)
(n_samples,) = labels_true.shape
    eps = None  # default of the inlined contingency_matrix call, not substituted by the extraction
    dtype = np.int64  # likewise the inlined dtype default
    if eps is not None and True:
raise ValueError("Cannot set 'eps' when sparse=True")
(classes, class_idx) = np.unique(labels_true, return_inverse=True)
(clusters, cluster_idx) = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=dtype)
if True:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
contingency = contingency + eps
c = contingency
c = c.astype(np.int64, copy=False)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return np.sqrt(tk / pk) * np.sqrt(tk / qk) if tk != 0.0 else 0.0
|
def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
.. versionadded:: 0.18
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
    the precision and recall::
        FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negatives** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
sparse : bool, default=False
Compute contingency matrix internally with sparse matrix.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
<DeepExtract>
labels_true = check_array(labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None)
labels_pred = check_array(labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None)
type_label = type_of_target(labels_true)
type_pred = type_of_target(labels_pred)
if 'continuous' in (type_pred, type_label):
msg = f'Clustering metrics expects discrete values but received {type_label} values for label, and {type_pred} values for target'
warnings.warn(msg, UserWarning)
if labels_true.ndim != 1:
raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))
check_consistent_length(labels_true, labels_pred)
(labels_true, labels_pred) = (labels_true, labels_pred)
</DeepExtract>
(n_samples,) = labels_true.shape
<DeepExtract>
    eps = None  # default of the inlined contingency_matrix call, not substituted by the extraction
    dtype = np.int64  # likewise the inlined dtype default
    if eps is not None and True:
raise ValueError("Cannot set 'eps' when sparse=True")
(classes, class_idx) = np.unique(labels_true, return_inverse=True)
(clusters, cluster_idx) = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=dtype)
if True:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
contingency = contingency + eps
c = contingency
</DeepExtract>
c = c.astype(np.int64, copy=False)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return np.sqrt(tk / pk) * np.sqrt(tk / qk) if tk != 0.0 else 0.0
|
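A hedged sketch recomputing the score by hand from the contingency matrix, mirroring the tk/pk/qk expressions at the end of the function; contingency_matrix is public in sklearn.metrics.cluster and the labellings are illustrative.

import numpy as np
from sklearn.metrics.cluster import contingency_matrix, fowlkes_mallows_score

labels_true = [0, 0, 1, 1]
labels_pred = [0, 0, 1, 2]
c = contingency_matrix(labels_true, labels_pred)  # dense counts matrix
n = len(labels_true)
tk = np.sum(c ** 2) - n
pk = np.sum(c.sum(axis=0) ** 2) - n
qk = np.sum(c.sum(axis=1) ** 2) - n
print(np.sqrt(tk / pk) * np.sqrt(tk / qk))              # ~0.7071
print(fowlkes_mallows_score(labels_true, labels_pred))  # same value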
def grow(self):
"""Grow the tree, from root to leaves."""
while self.splittable_nodes:
node = heappop(self.splittable_nodes)
tic = time()
(sample_indices_left, sample_indices_right, right_child_pos) = self.splitter.split_indices(node.split_info, node.sample_indices)
self.total_apply_split_time += time() - tic
depth = node.depth + 1
n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)
n_leaf_nodes += 2
left_child_node = TreeNode(depth, sample_indices_left, node.split_info.sum_gradient_left, node.split_info.sum_hessian_left, value=node.split_info.value_left)
right_child_node = TreeNode(depth, sample_indices_right, node.split_info.sum_gradient_right, node.split_info.sum_hessian_right, value=node.split_info.value_right)
node.right_child = right_child_node
node.left_child = left_child_node
left_child_node.partition_start = node.partition_start
left_child_node.partition_stop = node.partition_start + right_child_pos
right_child_node.partition_start = left_child_node.partition_stop
right_child_node.partition_stop = node.partition_stop
if self.interaction_cst is not None:
(left_child_node.allowed_features, left_child_node.interaction_cst_indices) = self._compute_interactions(node)
right_child_node.interaction_cst_indices = left_child_node.interaction_cst_indices
right_child_node.allowed_features = left_child_node.allowed_features
if not self.has_missing_values[node.split_info.feature_idx]:
node.split_info.missing_go_to_left = left_child_node.n_samples > right_child_node.n_samples
self.n_nodes += 2
self.n_categorical_splits += node.split_info.is_categorical
if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
self._finalize_splittable_nodes()
                continue  # the inlined helper returned here; grow's while loop must keep going
if self.max_depth is not None and depth == self.max_depth:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
                continue  # the inlined helper returned here; grow's while loop must keep going
if left_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(left_child_node)
if right_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(right_child_node)
if self.with_monotonic_cst:
if self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.NO_CST:
lower_left = lower_right = node.children_lower_bound
upper_left = upper_right = node.children_upper_bound
else:
mid = (left_child_node.value + right_child_node.value) / 2
if self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.POS:
(lower_left, upper_left) = (node.children_lower_bound, mid)
(lower_right, upper_right) = (mid, node.children_upper_bound)
else:
(lower_left, upper_left) = (mid, node.children_upper_bound)
(lower_right, upper_right) = (node.children_lower_bound, mid)
left_child_node.set_children_bounds(lower_left, upper_left)
right_child_node.set_children_bounds(lower_right, upper_right)
should_split_left = not left_child_node.is_leaf
should_split_right = not right_child_node.is_leaf
if should_split_left or should_split_right:
n_samples_left = left_child_node.sample_indices.shape[0]
n_samples_right = right_child_node.sample_indices.shape[0]
if n_samples_left < n_samples_right:
smallest_child = left_child_node
largest_child = right_child_node
else:
smallest_child = right_child_node
largest_child = left_child_node
tic = time()
smallest_child.histograms = self.histogram_builder.compute_histograms_brute(smallest_child.sample_indices, smallest_child.allowed_features)
largest_child.histograms = self.histogram_builder.compute_histograms_subtraction(node.histograms, smallest_child.histograms, smallest_child.allowed_features)
self.total_compute_hist_time += time() - tic
tic = time()
if should_split_left:
self._compute_best_split_and_push(left_child_node)
if should_split_right:
self._compute_best_split_and_push(right_child_node)
self.total_find_split_time += time() - tic
for child in (left_child_node, right_child_node):
if child.is_leaf:
del child.histograms
del node.histograms
for leaf in self.finalized_leaves:
leaf.value *= self.shrinkage
|
def grow(self):
"""Grow the tree, from root to leaves."""
while self.splittable_nodes:
<DeepExtract>
node = heappop(self.splittable_nodes)
tic = time()
(sample_indices_left, sample_indices_right, right_child_pos) = self.splitter.split_indices(node.split_info, node.sample_indices)
self.total_apply_split_time += time() - tic
depth = node.depth + 1
n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)
n_leaf_nodes += 2
left_child_node = TreeNode(depth, sample_indices_left, node.split_info.sum_gradient_left, node.split_info.sum_hessian_left, value=node.split_info.value_left)
right_child_node = TreeNode(depth, sample_indices_right, node.split_info.sum_gradient_right, node.split_info.sum_hessian_right, value=node.split_info.value_right)
node.right_child = right_child_node
node.left_child = left_child_node
left_child_node.partition_start = node.partition_start
left_child_node.partition_stop = node.partition_start + right_child_pos
right_child_node.partition_start = left_child_node.partition_stop
right_child_node.partition_stop = node.partition_stop
if self.interaction_cst is not None:
(left_child_node.allowed_features, left_child_node.interaction_cst_indices) = self._compute_interactions(node)
right_child_node.interaction_cst_indices = left_child_node.interaction_cst_indices
right_child_node.allowed_features = left_child_node.allowed_features
if not self.has_missing_values[node.split_info.feature_idx]:
node.split_info.missing_go_to_left = left_child_node.n_samples > right_child_node.n_samples
self.n_nodes += 2
self.n_categorical_splits += node.split_info.is_categorical
if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
self._finalize_splittable_nodes()
                continue  # the inlined helper returned here; grow's while loop must keep going
if self.max_depth is not None and depth == self.max_depth:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
                continue  # the inlined helper returned here; grow's while loop must keep going
if left_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(left_child_node)
if right_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(right_child_node)
if self.with_monotonic_cst:
if self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.NO_CST:
lower_left = lower_right = node.children_lower_bound
upper_left = upper_right = node.children_upper_bound
else:
mid = (left_child_node.value + right_child_node.value) / 2
if self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.POS:
(lower_left, upper_left) = (node.children_lower_bound, mid)
(lower_right, upper_right) = (mid, node.children_upper_bound)
else:
(lower_left, upper_left) = (mid, node.children_upper_bound)
(lower_right, upper_right) = (node.children_lower_bound, mid)
left_child_node.set_children_bounds(lower_left, upper_left)
right_child_node.set_children_bounds(lower_right, upper_right)
should_split_left = not left_child_node.is_leaf
should_split_right = not right_child_node.is_leaf
if should_split_left or should_split_right:
n_samples_left = left_child_node.sample_indices.shape[0]
n_samples_right = right_child_node.sample_indices.shape[0]
if n_samples_left < n_samples_right:
smallest_child = left_child_node
largest_child = right_child_node
else:
smallest_child = right_child_node
largest_child = left_child_node
tic = time()
smallest_child.histograms = self.histogram_builder.compute_histograms_brute(smallest_child.sample_indices, smallest_child.allowed_features)
largest_child.histograms = self.histogram_builder.compute_histograms_subtraction(node.histograms, smallest_child.histograms, smallest_child.allowed_features)
self.total_compute_hist_time += time() - tic
tic = time()
if should_split_left:
self._compute_best_split_and_push(left_child_node)
if should_split_right:
self._compute_best_split_and_push(right_child_node)
self.total_find_split_time += time() - tic
for child in (left_child_node, right_child_node):
if child.is_leaf:
del child.histograms
del node.histograms
</DeepExtract>
<DeepExtract>
for leaf in self.finalized_leaves:
leaf.value *= self.shrinkage
</DeepExtract>
|
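The fragment above is the tail of the private histogram-GBDT tree grower (child-node creation, histogram subtraction, monotonic bounds, leaf shrinkage). A minimal sketch of the public estimator that drives this code path, on toy data with arbitrary parameter values:

from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
# max_leaf_nodes and max_depth map onto the stopping checks in the grower above.
est = HistGradientBoostingRegressor(max_leaf_nodes=15, max_depth=4, random_state=0)
est.fit(X, y)
print(est.score(X, y))
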
@pytest.mark.parametrize('precompute, inner_precompute', [(True, True), ('auto', False), (False, False)])
def test_lassoCV_does_not_set_precompute(monkeypatch, precompute, inner_precompute):
    random_state = np.random.RandomState(0)
    # Defaults of the inlined build_dataset helper, restored so the names below are bound:
    (n_samples, n_features, n_informative_features, n_targets) = (50, 200, 10, 1)
    if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
calls = 0
class LassoMock(Lasso):
def fit(self, X, y):
super().fit(X, y)
nonlocal calls
calls += 1
assert self.precompute == inner_precompute
monkeypatch.setattr('sklearn.linear_model._coordinate_descent.Lasso', LassoMock)
clf = LassoCV(precompute=precompute)
clf.fit(X, y)
assert calls > 0
|
@pytest.mark.parametrize('precompute, inner_precompute', [(True, True), ('auto', False), (False, False)])
def test_lassoCV_does_not_set_precompute(monkeypatch, precompute, inner_precompute):
<DeepExtract>
    random_state = np.random.RandomState(0)
    # Defaults of the inlined build_dataset helper, restored so the names below are bound:
    (n_samples, n_features, n_informative_features, n_targets) = (50, 200, 10, 1)
    if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
calls = 0
class LassoMock(Lasso):
def fit(self, X, y):
super().fit(X, y)
nonlocal calls
calls += 1
assert self.precompute == inner_precompute
monkeypatch.setattr('sklearn.linear_model._coordinate_descent.Lasso', LassoMock)
clf = LassoCV(precompute=precompute)
clf.fit(X, y)
assert calls > 0
|
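For context, a hedged sketch of the behavior the test above pins down: LassoCV accepts precompute='auto', but the final Lasso refit on the full training set receives a concrete boolean ('auto' maps to False). Toy data, arbitrary values:

import numpy as np
from sklearn.linear_model import LassoCV

rng = np.random.RandomState(0)
X = rng.randn(60, 8)
y = X[:, 0] - X[:, 1] + 0.1 * rng.randn(60)
clf = LassoCV(precompute='auto', cv=3).fit(X, y)
print(clf.alpha_ > 0)  # fitted; the inner Lasso never saw precompute='auto'
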
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values (integers). Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True, ensure_min_samples=2)
(n_samples, n_features) = X.shape
coef_ = np.zeros(n_features, dtype=X.dtype)
(X, y, X_offset_, y_offset_, X_scale_) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
eps = np.finfo(np.float64).eps
alpha_ = 1.0 / (np.var(y) + eps)
lambda_ = np.ones(n_features, dtype=X.dtype)
self.scores_ = list()
coef_old_ = None
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([sigma_, X[:, keep_lambda].T, y])
return coef_
update_sigma = self._update_sigma if n_samples >= n_features else self._update_sigma_woodbury
for iter_ in range(self.n_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([sigma_, X[:, keep_lambda].T, y])
coef_ = coef_
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (coef_[keep_lambda] ** 2 + 2.0 * lambda_2)
alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (rmse_ + 2.0 * alpha_2)
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) + np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print('Converged after %s iterations' % iter_)
break
coef_old_ = np.copy(coef_)
if not keep_lambda.any():
break
if keep_lambda.any():
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([sigma_, X[:, keep_lambda].T, y])
coef_ = coef_
else:
sigma_ = np.array([]).reshape(0, 0)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
|
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values (integers). Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True, ensure_min_samples=2)
(n_samples, n_features) = X.shape
coef_ = np.zeros(n_features, dtype=X.dtype)
(X, y, X_offset_, y_offset_, X_scale_) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
eps = np.finfo(np.float64).eps
alpha_ = 1.0 / (np.var(y) + eps)
lambda_ = np.ones(n_features, dtype=X.dtype)
self.scores_ = list()
coef_old_ = None
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([sigma_, X[:, keep_lambda].T, y])
return coef_
update_sigma = self._update_sigma if n_samples >= n_features else self._update_sigma_woodbury
for iter_ in range(self.n_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
<DeepExtract>
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([sigma_, X[:, keep_lambda].T, y])
coef_ = coef_
</DeepExtract>
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (coef_[keep_lambda] ** 2 + 2.0 * lambda_2)
alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (rmse_ + 2.0 * alpha_2)
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) + np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print('Converged after %s iterations' % iter_)
break
coef_old_ = np.copy(coef_)
if not keep_lambda.any():
break
if keep_lambda.any():
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
<DeepExtract>
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([sigma_, X[:, keep_lambda].T, y])
coef_ = coef_
</DeepExtract>
else:
sigma_ = np.array([]).reshape(0, 0)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
|
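A minimal usage sketch of the ARD evidence-maximization loop above, on toy data (shapes and coefficients chosen arbitrarily): features whose precision lambda_ exceeds threshold_lambda are pruned to an exact zero coefficient.

import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
w = np.zeros(10)
w[:3] = [1.0, -2.0, 0.5]  # only three informative features
y = X @ w + 0.01 * rng.randn(50)
ard = ARDRegression(compute_score=True).fit(X, y)
print(np.sum(ard.coef_ != 0))  # uninformative features are mostly pruned
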
def test_cross_val_predict_with_method():
(X, y) = make_classification(n_classes=2, random_state=0)
for method in ['decision_function', 'predict_proba', 'predict_log_proba']:
check_cross_val_predict_binary(LogisticRegression(solver='liblinear'), X, y, method)
iris = load_iris()
(X, y) = (iris.data, iris.target)
(X, y) = shuffle(X, y, random_state=0)
for method in ['decision_function', 'predict_proba', 'predict_log_proba']:
check_cross_val_predict_multiclass(LogisticRegression(solver='liblinear'), X, y, method)
|
def test_cross_val_predict_with_method():
<DeepExtract>
(X, y) = make_classification(n_classes=2, random_state=0)
for method in ['decision_function', 'predict_proba', 'predict_log_proba']:
check_cross_val_predict_binary(LogisticRegression(solver='liblinear'), X, y, method)
</DeepExtract>
<DeepExtract>
iris = load_iris()
(X, y) = (iris.data, iris.target)
(X, y) = shuffle(X, y, random_state=0)
for method in ['decision_function', 'predict_proba', 'predict_log_proba']:
check_cross_val_predict_multiclass(LogisticRegression(solver='liblinear'), X, y, method)
</DeepExtract>
|
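A hedged sketch of what the test above exercises: cross_val_predict with a response method returns one column per class instead of hard labels.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

X, y = load_iris(return_X_y=True)
proba = cross_val_predict(LogisticRegression(max_iter=1000), X, y, cv=3, method='predict_proba')
print(proba.shape)  # (150, 3): one probability column per class
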
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
        self : object
            Fitted estimator.
"""
self._validate_params()
        self._check_params_vs_input(X)
        embedding = self._fit(X)
        self.embedding_ = embedding
return self
|
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
        self : object
            Fitted estimator.
"""
self._validate_params()
<DeepExtract>
self._validate_params()
self._check_params_vs_input(X)
embedding = self._fit(X)
self.embedding_ = embedding
</DeepExtract>
return self
|
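As the inlined body shows, TSNE.fit simply runs the fit_transform path and stores embedding_. A minimal sketch on random data (perplexity chosen arbitrarily):

import numpy as np
from sklearn.manifold import TSNE

X = np.random.RandomState(0).randn(100, 20)
tsne = TSNE(n_components=2, perplexity=10, random_state=0)
emb = tsne.fit_transform(X)  # same computation as fit(); embedding_ is stored
print(emb.shape)  # (100, 2)
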
def test_base_chain_random_order():
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
for chain in [ClassifierChain(LogisticRegression()), RegressorChain(Ridge())]:
chain_random = clone(chain).set_params(order='random', random_state=42)
chain_random.fit(X, Y)
chain_fixed = clone(chain).set_params(order=chain_random.order_)
chain_fixed.fit(X, Y)
assert_array_equal(chain_fixed.order_, chain_random.order_)
        assert list(chain_random.order_) != list(range(4))
assert len(chain_random.order_) == 4
assert len(set(chain_random.order_)) == 4
for (est1, est2) in zip(chain_random.estimators_, chain_fixed.estimators_):
assert_array_almost_equal(est1.coef_, est2.coef_)
|
def test_base_chain_random_order():
<DeepExtract>
(X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y])
(X, Y) = (X, Y_multi)
</DeepExtract>
for chain in [ClassifierChain(LogisticRegression()), RegressorChain(Ridge())]:
chain_random = clone(chain).set_params(order='random', random_state=42)
chain_random.fit(X, Y)
chain_fixed = clone(chain).set_params(order=chain_random.order_)
chain_fixed.fit(X, Y)
assert_array_equal(chain_fixed.order_, chain_random.order_)
        assert list(chain_random.order_) != list(range(4))
assert len(chain_random.order_) == 4
assert len(set(chain_random.order_)) == 4
for (est1, est2) in zip(chain_random.estimators_, chain_fixed.estimators_):
assert_array_almost_equal(est1.coef_, est2.coef_)
|
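A hedged sketch of the reuse pattern the test above verifies: the order_ learned by a randomly ordered chain can seed a second, fixed-order chain that behaves identically.

from sklearn.datasets import make_multilabel_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import ClassifierChain

X, Y = make_multilabel_classification(n_samples=100, n_classes=4, random_state=0)
chain = ClassifierChain(LogisticRegression(max_iter=1000), order='random', random_state=42).fit(X, Y)
refit = ClassifierChain(LogisticRegression(max_iter=1000), order=chain.order_).fit(X, Y)
print(list(chain.order_) == list(refit.order_))  # True
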
def set_config(assume_finite=None, working_memory=None, print_changed_only=None, display=None, pairwise_dist_chunk_size=None, enable_cython_pairwise_dist=None, array_api_dispatch=None, transform_output=None):
"""Set global scikit-learn configuration
.. versionadded:: 0.19
Parameters
----------
assume_finite : bool, default=None
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error. Global default: False.
.. versionadded:: 0.19
working_memory : int, default=None
If set, scikit-learn will attempt to limit the size of temporary arrays
to this number of MiB (per job when parallelised), often saving both
computation time and memory on expensive operations that can be
performed in chunks. Global default: 1024.
.. versionadded:: 0.20
print_changed_only : bool, default=None
If True, only the parameters that were set to non-default
values will be printed when printing an estimator. For example,
``print(SVC())`` while True will only print 'SVC()' while the default
behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with
all the non-changed parameters.
.. versionadded:: 0.21
display : {'text', 'diagram'}, default=None
If 'diagram', estimators will be displayed as a diagram in a Jupyter
lab or notebook context. If 'text', estimators will be displayed as
text. Default is 'diagram'.
.. versionadded:: 0.23
pairwise_dist_chunk_size : int, default=None
The number of row vectors per chunk for the accelerated pairwise-
distances reduction backend. Default is 256 (suitable for most of
modern laptops' caches and architectures).
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
enable_cython_pairwise_dist : bool, default=None
Use the accelerated pairwise-distances reduction backend when
possible. Global default: True.
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
array_api_dispatch : bool, default=None
Use Array API dispatching when inputs follow the Array API standard.
Default is False.
See the :ref:`User Guide <array_api>` for more details.
.. versionadded:: 1.2
transform_output : str, default=None
Configure output of `transform` and `fit_transform`.
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
for an example on how to use the API.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.2
See Also
--------
config_context : Context manager for global scikit-learn configuration.
get_config : Retrieve current values of the global configuration.
"""
if not hasattr(_threadlocal, 'global_config'):
_threadlocal.global_config = _global_config.copy()
local_config = _threadlocal.global_config
if assume_finite is not None:
local_config['assume_finite'] = assume_finite
if working_memory is not None:
local_config['working_memory'] = working_memory
if print_changed_only is not None:
local_config['print_changed_only'] = print_changed_only
if display is not None:
local_config['display'] = display
if pairwise_dist_chunk_size is not None:
local_config['pairwise_dist_chunk_size'] = pairwise_dist_chunk_size
if enable_cython_pairwise_dist is not None:
local_config['enable_cython_pairwise_dist'] = enable_cython_pairwise_dist
if array_api_dispatch is not None:
local_config['array_api_dispatch'] = array_api_dispatch
if transform_output is not None:
local_config['transform_output'] = transform_output
|
def set_config(assume_finite=None, working_memory=None, print_changed_only=None, display=None, pairwise_dist_chunk_size=None, enable_cython_pairwise_dist=None, array_api_dispatch=None, transform_output=None):
"""Set global scikit-learn configuration
.. versionadded:: 0.19
Parameters
----------
assume_finite : bool, default=None
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error. Global default: False.
.. versionadded:: 0.19
working_memory : int, default=None
If set, scikit-learn will attempt to limit the size of temporary arrays
to this number of MiB (per job when parallelised), often saving both
computation time and memory on expensive operations that can be
performed in chunks. Global default: 1024.
.. versionadded:: 0.20
print_changed_only : bool, default=None
If True, only the parameters that were set to non-default
values will be printed when printing an estimator. For example,
``print(SVC())`` while True will only print 'SVC()' while the default
behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with
all the non-changed parameters.
.. versionadded:: 0.21
display : {'text', 'diagram'}, default=None
If 'diagram', estimators will be displayed as a diagram in a Jupyter
lab or notebook context. If 'text', estimators will be displayed as
text. Default is 'diagram'.
.. versionadded:: 0.23
pairwise_dist_chunk_size : int, default=None
The number of row vectors per chunk for the accelerated pairwise-
distances reduction backend. Default is 256 (suitable for most of
modern laptops' caches and architectures).
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
enable_cython_pairwise_dist : bool, default=None
Use the accelerated pairwise-distances reduction backend when
possible. Global default: True.
Intended for easier benchmarking and testing of scikit-learn internals.
End users are not expected to benefit from customizing this configuration
setting.
.. versionadded:: 1.1
array_api_dispatch : bool, default=None
Use Array API dispatching when inputs follow the Array API standard.
Default is False.
See the :ref:`User Guide <array_api>` for more details.
.. versionadded:: 1.2
transform_output : str, default=None
Configure output of `transform` and `fit_transform`.
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
for an example on how to use the API.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.2
See Also
--------
config_context : Context manager for global scikit-learn configuration.
get_config : Retrieve current values of the global configuration.
"""
<DeepExtract>
if not hasattr(_threadlocal, 'global_config'):
_threadlocal.global_config = _global_config.copy()
local_config = _threadlocal.global_config
</DeepExtract>
if assume_finite is not None:
local_config['assume_finite'] = assume_finite
if working_memory is not None:
local_config['working_memory'] = working_memory
if print_changed_only is not None:
local_config['print_changed_only'] = print_changed_only
if display is not None:
local_config['display'] = display
if pairwise_dist_chunk_size is not None:
local_config['pairwise_dist_chunk_size'] = pairwise_dist_chunk_size
if enable_cython_pairwise_dist is not None:
local_config['enable_cython_pairwise_dist'] = enable_cython_pairwise_dist
if array_api_dispatch is not None:
local_config['array_api_dispatch'] = array_api_dispatch
if transform_output is not None:
local_config['transform_output'] = transform_output
|
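A minimal usage sketch of the thread-local configuration store shown above; config_context (see the docstring's See Also) restores previous values on exit.

import sklearn

sklearn.set_config(assume_finite=True, working_memory=512)
print(sklearn.get_config()['assume_finite'])  # True
with sklearn.config_context(assume_finite=False):
    print(sklearn.get_config()['assume_finite'])  # False inside the context
print(sklearn.get_config()['assume_finite'])  # True again afterwards
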
def _check_w_h(self, X, W, H, update_H):
"""Check W and H, or initialize them."""
(n_samples, n_features) = X.shape
if self.init == 'custom' and update_H:
H = check_array(H)
if np.shape(H) != (self._n_components, n_features):
raise ValueError('Array with wrong shape passed to %s. Expected %s, but got %s ' % ('NMF (input H)', (self._n_components, n_features), np.shape(H)))
check_non_negative(H, 'NMF (input H)')
if np.max(H) == 0:
raise ValueError('Array passed to %s is full of zeros.' % 'NMF (input H)')
W = check_array(W)
if np.shape(W) != (n_samples, self._n_components):
raise ValueError('Array with wrong shape passed to %s. Expected %s, but got %s ' % ('NMF (input W)', (n_samples, self._n_components), np.shape(W)))
check_non_negative(W, 'NMF (input W)')
if np.max(W) == 0:
raise ValueError('Array passed to %s is full of zeros.' % 'NMF (input W)')
if H.dtype != X.dtype or W.dtype != X.dtype:
raise TypeError('H and W should have the same dtype as X. Got H.dtype = {} and W.dtype = {}.'.format(H.dtype, W.dtype))
elif not update_H:
H = check_array(H)
if np.shape(H) != (self._n_components, n_features):
raise ValueError('Array with wrong shape passed to %s. Expected %s, but got %s ' % ('NMF (input H)', (self._n_components, n_features), np.shape(H)))
check_non_negative(H, 'NMF (input H)')
if np.max(H) == 0:
raise ValueError('Array passed to %s is full of zeros.' % 'NMF (input H)')
if H.dtype != X.dtype:
raise TypeError('H should have the same dtype as X. Got H.dtype = {}.'.format(H.dtype))
if self.solver == 'mu':
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((n_samples, self._n_components), avg, dtype=X.dtype)
else:
W = np.zeros((n_samples, self._n_components), dtype=X.dtype)
else:
check_non_negative(X, 'NMF initialization')
(n_samples, n_features) = X.shape
if self.init is not None and self.init != 'random' and (self._n_components > min(n_samples, n_features)):
raise ValueError("init = '{}' can only be used when n_components <= min(n_samples, n_features)".format(self.init))
if self.init is None:
if self._n_components <= min(n_samples, n_features):
self.init = 'nndsvda'
else:
self.init = 'random'
        # 'random' init must not fall through to the SVD path below
        # (the inlined helper returned early here).
        if self.init == 'random':
            avg = np.sqrt(X.mean() / self._n_components)
            rng = check_random_state(self.random_state)
            H = avg * rng.standard_normal(size=(self._n_components, n_features)).astype(X.dtype, copy=False)
            W = avg * rng.standard_normal(size=(n_samples, self._n_components)).astype(X.dtype, copy=False)
            np.abs(H, out=H)
            np.abs(W, out=W)
        else:
            # NNDSVD-based initialization; eps is the small-value cutoff
            # (1e-6, the inlined helper's default, restored here).
            eps = 1e-06
            (U, S, V) = randomized_svd(X, self._n_components, random_state=self.random_state)
            W = np.zeros_like(U)
            H = np.zeros_like(V)
            W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
            H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
            for j in range(1, self._n_components):
                (x, y) = (U[:, j], V[j, :])
                (x_p, y_p) = (np.maximum(x, 0), np.maximum(y, 0))
                (x_n, y_n) = (np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0)))
                (x_p_nrm, y_p_nrm) = (norm(x_p), norm(y_p))
                (x_n_nrm, y_n_nrm) = (norm(x_n), norm(y_n))
                (m_p, m_n) = (x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm)
                if m_p > m_n:
                    u = x_p / x_p_nrm
                    v = y_p / y_p_nrm
                    sigma = m_p
                else:
                    u = x_n / x_n_nrm
                    v = y_n / y_n_nrm
                    sigma = m_n
                lbd = np.sqrt(S[j] * sigma)
                W[:, j] = lbd * u
                H[j, :] = lbd * v
            W[W < eps] = 0
            H[H < eps] = 0
            if self.init == 'nndsvd':
                pass
            elif self.init == 'nndsvda':
                avg = X.mean()
                W[W == 0] = avg
                H[H == 0] = avg
            elif self.init == 'nndsvdar':
                rng = check_random_state(self.random_state)
                avg = X.mean()
                W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
                H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
            else:
                raise ValueError('Invalid init parameter: got %r instead of one of %r' % (self.init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return (W, H)
|
def _check_w_h(self, X, W, H, update_H):
"""Check W and H, or initialize them."""
(n_samples, n_features) = X.shape
if self.init == 'custom' and update_H:
<DeepExtract>
H = check_array(H)
if np.shape(H) != (self._n_components, n_features):
raise ValueError('Array with wrong shape passed to %s. Expected %s, but got %s ' % ('NMF (input H)', (self._n_components, n_features), np.shape(H)))
check_non_negative(H, 'NMF (input H)')
if np.max(H) == 0:
raise ValueError('Array passed to %s is full of zeros.' % 'NMF (input H)')
</DeepExtract>
<DeepExtract>
W = check_array(W)
if np.shape(W) != (n_samples, self._n_components):
raise ValueError('Array with wrong shape passed to %s. Expected %s, but got %s ' % ('NMF (input W)', (n_samples, self._n_components), np.shape(W)))
check_non_negative(W, 'NMF (input W)')
if np.max(W) == 0:
raise ValueError('Array passed to %s is full of zeros.' % 'NMF (input W)')
</DeepExtract>
if H.dtype != X.dtype or W.dtype != X.dtype:
raise TypeError('H and W should have the same dtype as X. Got H.dtype = {} and W.dtype = {}.'.format(H.dtype, W.dtype))
elif not update_H:
<DeepExtract>
H = check_array(H)
if np.shape(H) != (self._n_components, n_features):
raise ValueError('Array with wrong shape passed to %s. Expected %s, but got %s ' % ('NMF (input H)', (self._n_components, n_features), np.shape(H)))
check_non_negative(H, 'NMF (input H)')
if np.max(H) == 0:
raise ValueError('Array passed to %s is full of zeros.' % 'NMF (input H)')
</DeepExtract>
if H.dtype != X.dtype:
raise TypeError('H should have the same dtype as X. Got H.dtype = {}.'.format(H.dtype))
if self.solver == 'mu':
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((n_samples, self._n_components), avg, dtype=X.dtype)
else:
W = np.zeros((n_samples, self._n_components), dtype=X.dtype)
else:
<DeepExtract>
check_non_negative(X, 'NMF initialization')
(n_samples, n_features) = X.shape
if self.init is not None and self.init != 'random' and (self._n_components > min(n_samples, n_features)):
raise ValueError("init = '{}' can only be used when n_components <= min(n_samples, n_features)".format(self.init))
if self.init is None:
if self._n_components <= min(n_samples, n_features):
self.init = 'nndsvda'
else:
self.init = 'random'
        # 'random' init must not fall through to the SVD path below
        # (the inlined helper returned early here).
        if self.init == 'random':
            avg = np.sqrt(X.mean() / self._n_components)
            rng = check_random_state(self.random_state)
            H = avg * rng.standard_normal(size=(self._n_components, n_features)).astype(X.dtype, copy=False)
            W = avg * rng.standard_normal(size=(n_samples, self._n_components)).astype(X.dtype, copy=False)
            np.abs(H, out=H)
            np.abs(W, out=W)
        else:
            # NNDSVD-based initialization; eps is the small-value cutoff
            # (1e-6, the inlined helper's default, restored here).
            eps = 1e-06
            (U, S, V) = randomized_svd(X, self._n_components, random_state=self.random_state)
            W = np.zeros_like(U)
            H = np.zeros_like(V)
            W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
            H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
            for j in range(1, self._n_components):
                (x, y) = (U[:, j], V[j, :])
                (x_p, y_p) = (np.maximum(x, 0), np.maximum(y, 0))
                (x_n, y_n) = (np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0)))
                (x_p_nrm, y_p_nrm) = (norm(x_p), norm(y_p))
                (x_n_nrm, y_n_nrm) = (norm(x_n), norm(y_n))
                (m_p, m_n) = (x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm)
                if m_p > m_n:
                    u = x_p / x_p_nrm
                    v = y_p / y_p_nrm
                    sigma = m_p
                else:
                    u = x_n / x_n_nrm
                    v = y_n / y_n_nrm
                    sigma = m_n
                lbd = np.sqrt(S[j] * sigma)
                W[:, j] = lbd * u
                H[j, :] = lbd * v
            W[W < eps] = 0
            H[H < eps] = 0
            if self.init == 'nndsvd':
                pass
            elif self.init == 'nndsvda':
                avg = X.mean()
                W[W == 0] = avg
                H[H == 0] = avg
            elif self.init == 'nndsvdar':
                rng = check_random_state(self.random_state)
                avg = X.mean()
                W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
                H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
            else:
                raise ValueError('Invalid init parameter: got %r instead of one of %r' % (self.init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
</DeepExtract>
return (W, H)
|
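A hedged sketch of the init='custom' path validated by _check_w_h above: both factors must be supplied with matching shapes, dtypes, and non-negative entries.

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.RandomState(0)
X = np.abs(rng.randn(6, 5))
W0 = np.abs(rng.randn(6, 2))
H0 = np.abs(rng.randn(2, 5))
nmf = NMF(n_components=2, init='custom', max_iter=500)
W = nmf.fit_transform(X, W=W0, H=H0)  # shapes and dtypes checked by _check_w_h
print(W.shape, nmf.components_.shape)  # (6, 2) (2, 5)
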
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response):
"""Check that a warning is raised when multiple versions exist and no version is
requested."""
data_id = 61
data_name = 'iris'
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
msg = 'Multiple active versions of the dataset matching the name iris exist. Versions may be fundamentally different, returning version 1.'
with pytest.warns(UserWarning, match=msg):
fetch_openml(name=data_name, as_frame=False, cache=False, parser='liac-arff')
|
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response):
"""Check that a warning is raised when multiple versions exist and no version is
requested."""
data_id = 61
data_name = 'iris'
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
msg = 'Multiple active versions of the dataset matching the name iris exist. Versions may be fundamentally different, returning version 1.'
with pytest.warns(UserWarning, match=msg):
fetch_openml(name=data_name, as_frame=False, cache=False, parser='liac-arff')
|
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units):
self._coef_indptr = []
self._intercept_indptr = []
start = 0
for i in range(self.n_layers_ - 1):
(n_fan_in, n_fan_out) = (layer_units[i], layer_units[i + 1])
end = start + n_fan_in * n_fan_out
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
packed_coef_inter = np.hstack([l.ravel() for l in self.coefs_ + self.intercepts_])
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
opt_res = scipy.optimize.minimize(self._loss_grad_lbfgs, packed_coef_inter, method='L-BFGS-B', jac=True, options={'maxfun': self.max_fun, 'maxiter': self.max_iter, 'iprint': iprint, 'gtol': self.tol}, args=(X, y, activations, deltas, coef_grads, intercept_grads))
self.n_iter_ = _check_optimize_result('lbfgs', opt_res, self.max_iter)
self.loss_ = opt_res.fun
for i in range(self.n_layers_ - 1):
(start, end, shape) = self._coef_indptr[i]
self.coefs_[i] = np.reshape(opt_res.x[start:end], shape)
(start, end) = self._intercept_indptr[i]
self.intercepts_[i] = opt_res.x[start:end]
|
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units):
self._coef_indptr = []
self._intercept_indptr = []
start = 0
for i in range(self.n_layers_ - 1):
(n_fan_in, n_fan_out) = (layer_units[i], layer_units[i + 1])
end = start + n_fan_in * n_fan_out
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
<DeepExtract>
packed_coef_inter = np.hstack([l.ravel() for l in self.coefs_ + self.intercepts_])
</DeepExtract>
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
opt_res = scipy.optimize.minimize(self._loss_grad_lbfgs, packed_coef_inter, method='L-BFGS-B', jac=True, options={'maxfun': self.max_fun, 'maxiter': self.max_iter, 'iprint': iprint, 'gtol': self.tol}, args=(X, y, activations, deltas, coef_grads, intercept_grads))
self.n_iter_ = _check_optimize_result('lbfgs', opt_res, self.max_iter)
self.loss_ = opt_res.fun
<DeepExtract>
for i in range(self.n_layers_ - 1):
(start, end, shape) = self._coef_indptr[i]
self.coefs_[i] = np.reshape(opt_res.x[start:end], shape)
(start, end) = self._intercept_indptr[i]
self.intercepts_[i] = opt_res.x[start:end]
</DeepExtract>
|
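The packing/unpacking above backs solver='lbfgs' in the public MLP estimators: all layer weights are flattened into one vector for scipy.optimize.minimize and unpacked afterwards. A toy sketch (layer sizes arbitrary):

import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.RandomState(0)
X = rng.randn(80, 4)
y = X[:, 0] - 2 * X[:, 1]
mlp = MLPRegressor(hidden_layer_sizes=(8,), solver='lbfgs', max_iter=500, random_state=0)
mlp.fit(X, y)  # coefficients round-trip through the packed L-BFGS-B vector
print(len(mlp.coefs_), mlp.n_iter_)
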
def inplace_swap_column(X, m, n):
"""
Swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
if m > n:
(m, n) = (n, m)
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start], X.indices[n_start:n_stop], X.indices[m_stop:n_start], X.indices[m_start:m_stop], X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start], X.data[n_start:n_stop], X.data[m_stop:n_start], X.data[m_start:m_stop], X.data[n_stop:]])
elif isinstance(X, sp.csr_matrix):
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
else:
input_type = X.format if sp.issparse(X) else type(X)
err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type
raise TypeError(err)
|
def inplace_swap_column(X, m, n):
"""
Swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
<DeepExtract>
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
if m > n:
(m, n) = (n, m)
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start], X.indices[n_start:n_stop], X.indices[m_stop:n_start], X.indices[m_start:m_stop], X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start], X.data[n_start:n_stop], X.data[m_stop:n_start], X.data[m_start:m_stop], X.data[n_stop:]])
</DeepExtract>
elif isinstance(X, sp.csr_matrix):
<DeepExtract>
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError('m and n should be valid integers')
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
</DeepExtract>
else:
<DeepExtract>
input_type = X.format if sp.issparse(X) else type(X)
err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type
raise TypeError(err)
</DeepExtract>
|
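A minimal usage sketch for the helper documented above; for CSR input only the indices array is rewritten, so the swap needs no data movement.

import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import inplace_swap_column

X = sp.csr_matrix(np.array([[0.0, 1.0, 2.0], [3.0, 0.0, 4.0]]))
inplace_swap_column(X, 0, 2)
print(X.toarray())  # columns 0 and 2 exchanged in place
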
def load_linnerud(*, return_X_y=False, as_frame=False):
"""Load and return the physical exercise Linnerud dataset.
This dataset is suitable for multi-output regression tasks.
============== ============================
Samples total 20
Dimensionality 3 (for both data and target)
Features integer
Targets integer
============== ============================
Read more in the :ref:`User Guide <linnerrud_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (20, 3)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, dataframe} of shape (20, 3)
The regression targets. If `as_frame=True`, `target` will be
a pandas DataFrame.
feature_names: list
The names of the dataset columns.
target_names: list
The names of the target columns.
frame: DataFrame of shape (20, 6)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarrays or dataframe of shape
`(20, 3)`. Each row represents one sample and each column represents the
features in `X` and a target in `y` of a given sample.
.. versionadded:: 0.18
"""
data_filename = 'linnerud_exercise.csv'
target_filename = 'linnerud_physiological.csv'
with _open_text(DATA_MODULE, data_filename) as f:
header_exercise = f.readline().split()
f.seek(0)
data_exercise = np.loadtxt(f, skiprows=1)
with _open_text(DATA_MODULE, target_filename) as f:
header_physiological = f.readline().split()
f.seek(0)
data_physiological = np.loadtxt(f, skiprows=1)
    fdescr = _read_text(DESCR_MODULE, 'linnerud.rst')
frame = None
if as_frame:
pd = check_pandas_support('{} with as_frame=True'.format('load_linnerud'))
        sparse_data = False  # default of the inlined _convert_data_dataframe helper
        if not sparse_data:
data_df = pd.DataFrame(data_exercise, columns=header_exercise)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(data_exercise, columns=header_exercise)
target_df = pd.DataFrame(data_physiological, columns=header_physiological)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[header_exercise]
y = combined_df[header_physiological]
if y.shape[1] == 1:
y = y.iloc[:, 0]
(frame, data_exercise, data_physiological) = (combined_df, X, y)
if return_X_y:
return (data_exercise, data_physiological)
return Bunch(data=data_exercise, feature_names=header_exercise, target=data_physiological, target_names=header_physiological, frame=frame, DESCR=fdescr, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE)
|
def load_linnerud(*, return_X_y=False, as_frame=False):
"""Load and return the physical exercise Linnerud dataset.
This dataset is suitable for multi-output regression tasks.
============== ============================
Samples total 20
Dimensionality 3 (for both data and target)
Features integer
Targets integer
============== ============================
Read more in the :ref:`User Guide <linnerrud_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (20, 3)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, dataframe} of shape (20, 3)
The regression targets. If `as_frame=True`, `target` will be
a pandas DataFrame.
feature_names: list
The names of the dataset columns.
target_names: list
The names of the target columns.
frame: DataFrame of shape (20, 6)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarrays or dataframe of shape
`(20, 3)`. Each row represents one sample and each column represents the
features in `X` and a target in `y` of a given sample.
.. versionadded:: 0.18
"""
data_filename = 'linnerud_exercise.csv'
target_filename = 'linnerud_physiological.csv'
with _open_text(DATA_MODULE, data_filename) as f:
header_exercise = f.readline().split()
f.seek(0)
data_exercise = np.loadtxt(f, skiprows=1)
with _open_text(DATA_MODULE, target_filename) as f:
header_physiological = f.readline().split()
f.seek(0)
data_physiological = np.loadtxt(f, skiprows=1)
<DeepExtract>
    fdescr = _read_text(DESCR_MODULE, 'linnerud.rst')
</DeepExtract>
frame = None
if as_frame:
<DeepExtract>
pd = check_pandas_support('{} with as_frame=True'.format('load_linnerud'))
        sparse_data = False  # default of the inlined _convert_data_dataframe helper
        if not sparse_data:
data_df = pd.DataFrame(data_exercise, columns=header_exercise)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(data_exercise, columns=header_exercise)
target_df = pd.DataFrame(data_physiological, columns=header_physiological)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[header_exercise]
y = combined_df[header_physiological]
if y.shape[1] == 1:
y = y.iloc[:, 0]
(frame, data_exercise, data_physiological) = (combined_df, X, y)
</DeepExtract>
if return_X_y:
return (data_exercise, data_physiological)
return Bunch(data=data_exercise, feature_names=header_exercise, target=data_physiological, target_names=header_physiological, frame=frame, DESCR=fdescr, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE)
|
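A minimal usage sketch of the loader above, showing both return styles:

from sklearn.datasets import load_linnerud

bunch = load_linnerud(as_frame=True)
print(bunch.frame.shape)  # (20, 6): 3 exercise features + 3 physiological targets
X, y = load_linnerud(return_X_y=True)
print(X.shape, y.shape)  # (20, 3) (20, 3)
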
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Partially fitted estimator.
"""
first_time = not hasattr(self, 'components_')
if first_time:
self._validate_params()
dtype = [np.float64, np.float32] if first_time else self.components_.dtype
X = self._validate_data(X, reset=first_time, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.partial_fit')
X = X
(n_samples, n_features) = X.shape
batch_size = self.batch_size
if first_time:
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1.0 / self.n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1.0 / self.n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.0
init_var = 1.0 / init_gamma
self.components_ = self.random_state_.gamma(init_gamma, init_var, (self.n_components, n_features)).astype(X.dtype, copy=False)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
if n_features != self.components_.shape[1]:
raise ValueError('The provided data has %d dimensions while the model was trained with feature size %d.' % (n_features, self.components_.shape[1]))
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
(_, suff_stats) = self._e_step(X[idx_slice, :], cal_sstats=True, random_init=True, parallel=parallel)
if False:
self.components_ = self.topic_word_prior_ + suff_stats
else:
weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)
doc_ratio = float(self.total_samples) / X[idx_slice, :].shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return self
|
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Partially fitted estimator.
"""
first_time = not hasattr(self, 'components_')
if first_time:
self._validate_params()
<DeepExtract>
dtype = [np.float64, np.float32] if first_time else self.components_.dtype
X = self._validate_data(X, reset=first_time, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.partial_fit')
X = X
</DeepExtract>
(n_samples, n_features) = X.shape
batch_size = self.batch_size
if first_time:
<DeepExtract>
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1.0 / self.n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1.0 / self.n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.0
init_var = 1.0 / init_gamma
self.components_ = self.random_state_.gamma(init_gamma, init_var, (self.n_components, n_features)).astype(X.dtype, copy=False)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
</DeepExtract>
if n_features != self.components_.shape[1]:
raise ValueError('The provided data has %d dimensions while the model was trained with feature size %d.' % (n_features, self.components_.shape[1]))
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
<DeepExtract>
(_, suff_stats) = self._e_step(X[idx_slice, :], cal_sstats=True, random_init=True, parallel=parallel)
if False:
self.components_ = self.topic_word_prior_ + suff_stats
else:
weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay)
doc_ratio = float(self.total_samples) / X[idx_slice, :].shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (self.topic_word_prior_ + doc_ratio * suff_stats)
self.exp_dirichlet_component_ = np.exp(_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
</DeepExtract>
return self
|
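A hedged sketch of the online update above: each partial_fit call performs one weighted variational Bayes step, with total_samples scaling the sufficient statistics (toy counts, arbitrary sizes):

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.RandomState(0)
X = rng.randint(0, 5, size=(40, 30))  # toy document-word counts
lda = LatentDirichletAllocation(n_components=5, total_samples=40, random_state=0)
for batch in np.array_split(X, 4):
    lda.partial_fit(batch)  # one online VB update per mini-batch
print(lda.components_.shape)  # (5, 30)
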
def test_load_compressed():
with _open_binary(TEST_DATA_MODULE, datafile) as f:
        (X, y) = load_svmlight_file(f)  # the inlined helper's **kwargs were empty here
with NamedTemporaryFile(prefix='sklearn-test', suffix='.gz') as tmp:
tmp.close()
with _open_binary(TEST_DATA_MODULE, datafile) as f:
with gzip.open(tmp.name, 'wb') as fh_out:
shutil.copyfileobj(f, fh_out)
(Xgz, ygz) = load_svmlight_file(tmp.name)
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xgz.toarray())
assert_array_almost_equal(y, ygz)
with NamedTemporaryFile(prefix='sklearn-test', suffix='.bz2') as tmp:
tmp.close()
with _open_binary(TEST_DATA_MODULE, datafile) as f:
with BZ2File(tmp.name, 'wb') as fh_out:
shutil.copyfileobj(f, fh_out)
(Xbz, ybz) = load_svmlight_file(tmp.name)
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xbz.toarray())
assert_array_almost_equal(y, ybz)
|
def test_load_compressed():
<DeepExtract>
with _open_binary(TEST_DATA_MODULE, datafile) as f:
    (X, y) = load_svmlight_file(f)  # the inlined helper's **kwargs were empty here
</DeepExtract>
with NamedTemporaryFile(prefix='sklearn-test', suffix='.gz') as tmp:
tmp.close()
with _open_binary(TEST_DATA_MODULE, datafile) as f:
with gzip.open(tmp.name, 'wb') as fh_out:
shutil.copyfileobj(f, fh_out)
(Xgz, ygz) = load_svmlight_file(tmp.name)
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xgz.toarray())
assert_array_almost_equal(y, ygz)
with NamedTemporaryFile(prefix='sklearn-test', suffix='.bz2') as tmp:
tmp.close()
with _open_binary(TEST_DATA_MODULE, datafile) as f:
with BZ2File(tmp.name, 'wb') as fh_out:
shutil.copyfileobj(f, fh_out)
(Xbz, ybz) = load_svmlight_file(tmp.name)
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xbz.toarray())
assert_array_almost_equal(y, ybz)
|
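A self-contained round-trip sketch of the behavior under test: load_svmlight_file infers gzip (or bzip2) decompression from the file suffix.

import gzip
import os
import numpy as np
from tempfile import NamedTemporaryFile
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = np.array([[1.0, 0.0], [0.0, 2.0]])
y = np.array([0.0, 1.0])
with NamedTemporaryFile(prefix='sklearn-test', suffix='.gz', delete=False) as tmp:
    path = tmp.name
with gzip.open(path, 'wb') as fh:
    dump_svmlight_file(X, y, fh)
Xgz, ygz = load_svmlight_file(path)  # decompression inferred from '.gz'
os.remove(path)
print(np.allclose(X, Xgz.toarray()) and np.allclose(y, ygz))
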
def test_selectpercentile_tiebreaking():
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert X1.shape[1] == 1
scores = sel.scores_
support = sel.get_support()
assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum():])
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert X2.shape[1] == 2
scores = sel.scores_
support = sel.get_support()
assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum():])
|
def test_selectpercentile_tiebreaking():
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert X1.shape[1] == 1
<DeepExtract>
scores = sel.scores_
support = sel.get_support()
assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum():])
</DeepExtract>
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert X2.shape[1] == 2
<DeepExtract>
scores = sel.scores_
support = sel.get_support()
assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum():])
</DeepExtract>
|
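A hedged sketch of the tie-breaking the test above pins down: with all scores equal, SelectPercentile still keeps exactly the requested number of features.

import numpy as np
from sklearn.feature_selection import SelectPercentile

X = np.array([[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]])
y = np.array([0, 1, 0, 1])
all_tied = lambda X, y: (np.ones(X.shape[1]), np.ones(X.shape[1]))
sel = SelectPercentile(all_tied, percentile=34).fit(X, y)
print(sel.get_support().sum())  # exactly 1 of the 3 tied features is kept
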
def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, accept_sparse=['csc', 'csr'], dtype=[np.float64, np.float32], y_numeric=True, multi_output=False)
if self.solver == 'lbfgs':
loss_dtype = np.float64
else:
loss_dtype = min(max(y.dtype, X.dtype), np.float64)
y = check_array(y, dtype=loss_dtype, order='C', ensure_2d=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
(n_samples, n_features) = X.shape
self._base_loss = HalfSquaredError()
linear_loss = LinearModelLoss(base_loss=self._base_loss, fit_intercept=self.fit_intercept)
if not linear_loss.base_loss.in_y_true_range(y):
raise ValueError(f'Some value(s) of y are out of the valid range of the loss {self._base_loss.__class__.__name__!r}.')
sample_weight = sample_weight / sample_weight.sum()
if self.warm_start and hasattr(self, 'coef_'):
if self.fit_intercept:
coef = np.concatenate((self.coef_, np.array([self.intercept_])))
else:
coef = self.coef_
coef = coef.astype(loss_dtype, copy=False)
else:
coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
if self.fit_intercept:
coef[-1] = linear_loss.base_loss.link.link(np.average(y, weights=sample_weight))
l2_reg_strength = self.alpha
n_threads = _openmp_effective_n_threads()
if self.solver == 'lbfgs':
func = linear_loss.loss_gradient
opt_res = scipy.optimize.minimize(func, coef, method='L-BFGS-B', jac=True, options={'maxiter': self.max_iter, 'maxls': 50, 'iprint': self.verbose - 1, 'gtol': self.tol, 'ftol': 64 * np.finfo(float).eps}, args=(X, y, sample_weight, l2_reg_strength, n_threads))
self.n_iter_ = _check_optimize_result('lbfgs', opt_res)
coef = opt_res.x
elif self.solver == 'newton-cholesky':
sol = NewtonCholeskySolver(coef=coef, linear_loss=linear_loss, l2_reg_strength=l2_reg_strength, tol=self.tol, max_iter=self.max_iter, n_threads=n_threads, verbose=self.verbose)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
elif issubclass(self.solver, NewtonSolver):
sol = self.solver(coef=coef, linear_loss=linear_loss, l2_reg_strength=l2_reg_strength, tol=self.tol, max_iter=self.max_iter, n_threads=n_threads)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
else:
raise ValueError(f'Invalid solver={self.solver}.')
if self.fit_intercept:
self.intercept_ = coef[-1]
self.coef_ = coef[:-1]
else:
self.intercept_ = 0.0
self.coef_ = coef
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, accept_sparse=['csc', 'csr'], dtype=[np.float64, np.float32], y_numeric=True, multi_output=False)
if self.solver == 'lbfgs':
loss_dtype = np.float64
else:
loss_dtype = min(max(y.dtype, X.dtype), np.float64)
y = check_array(y, dtype=loss_dtype, order='C', ensure_2d=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
(n_samples, n_features) = X.shape
<DeepExtract>
self._base_loss = HalfSquaredError()
</DeepExtract>
linear_loss = LinearModelLoss(base_loss=self._base_loss, fit_intercept=self.fit_intercept)
if not linear_loss.base_loss.in_y_true_range(y):
raise ValueError(f'Some value(s) of y are out of the valid range of the loss {self._base_loss.__class__.__name__!r}.')
sample_weight = sample_weight / sample_weight.sum()
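# Coefficients are optimized as one flat vector; with fit_intercept the intercept occupies the last slot ([coef_, intercept_]).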
if self.warm_start and hasattr(self, 'coef_'):
if self.fit_intercept:
coef = np.concatenate((self.coef_, np.array([self.intercept_])))
else:
coef = self.coef_
coef = coef.astype(loss_dtype, copy=False)
else:
coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
if self.fit_intercept:
coef[-1] = linear_loss.base_loss.link.link(np.average(y, weights=sample_weight))
l2_reg_strength = self.alpha
n_threads = _openmp_effective_n_threads()
if self.solver == 'lbfgs':
func = linear_loss.loss_gradient
opt_res = scipy.optimize.minimize(func, coef, method='L-BFGS-B', jac=True, options={'maxiter': self.max_iter, 'maxls': 50, 'iprint': self.verbose - 1, 'gtol': self.tol, 'ftol': 64 * np.finfo(float).eps}, args=(X, y, sample_weight, l2_reg_strength, n_threads))
self.n_iter_ = _check_optimize_result('lbfgs', opt_res)
coef = opt_res.x
elif self.solver == 'newton-cholesky':
sol = NewtonCholeskySolver(coef=coef, linear_loss=linear_loss, l2_reg_strength=l2_reg_strength, tol=self.tol, max_iter=self.max_iter, n_threads=n_threads, verbose=self.verbose)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
elif issubclass(self.solver, NewtonSolver):
sol = self.solver(coef=coef, linear_loss=linear_loss, l2_reg_strength=l2_reg_strength, tol=self.tol, max_iter=self.max_iter, n_threads=n_threads)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
else:
raise ValueError(f'Invalid solver={self.solver}.')
if self.fit_intercept:
self.intercept_ = coef[-1]
self.coef_ = coef[:-1]
else:
self.intercept_ = 0.0
self.coef_ = coef
return self
|
def __add__(self, other):
ret = ParseResults(self.__toklist)
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update(self.__accumNames)
ret.__name = self.__name
ret = ret
ret += other
return ret
|
def __add__(self, other):
<DeepExtract>
ret = ParseResults(self.__toklist)
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update(self.__accumNames)
ret.__name = self.__name
ret = ret
</DeepExtract>
ret += other
return ret
|
def check_n_features_in(name, estimator_orig):
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
X = kernel(X, X)
X = X
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
assert not hasattr(estimator, 'n_features_in_')
estimator.fit(X, y)
assert hasattr(estimator, 'n_features_in_')
assert estimator.n_features_in_ == X.shape[1]
|
def check_n_features_in(name, estimator_orig):
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
<DeepExtract>
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
assert not hasattr(estimator, 'n_features_in_')
estimator.fit(X, y)
assert hasattr(estimator, 'n_features_in_')
assert estimator.n_features_in_ == X.shape[1]
|
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training samples.
Y : array-like of shape (n_samples,) or (n_samples, n_targets)
Targets.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
check_consistent_length(X, Y)
X = self._validate_data(X, dtype=np.float64, copy=self.copy, ensure_min_samples=2)
Y = check_array(Y, input_name='Y', dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n_components = self.n_components
rank_upper_bound = min(X.shape[0], X.shape[1], Y.shape[1])
if n_components > rank_upper_bound:
raise ValueError(f'`n_components` upper bound is {rank_upper_bound}. Got {n_components} instead. Reduce `n_components`.')
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
if self.scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
(X, Y, self._x_mean, self._y_mean, self._x_std, self._y_std) = (X, Y, x_mean, y_mean, x_std, y_std)
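# The leading singular vectors of the cross-covariance C = X.T @ Y become the X- and Y-weights; svd_flip resolves the sign ambiguity.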
C = np.dot(X.T, Y)
(U, s, Vt) = svd(C, full_matrices=False)
U = U[:, :n_components]
Vt = Vt[:n_components]
(U, Vt) = svd_flip(U, Vt)
V = Vt.T
self.x_weights_ = U
self.y_weights_ = V
self._n_features_out = self.x_weights_.shape[1]
return self
|
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training samples.
Y : array-like of shape (n_samples,) or (n_samples, n_targets)
Targets.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
check_consistent_length(X, Y)
X = self._validate_data(X, dtype=np.float64, copy=self.copy, ensure_min_samples=2)
Y = check_array(Y, input_name='Y', dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n_components = self.n_components
rank_upper_bound = min(X.shape[0], X.shape[1], Y.shape[1])
if n_components > rank_upper_bound:
raise ValueError(f'`n_components` upper bound is {rank_upper_bound}. Got {n_components} instead. Reduce `n_components`.')
<DeepExtract>
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
if self.scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
(X, Y, self._x_mean, self._y_mean, self._x_std, self._y_std) = (X, Y, x_mean, y_mean, x_std, y_std)
</DeepExtract>
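# The leading singular vectors of the cross-covariance C = X.T @ Y become the X- and Y-weights; svd_flip resolves the sign ambiguity.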
C = np.dot(X.T, Y)
(U, s, Vt) = svd(C, full_matrices=False)
U = U[:, :n_components]
Vt = Vt[:n_components]
(U, Vt) = svd_flip(U, Vt)
V = Vt.T
self.x_weights_ = U
self.y_weights_ = V
self._n_features_out = self.x_weights_.shape[1]
return self
|
def plot_lda_cov(lda, splot):
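# Draw a covariance ellipse at each class mean, oriented along the leading eigenvector of the shared LDA covariance.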
(v, w) = linalg.eigh(lda.covariance_)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi
ell = mpl.patches.Ellipse(lda.means_[0], 2 * v[0] ** 0.5, 2 * v[1] ** 0.5, angle=180 + angle, facecolor='red', edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
(v, w) = linalg.eigh(lda.covariance_)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi
ell = mpl.patches.Ellipse(lda.means_[1], 2 * v[0] ** 0.5, 2 * v[1] ** 0.5, angle=180 + angle, facecolor='blue', edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
|
def plot_lda_cov(lda, splot):
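# Draw a covariance ellipse at each class mean, oriented along the leading eigenvector of the shared LDA covariance.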
<DeepExtract>
(v, w) = linalg.eigh(lda.covariance_)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi
ell = mpl.patches.Ellipse(lda.means_[0], 2 * v[0] ** 0.5, 2 * v[1] ** 0.5, angle=180 + angle, facecolor='red', edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
</DeepExtract>
<DeepExtract>
(v, w) = linalg.eigh(lda.covariance_)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi
ell = mpl.patches.Ellipse(lda.means_[1], 2 * v[0] ** 0.5, 2 * v[1] ** 0.5, angle=180 + angle, facecolor='blue', edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
</DeepExtract>
|
def fit(self, X, y=None):
"""Fit the local outlier factor detector from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : LocalOutlierFactor
The fitted local outlier factor detector.
"""
self._validate_params()
self._fit(X)
n_samples = self.n_samples_fit_
if self.n_neighbors > n_samples:
warnings.warn('n_neighbors (%s) is greater than the total number of samples (%s). n_neighbors will be set to (n_samples - 1) for estimation.' % (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
(self._distances_fit_X_, _neighbors_indices_fit_X_) = self.kneighbors(n_neighbors=self.n_neighbors_)
if self._fit_X.dtype == np.float32:
self._distances_fit_X_ = self._distances_fit_X_.astype(self._fit_X.dtype, copy=False)
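# Local reachability density: inverse of the mean reachability distance to the k nearest neighbors; the 1e-10 term guards against division by zero.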
dist_k = self._distances_fit_X_[_neighbors_indices_fit_X_, self.n_neighbors_ - 1]
reach_dist_array = np.maximum(self._distances_fit_X_, dist_k)
self._lrd = 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
lrd_ratios_array = self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == 'auto':
self.offset_ = -1.5
else:
self.offset_ = np.percentile(self.negative_outlier_factor_, 100.0 * self.contamination)
return self
|
def fit(self, X, y=None):
"""Fit the local outlier factor detector from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : LocalOutlierFactor
The fitted local outlier factor detector.
"""
self._validate_params()
self._fit(X)
n_samples = self.n_samples_fit_
if self.n_neighbors > n_samples:
warnings.warn('n_neighbors (%s) is greater than the total number of samples (%s). n_neighbors will be set to (n_samples - 1) for estimation.' % (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
(self._distances_fit_X_, _neighbors_indices_fit_X_) = self.kneighbors(n_neighbors=self.n_neighbors_)
if self._fit_X.dtype == np.float32:
self._distances_fit_X_ = self._distances_fit_X_.astype(self._fit_X.dtype, copy=False)
<DeepExtract>
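# Local reachability density: inverse of the mean reachability distance to the k nearest neighbors; the 1e-10 term guards against division by zero.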
dist_k = self._distances_fit_X_[_neighbors_indices_fit_X_, self.n_neighbors_ - 1]
reach_dist_array = np.maximum(self._distances_fit_X_, dist_k)
self._lrd = 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
</DeepExtract>
lrd_ratios_array = self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == 'auto':
self.offset_ = -1.5
else:
self.offset_ = np.percentile(self.negative_outlier_factor_, 100.0 * self.contamination)
return self
|
@pytest.mark.parametrize('name', sorted(set(ALL_METRICS) - set(REGRESSION_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))
def test_binary_sample_weight_invariance(name):
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples,))
y_pred = random_state.randint(0, 2, size=(n_samples,))
y_score = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y_true))
metric = partial(metric, k=1) if name == 'top_k_accuracy_score' else metric
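# Invariance checks: None weights equal unit weights, integer weights equal repeating samples, zeroed weights equal dropping samples; scaling invariance is skipped for 'unnormalized' metrics.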
unweighted_score = metric(y_true, y_score, sample_weight=None)
assert_allclose(unweighted_score, metric(y_true, y_score, sample_weight=np.ones(shape=len(y_true))), err_msg='For %s sample_weight=None is not equivalent to sample_weight=ones' % name)
weighted_score = metric(y_true, y_score, sample_weight=sample_weight)
with pytest.raises(AssertionError):
assert_allclose(unweighted_score, weighted_score)
raise ValueError('Unweighted and weighted scores are unexpectedly almost equal (%s) and (%s) for %s' % (unweighted_score, weighted_score, name))
weighted_score_list = metric(y_true, y_score, sample_weight=sample_weight.tolist())
assert_allclose(weighted_score, weighted_score_list, err_msg='Weighted scores for array and list sample_weight input are not equal (%s != %s) for %s' % (weighted_score, weighted_score_list, name))
repeat_weighted_score = metric(np.repeat(y_true, sample_weight, axis=0), np.repeat(y_score, sample_weight, axis=0), sample_weight=None)
assert_allclose(weighted_score, repeat_weighted_score, err_msg='Weighting %s is not equal to repeating samples' % name)
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y_true[1::2]
y2_subset = y_score[1::2]
weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y_true, y_score, sample_weight=sample_weight_zeroed)
assert_allclose(weighted_score_subset, weighted_score_zeroed, err_msg='Zeroing weights does not give the same result as removing the corresponding samples (%s != %s) for %s' % (weighted_score_zeroed, weighted_score_subset, name))
if not name.startswith('unnormalized'):
for scaling in [2, 0.3]:
assert_allclose(weighted_score, metric(y_true, y_score, sample_weight=sample_weight * scaling), err_msg='%s sample_weight is not invariant under scaling' % name)
error_message = 'Found input variables with inconsistent numbers of samples: \\[{}, {}, {}\\]'.format(_num_samples(y_true), _num_samples(y_score), _num_samples(sample_weight) * 2)
with pytest.raises(ValueError, match=error_message):
metric(y_true, y_score, sample_weight=np.hstack([sample_weight, sample_weight]))
else:
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y_true))
metric = partial(metric, k=1) if name == 'top_k_accuracy_score' else metric
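# Invariance checks: None weights equal unit weights, integer weights equal repeating samples, zeroed weights equal dropping samples; scaling invariance is skipped for 'unnormalized' metrics.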
unweighted_score = metric(y_true, y_pred, sample_weight=None)
assert_allclose(unweighted_score, metric(y_true, y_pred, sample_weight=np.ones(shape=len(y_true))), err_msg='For %s sample_weight=None is not equivalent to sample_weight=ones' % name)
weighted_score = metric(y_true, y_pred, sample_weight=sample_weight)
with pytest.raises(AssertionError):
assert_allclose(unweighted_score, weighted_score)
raise ValueError('Unweighted and weighted scores are unexpectedly almost equal (%s) and (%s) for %s' % (unweighted_score, weighted_score, name))
weighted_score_list = metric(y_true, y_pred, sample_weight=sample_weight.tolist())
assert_allclose(weighted_score, weighted_score_list, err_msg='Weighted scores for array and list sample_weight input are not equal (%s != %s) for %s' % (weighted_score, weighted_score_list, name))
repeat_weighted_score = metric(np.repeat(y_true, sample_weight, axis=0), np.repeat(y_pred, sample_weight, axis=0), sample_weight=None)
assert_allclose(weighted_score, repeat_weighted_score, err_msg='Weighting %s is not equal to repeating samples' % name)
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y_true[1::2]
y2_subset = y_pred[1::2]
weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y_true, y_pred, sample_weight=sample_weight_zeroed)
assert_allclose(weighted_score_subset, weighted_score_zeroed, err_msg='Zeroing weights does not give the same result as removing the corresponding samples (%s != %s) for %s' % (weighted_score_zeroed, weighted_score_subset, name))
if not name.startswith('unnormalized'):
for scaling in [2, 0.3]:
assert_allclose(weighted_score, metric(y_true, y_pred, sample_weight=sample_weight * scaling), err_msg='%s sample_weight is not invariant under scaling' % name)
error_message = 'Found input variables with inconsistent numbers of samples: \\[{}, {}, {}\\]'.format(_num_samples(y_true), _num_samples(y_pred), _num_samples(sample_weight) * 2)
with pytest.raises(ValueError, match=error_message):
metric(y_true, y_pred, sample_weight=np.hstack([sample_weight, sample_weight]))
|
@pytest.mark.parametrize('name', sorted(set(ALL_METRICS) - set(REGRESSION_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))
def test_binary_sample_weight_invariance(name):
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples,))
y_pred = random_state.randint(0, 2, size=(n_samples,))
y_score = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
<DeepExtract>
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y_true))
metric = partial(metric, k=1) if name == 'top_k_accuracy_score' else metric
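# Invariance checks: None weights equal unit weights, integer weights equal repeating samples, zeroed weights equal dropping samples; scaling invariance is skipped for 'unnormalized' metrics.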
unweighted_score = metric(y_true, y_score, sample_weight=None)
assert_allclose(unweighted_score, metric(y_true, y_score, sample_weight=np.ones(shape=len(y_true))), err_msg='For %s sample_weight=None is not equivalent to sample_weight=ones' % name)
weighted_score = metric(y_true, y_score, sample_weight=sample_weight)
with pytest.raises(AssertionError):
assert_allclose(unweighted_score, weighted_score)
raise ValueError('Unweighted and weighted scores are unexpectedly almost equal (%s) and (%s) for %s' % (unweighted_score, weighted_score, name))
weighted_score_list = metric(y_true, y_score, sample_weight=sample_weight.tolist())
assert_allclose(weighted_score, weighted_score_list, err_msg='Weighted scores for array and list sample_weight input are not equal (%s != %s) for %s' % (weighted_score, weighted_score_list, name))
repeat_weighted_score = metric(np.repeat(y_true, sample_weight, axis=0), np.repeat(y_score, sample_weight, axis=0), sample_weight=None)
assert_allclose(weighted_score, repeat_weighted_score, err_msg='Weighting %s is not equal to repeating samples' % name)
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y_true[1::2]
y2_subset = y_score[1::2]
weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y_true, y_score, sample_weight=sample_weight_zeroed)
assert_allclose(weighted_score_subset, weighted_score_zeroed, err_msg='Zeroing weights does not give the same result as removing the corresponding samples (%s != %s) for %s' % (weighted_score_zeroed, weighted_score_subset, name))
if not name.startswith('unnormalized'):
for scaling in [2, 0.3]:
assert_allclose(weighted_score, metric(y_true, y_score, sample_weight=sample_weight * scaling), err_msg='%s sample_weight is not invariant under scaling' % name)
error_message = 'Found input variables with inconsistent numbers of samples: \\[{}, {}, {}\\]'.format(_num_samples(y_true), _num_samples(y_score), _num_samples(sample_weight) * 2)
with pytest.raises(ValueError, match=error_message):
metric(y_true, y_score, sample_weight=np.hstack([sample_weight, sample_weight]))
</DeepExtract>
else:
<DeepExtract>
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y_true))
metric = partial(metric, k=1) if name == 'top_k_accuracy_score' else metric
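# Invariance checks: None weights equal unit weights, integer weights equal repeating samples, zeroed weights equal dropping samples; scaling invariance is skipped for 'unnormalized' metrics.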
unweighted_score = metric(y_true, y_pred, sample_weight=None)
assert_allclose(unweighted_score, metric(y_true, y_pred, sample_weight=np.ones(shape=len(y_true))), err_msg='For %s sample_weight=None is not equivalent to sample_weight=ones' % name)
weighted_score = metric(y_true, y_pred, sample_weight=sample_weight)
with pytest.raises(AssertionError):
assert_allclose(unweighted_score, weighted_score)
raise ValueError('Unweighted and weighted scores are unexpectedly almost equal (%s) and (%s) for %s' % (unweighted_score, weighted_score, name))
weighted_score_list = metric(y_true, y_pred, sample_weight=sample_weight.tolist())
assert_allclose(weighted_score, weighted_score_list, err_msg='Weighted scores for array and list sample_weight input are not equal (%s != %s) for %s' % (weighted_score, weighted_score_list, name))
repeat_weighted_score = metric(np.repeat(y_true, sample_weight, axis=0), np.repeat(y_pred, sample_weight, axis=0), sample_weight=None)
assert_allclose(weighted_score, repeat_weighted_score, err_msg='Weighting %s is not equal to repeating samples' % name)
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y_true[1::2]
y2_subset = y_pred[1::2]
weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y_true, y_pred, sample_weight=sample_weight_zeroed)
assert_allclose(weighted_score_subset, weighted_score_zeroed, err_msg='Zeroing weights does not give the same result as removing the corresponding samples (%s != %s) for %s' % (weighted_score_zeroed, weighted_score_subset, name))
if not name.startswith('unnormalized'):
for scaling in [2, 0.3]:
assert_allclose(weighted_score, metric(y_true, y_pred, sample_weight=sample_weight * scaling), err_msg='%s sample_weight is not invariant under scaling' % name)
error_message = 'Found input variables with inconsistent numbers of samples: \\[{}, {}, {}\\]'.format(_num_samples(y_true), _num_samples(y_pred), _num_samples(sample_weight) * 2)
with pytest.raises(ValueError, match=error_message):
metric(y_true, y_pred, sample_weight=np.hstack([sample_weight, sample_weight]))
</DeepExtract>
|
def fit(self, X, y=None):
"""Fit the shrunk covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
covariance = check_array(covariance)
n_features = covariance.shape[0]
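# Shrink toward the scaled identity: (1 - shrinkage) * cov + shrinkage * mu * I, with mu = trace(cov) / n_features.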
mu = np.trace(covariance) / n_features
shrunk_cov = (1.0 - self.shrinkage) * covariance
shrunk_cov.flat[::n_features + 1] += self.shrinkage * mu
covariance = shrunk_cov
self._set_covariance(covariance)
return self
|
def fit(self, X, y=None):
"""Fit the shrunk covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
<DeepExtract>
covariance = check_array(covariance)
n_features = covariance.shape[0]
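# Shrink toward the scaled identity: (1 - shrinkage) * cov + shrinkage * mu * I, with mu = trace(cov) / n_features.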
mu = np.trace(covariance) / n_features
shrunk_cov = (1.0 - self.shrinkage) * covariance
shrunk_cov.flat[::n_features + 1] += self.shrinkage * mu
covariance = shrunk_cov
</DeepExtract>
self._set_covariance(covariance)
return self
|
def test_cross_validate():
cv = KFold()
(X_reg, y_reg) = make_regression(n_samples=30, random_state=0)
reg = Ridge(random_state=0)
(X_clf, y_clf) = make_classification(n_samples=30, random_state=0)
clf = SVC(kernel='linear', random_state=0)
for (X, y, est) in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
mse_scorer = check_scoring(est, scoring='neg_mean_squared_error')
r2_scorer = check_scoring(est, scoring='r2')
train_mse_scores = []
test_mse_scores = []
train_r2_scores = []
test_r2_scores = []
fitted_estimators = []
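# Build reference scores with a manual CV loop; cross_validate is expected to reproduce them below.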
for (train, test) in cv.split(X, y):
est = clone(est).fit(X[train], y[train])
train_mse_scores.append(mse_scorer(est, X[train], y[train]))
train_r2_scores.append(r2_scorer(est, X[train], y[train]))
test_mse_scores.append(mse_scorer(est, X[test], y[test]))
test_r2_scores.append(r2_scorer(est, X[test], y[test]))
fitted_estimators.append(est)
train_mse_scores = np.array(train_mse_scores)
test_mse_scores = np.array(test_mse_scores)
train_r2_scores = np.array(train_r2_scores)
test_r2_scores = np.array(test_r2_scores)
fitted_estimators = np.array(fitted_estimators)
scores = (train_mse_scores, test_mse_scores, train_r2_scores, test_r2_scores, fitted_estimators)
(train_mse_scores, test_mse_scores, train_r2_scores, test_r2_scores, fitted_estimators) = scores
for (return_train_score, dict_len) in ((True, 4), (False, 3)):
if return_train_score:
mse_scores_dict = cross_validate(est, X, y, scoring='neg_mean_squared_error', return_train_score=True, cv=cv)
assert_array_almost_equal(mse_scores_dict['train_score'], train_mse_scores)
else:
mse_scores_dict = cross_validate(est, X, y, scoring='neg_mean_squared_error', return_train_score=False, cv=cv)
assert isinstance(mse_scores_dict, dict)
assert len(mse_scores_dict) == dict_len
assert_array_almost_equal(mse_scores_dict['test_score'], test_mse_scores)
if return_train_score:
r2_scores_dict = cross_validate(est, X, y, scoring=['r2'], return_train_score=True, cv=cv)
assert_array_almost_equal(r2_scores_dict['train_r2'], train_r2_scores, True)
else:
r2_scores_dict = cross_validate(est, X, y, scoring=['r2'], return_train_score=False, cv=cv)
assert isinstance(r2_scores_dict, dict)
assert len(r2_scores_dict) == dict_len
assert_array_almost_equal(r2_scores_dict['test_r2'], test_r2_scores)
mse_scores_dict = cross_validate(est, X, y, scoring='neg_mean_squared_error', return_estimator=True, cv=cv)
for (k, est) in enumerate(mse_scores_dict['estimator']):
assert_almost_equal(est.coef_, fitted_estimators[k].coef_)
assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_)
(train_mse_scores, test_mse_scores, train_r2_scores, test_r2_scores, fitted_estimators) = scores
def custom_scorer(clf, X, y):
y_pred = clf.predict(X)
return {'r2': r2_score(y, y_pred), 'neg_mean_squared_error': -mean_squared_error(y, y_pred)}
all_scoring = (('r2', 'neg_mean_squared_error'), {'r2': make_scorer(r2_score), 'neg_mean_squared_error': 'neg_mean_squared_error'}, custom_scorer)
keys_sans_train = {'test_r2', 'test_neg_mean_squared_error', 'fit_time', 'score_time'}
keys_with_train = keys_sans_train.union({'train_r2', 'train_neg_mean_squared_error'})
for return_train_score in (True, False):
for scoring in all_scoring:
if return_train_score:
cv_results = cross_validate(est, X, y, scoring=scoring, return_train_score=True, cv=cv)
assert_array_almost_equal(cv_results['train_r2'], train_r2_scores)
assert_array_almost_equal(cv_results['train_neg_mean_squared_error'], train_mse_scores)
else:
cv_results = cross_validate(est, X, y, scoring=scoring, return_train_score=False, cv=cv)
assert isinstance(cv_results, dict)
assert set(cv_results.keys()) == (keys_with_train if return_train_score else keys_sans_train)
assert_array_almost_equal(cv_results['test_r2'], test_r2_scores)
assert_array_almost_equal(cv_results['test_neg_mean_squared_error'], test_mse_scores)
assert type(cv_results['test_r2']) == np.ndarray
assert type(cv_results['test_neg_mean_squared_error']) == np.ndarray
assert type(cv_results['fit_time']) == np.ndarray
assert type(cv_results['score_time']) == np.ndarray
assert np.all(cv_results['fit_time'] >= 0)
assert np.all(cv_results['fit_time'] < 10)
assert np.all(cv_results['score_time'] >= 0)
assert np.all(cv_results['score_time'] < 10)
|
def test_cross_validate():
cv = KFold()
(X_reg, y_reg) = make_regression(n_samples=30, random_state=0)
reg = Ridge(random_state=0)
(X_clf, y_clf) = make_classification(n_samples=30, random_state=0)
clf = SVC(kernel='linear', random_state=0)
for (X, y, est) in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
mse_scorer = check_scoring(est, scoring='neg_mean_squared_error')
r2_scorer = check_scoring(est, scoring='r2')
train_mse_scores = []
test_mse_scores = []
train_r2_scores = []
test_r2_scores = []
fitted_estimators = []
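# Build reference scores with a manual CV loop; cross_validate is expected to reproduce them below.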
for (train, test) in cv.split(X, y):
est = clone(est).fit(X[train], y[train])
train_mse_scores.append(mse_scorer(est, X[train], y[train]))
train_r2_scores.append(r2_scorer(est, X[train], y[train]))
test_mse_scores.append(mse_scorer(est, X[test], y[test]))
test_r2_scores.append(r2_scorer(est, X[test], y[test]))
fitted_estimators.append(est)
train_mse_scores = np.array(train_mse_scores)
test_mse_scores = np.array(test_mse_scores)
train_r2_scores = np.array(train_r2_scores)
test_r2_scores = np.array(test_r2_scores)
fitted_estimators = np.array(fitted_estimators)
scores = (train_mse_scores, test_mse_scores, train_r2_scores, test_r2_scores, fitted_estimators)
<DeepExtract>
(train_mse_scores, test_mse_scores, train_r2_scores, test_r2_scores, fitted_estimators) = scores
for (return_train_score, dict_len) in ((True, 4), (False, 3)):
if return_train_score:
mse_scores_dict = cross_validate(est, X, y, scoring='neg_mean_squared_error', return_train_score=True, cv=cv)
assert_array_almost_equal(mse_scores_dict['train_score'], train_mse_scores)
else:
mse_scores_dict = cross_validate(est, X, y, scoring='neg_mean_squared_error', return_train_score=False, cv=cv)
assert isinstance(mse_scores_dict, dict)
assert len(mse_scores_dict) == dict_len
assert_array_almost_equal(mse_scores_dict['test_score'], test_mse_scores)
if return_train_score:
r2_scores_dict = cross_validate(est, X, y, scoring=['r2'], return_train_score=True, cv=cv)
assert_array_almost_equal(r2_scores_dict['train_r2'], train_r2_scores, True)
else:
r2_scores_dict = cross_validate(est, X, y, scoring=['r2'], return_train_score=False, cv=cv)
assert isinstance(r2_scores_dict, dict)
assert len(r2_scores_dict) == dict_len
assert_array_almost_equal(r2_scores_dict['test_r2'], test_r2_scores)
mse_scores_dict = cross_validate(est, X, y, scoring='neg_mean_squared_error', return_estimator=True, cv=cv)
for (k, est) in enumerate(mse_scores_dict['estimator']):
assert_almost_equal(est.coef_, fitted_estimators[k].coef_)
assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_)
</DeepExtract>
<DeepExtract>
(train_mse_scores, test_mse_scores, train_r2_scores, test_r2_scores, fitted_estimators) = scores
def custom_scorer(clf, X, y):
y_pred = clf.predict(X)
return {'r2': r2_score(y, y_pred), 'neg_mean_squared_error': -mean_squared_error(y, y_pred)}
all_scoring = (('r2', 'neg_mean_squared_error'), {'r2': make_scorer(r2_score), 'neg_mean_squared_error': 'neg_mean_squared_error'}, custom_scorer)
keys_sans_train = {'test_r2', 'test_neg_mean_squared_error', 'fit_time', 'score_time'}
keys_with_train = keys_sans_train.union({'train_r2', 'train_neg_mean_squared_error'})
for return_train_score in (True, False):
for scoring in all_scoring:
if return_train_score:
cv_results = cross_validate(est, X, y, scoring=scoring, return_train_score=True, cv=cv)
assert_array_almost_equal(cv_results['train_r2'], train_r2_scores)
assert_array_almost_equal(cv_results['train_neg_mean_squared_error'], train_mse_scores)
else:
cv_results = cross_validate(est, X, y, scoring=scoring, return_train_score=False, cv=cv)
assert isinstance(cv_results, dict)
assert set(cv_results.keys()) == (keys_with_train if return_train_score else keys_sans_train)
assert_array_almost_equal(cv_results['test_r2'], test_r2_scores)
assert_array_almost_equal(cv_results['test_neg_mean_squared_error'], test_mse_scores)
assert type(cv_results['test_r2']) == np.ndarray
assert type(cv_results['test_neg_mean_squared_error']) == np.ndarray
assert type(cv_results['fit_time']) == np.ndarray
assert type(cv_results['score_time']) == np.ndarray
assert np.all(cv_results['fit_time'] >= 0)
assert np.all(cv_results['fit_time'] < 10)
assert np.all(cv_results['score_time'] >= 0)
assert np.all(cv_results['score_time'] < 10)
</DeepExtract>
|