| before (string, lengths 87 to 36.6k) | after (string, lengths 116 to 37.2k) |
|---|---|
def predict_log_proba(self, X):
"""Return log of posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior log-probabilities of classification per class.
"""
check_is_fitted(self)
(xp, is_array_api) = get_namespace(X)
decision = self.decision_function(X)
if self.classes_.size == 2:
proba = _expit(decision)
probas_ = xp.stack([1 - proba, proba], axis=1)
else:
probas_ = softmax(decision)
return np.log(probas_)
|
def predict_log_proba(self, X):
"""Return log of posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior log-probabilities of classification per class.
"""
<DeepExtract>
check_is_fitted(self)
(xp, is_array_api) = get_namespace(X)
decision = self.decision_function(X)
if self.classes_.size == 2:
proba = _expit(decision)
probas_ = xp.stack([1 - proba, proba], axis=1)
else:
probas_ = softmax(decision)
</DeepExtract>
return np.log(probas_)
|
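A minimal usage sketch of the `predict_log_proba` contract shown in the pair above, assuming any scikit-learn classifier exposing that method (LogisticRegression is used here purely for illustration): the exponentiated rows are posterior probabilities and sum to 1.

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)
log_proba = clf.predict_log_proba(X[:5])                 # shape (5, n_classes)
assert np.allclose(np.exp(log_proba).sum(axis=1), 1.0)   # rows are log-probabilities
```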
def _xi_cluster(reachability_plot, predecessor_plot, ordering, xi, min_samples, min_cluster_size, predecessor_correction):
"""Automatically extract clusters according to the Xi-steep method.
This is roughly an implementation of Figure 19 of the OPTICS paper.
Parameters
----------
reachability_plot : array-like of shape (n_samples,)
The reachability plot, i.e. reachability ordered according to
the calculated ordering, all computed by OPTICS.
predecessor_plot : array-like of shape (n_samples,)
Predecessors ordered according to the calculated ordering.
xi : float, between 0 and 1
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
min_samples : int > 1
The same as the min_samples given to OPTICS. Up and down steep regions
can't have more than ``min_samples`` consecutive non-steep points.
min_cluster_size : int > 1
Minimum number of samples in an OPTICS cluster.
predecessor_correction : bool
Correct clusters based on the calculated predecessors.
Returns
-------
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of [start, end] in each row, with all
indices inclusive. The clusters are ordered in a way that larger
clusters encompassing smaller clusters come after those smaller
clusters.
"""
reachability_plot = np.hstack((reachability_plot, np.inf))
xi_complement = 1 - xi
sdas = []
clusters = []
index = 0
mib = 0.0
with np.errstate(invalid='ignore'):
ratio = reachability_plot[:-1] / reachability_plot[1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
if steep_index < index:
continue
mib = max(mib, np.max(reachability_plot[index:steep_index + 1]))
if steep_downward[steep_index]:
if np.isinf(mib):
sdas = []
res = [sda for sda in sdas if mib <= reachability_plot[sda['start']] * xi_complement]
for sda in res:
sda['mib'] = max(sda['mib'], mib)
sdas = res
D_start = steep_index
n_samples = len(steep_downward)
non_xward_points = 0
index = D_start
end = D_start
while index < n_samples:
if steep_downward[index]:
non_xward_points = 0
end = index
elif not upward[index]:
non_xward_points += 1
if non_xward_points > min_samples:
break
else:
D_end = end
index += 1
D_end = end
D = {'start': D_start, 'end': D_end, 'mib': 0.0}
sdas.append(D)
index = D_end + 1
mib = reachability_plot[index]
else:
if np.isinf(mib):
sdas = []
res = [sda for sda in sdas if mib <= reachability_plot[sda['start']] * xi_complement]
for sda in res:
sda['mib'] = max(sda['mib'], mib)
sdas = res
U_start = steep_index
n_samples = len(steep_upward)
non_xward_points = 0
index = U_start
end = U_start
while index < n_samples:
if steep_upward[index]:
non_xward_points = 0
end = index
elif not downward[index]:
non_xward_points += 1
if non_xward_points > min_samples:
break
else:
U_end = end
index += 1
U_end = end
index = U_end + 1
mib = reachability_plot[index]
U_clusters = []
for D in sdas:
c_start = D['start']
c_end = U_end
if reachability_plot[c_end + 1] * xi_complement < D['mib']:
continue
D_max = reachability_plot[D['start']]
if D_max * xi_complement >= reachability_plot[c_end + 1]:
while reachability_plot[c_start + 1] > reachability_plot[c_end + 1] and c_start < D['end']:
c_start += 1
elif reachability_plot[c_end + 1] * xi_complement >= D_max:
while reachability_plot[c_end - 1] > D_max and c_end > U_start:
c_end -= 1
if predecessor_correction:
while c_start < c_end:
if reachability_plot[c_start] > reachability_plot[c_end]:
(c_start, c_end) = (c_start, c_end)
p_e = ordering[predecessor_plot[c_end]]
for i in range(c_start, c_end):
if p_e == ordering[i]:
(c_start, c_end) = (c_start, c_end)
c_end -= 1
(c_start, c_end) = (None, None)
if c_start is None:
continue
if c_end - c_start + 1 < min_cluster_size:
continue
if c_start > D['end']:
continue
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
U_clusters.reverse()
clusters.extend(U_clusters)
return np.array(clusters)
|
def _xi_cluster(reachability_plot, predecessor_plot, ordering, xi, min_samples, min_cluster_size, predecessor_correction):
"""Automatically extract clusters according to the Xi-steep method.
This is roughly an implementation of Figure 19 of the OPTICS paper.
Parameters
----------
reachability_plot : array-like of shape (n_samples,)
The reachability plot, i.e. reachability ordered according to
the calculated ordering, all computed by OPTICS.
predecessor_plot : array-like of shape (n_samples,)
Predecessors ordered according to the calculated ordering.
xi : float, between 0 and 1
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
min_samples : int > 1
The same as the min_samples given to OPTICS. Up and down steep regions
can't have more than ``min_samples`` consecutive non-steep points.
min_cluster_size : int > 1
Minimum number of samples in an OPTICS cluster.
predecessor_correction : bool
Correct clusters based on the calculated predecessors.
Returns
-------
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of [start, end] in each row, with all
indices inclusive. The clusters are ordered in a way that larger
clusters encompassing smaller clusters come after those smaller
clusters.
"""
reachability_plot = np.hstack((reachability_plot, np.inf))
xi_complement = 1 - xi
sdas = []
clusters = []
index = 0
mib = 0.0
with np.errstate(invalid='ignore'):
ratio = reachability_plot[:-1] / reachability_plot[1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
if steep_index < index:
continue
mib = max(mib, np.max(reachability_plot[index:steep_index + 1]))
if steep_downward[steep_index]:
<DeepExtract>
if np.isinf(mib):
sdas = []
res = [sda for sda in sdas if mib <= reachability_plot[sda['start']] * xi_complement]
for sda in res:
sda['mib'] = max(sda['mib'], mib)
sdas = res
</DeepExtract>
D_start = steep_index
<DeepExtract>
n_samples = len(steep_downward)
non_xward_points = 0
index = D_start
end = D_start
while index < n_samples:
if steep_downward[index]:
non_xward_points = 0
end = index
elif not upward[index]:
non_xward_points += 1
if non_xward_points > min_samples:
break
else:
D_end = end
index += 1
D_end = end
</DeepExtract>
D = {'start': D_start, 'end': D_end, 'mib': 0.0}
sdas.append(D)
index = D_end + 1
mib = reachability_plot[index]
else:
<DeepExtract>
if np.isinf(mib):
sdas = []
res = [sda for sda in sdas if mib <= reachability_plot[sda['start']] * xi_complement]
for sda in res:
sda['mib'] = max(sda['mib'], mib)
sdas = res
</DeepExtract>
U_start = steep_index
<DeepExtract>
n_samples = len(steep_upward)
non_xward_points = 0
index = U_start
end = U_start
while index < n_samples:
if steep_upward[index]:
non_xward_points = 0
end = index
elif not downward[index]:
non_xward_points += 1
if non_xward_points > min_samples:
break
else:
U_end = end
index += 1
U_end = end
</DeepExtract>
index = U_end + 1
mib = reachability_plot[index]
U_clusters = []
for D in sdas:
c_start = D['start']
c_end = U_end
if reachability_plot[c_end + 1] * xi_complement < D['mib']:
continue
D_max = reachability_plot[D['start']]
if D_max * xi_complement >= reachability_plot[c_end + 1]:
while reachability_plot[c_start + 1] > reachability_plot[c_end + 1] and c_start < D['end']:
c_start += 1
elif reachability_plot[c_end + 1] * xi_complement >= D_max:
while reachability_plot[c_end - 1] > D_max and c_end > U_start:
c_end -= 1
if predecessor_correction:
<DeepExtract>
while c_start < c_end:
if reachability_plot[c_start] > reachability_plot[c_end]:
(c_start, c_end) = (c_start, c_end)
p_e = ordering[predecessor_plot[c_end]]
for i in range(c_start, c_end):
if p_e == ordering[i]:
(c_start, c_end) = (c_start, c_end)
c_end -= 1
(c_start, c_end) = (None, None)
</DeepExtract>
if c_start is None:
continue
if c_end - c_start + 1 < min_cluster_size:
continue
if c_start > D['end']:
continue
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
U_clusters.reverse()
clusters.extend(U_clusters)
return np.array(clusters)
|
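For context, a minimal sketch of the public entry point that drives `_xi_cluster`, assuming the standard `sklearn.cluster.OPTICS` API with `cluster_method="xi"`; the `cluster_hierarchy_` attribute holds the `[start, end]` pairs described in the docstring.

```python
import numpy as np
from sklearn.cluster import OPTICS

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.3, size=(50, 2)),
               rng.normal(5, 0.3, size=(50, 2))])
optics = OPTICS(min_samples=10, xi=0.05, cluster_method="xi").fit(X)
print(optics.cluster_hierarchy_)  # [start, end] index pairs into optics.ordering_
```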
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, 'coef_', None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
self.t_ = 1.0
first_call = not hasattr(self, 'classes_')
(X, y) = self._validate_data(X, y, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', accept_large_sparse=False, reset=first_call)
(n_samples, n_features) = X.shape
_check_partial_fit_first_call(self, sample_weight)
n_classes = self.classes_.shape[0]
self._expanded_class_weight = compute_class_weight(self.class_weight, classes=self.classes_, y=y)
coef_init = _check_sample_weight(coef_init, X, dtype=X.dtype)
if getattr(self, 'coef_', None) is None or intercept_init is not None:
self._allocate_parameter_mem(n_classes=n_classes, n_features=n_features, input_dtype=X.dtype, coef_init=intercept_init, intercept_init=intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError('Number of features %d does not match previous data %d.' % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, 't_'):
self.t_ = 1.0
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=coef_init, max_iter=self.max_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=coef_init, max_iter=self.max_iter)
else:
raise ValueError('The number of classes has to be greater than one; got %d class' % n_classes)
return self
if self.tol is not None and self.tol > -np.inf and (self.n_iter_ == self.max_iter):
warnings.warn('Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.', ConvergenceWarning)
return self
|
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, 'coef_', None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
self.t_ = 1.0
<DeepExtract>
first_call = not hasattr(self, 'classes_')
(X, y) = self._validate_data(X, y, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', accept_large_sparse=False, reset=first_call)
(n_samples, n_features) = X.shape
_check_partial_fit_first_call(self, sample_weight)
n_classes = self.classes_.shape[0]
self._expanded_class_weight = compute_class_weight(self.class_weight, classes=self.classes_, y=y)
coef_init = _check_sample_weight(coef_init, X, dtype=X.dtype)
if getattr(self, 'coef_', None) is None or intercept_init is not None:
self._allocate_parameter_mem(n_classes=n_classes, n_features=n_features, input_dtype=X.dtype, coef_init=intercept_init, intercept_init=intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError('Number of features %d does not match previous data %d.' % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, 't_'):
self.t_ = 1.0
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=coef_init, max_iter=self.max_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=coef_init, max_iter=self.max_iter)
else:
raise ValueError('The number of classes has to be greater than one; got %d class' % n_classes)
return self
</DeepExtract>
if self.tol is not None and self.tol > -np.inf and (self.n_iter_ == self.max_iter):
warnings.warn('Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.', ConvergenceWarning)
return self
|
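A minimal sketch of the public `fit` path that reaches this private `_fit`, assuming the snippet belongs to an SGD-style classifier such as `sklearn.linear_model.SGDClassifier` (an assumption based on the attribute names); with `warm_start=True`, a second `fit` call reuses the previous `coef_` as `coef_init`.

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import SGDClassifier

X, y = load_iris(return_X_y=True)
clf = SGDClassifier(max_iter=1000, tol=1e-3, warm_start=True, random_state=0)
clf.fit(X, y)            # first call allocates coef_ and intercept_
clf.fit(X, y)            # warm start: previous coefficients become coef_init
print(clf.coef_.shape)   # (n_classes, n_features) in the multiclass case
```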
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'lil'], copy=self.copy, dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(n_samples, self.batch_size_, min_batch_size=self.n_components or 0):
X_batch = X[batch]
if sparse.issparse(X_batch):
X_batch = X_batch.toarray()
first_pass = not hasattr(self, 'components_')
if first_pass:
self._validate_params()
if False:
if sparse.issparse(X_batch):
raise TypeError('IncrementalPCA.partial_fit does not support sparse input. Either convert data to dense or use IncrementalPCA.fit to do so in batches.')
X_batch = self._validate_data(X_batch, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass)
(n_samples, n_features) = X_batch.shape
if first_pass:
self.components_ = None
if self.n_components is None:
if self.components_ is None:
self.n_components_ = min(n_samples, n_features)
else:
self.n_components_ = self.components_.shape[0]
elif not self.n_components <= n_features:
raise ValueError('n_components=%r invalid for n_features=%d, need more rows than columns for IncrementalPCA processing' % (self.n_components, n_features))
elif not self.n_components <= n_samples:
raise ValueError('n_components=%r must be less or equal to the batch number of samples %d.' % (self.n_components, n_samples))
else:
self.n_components_ = self.n_components
if self.components_ is not None and self.components_.shape[0] != self.n_components_:
raise ValueError('Number of input features has changed from %i to %i between calls to partial_fit! Try setting n_components to a fixed value.' % (self.components_.shape[0], self.n_components_))
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
(col_mean, col_var, n_total_samples) = _incremental_mean_and_var(X_batch, last_mean=self.mean_, last_variance=self.var_, last_sample_count=np.repeat(self.n_samples_seen_, X_batch.shape[1]))
n_total_samples = n_total_samples[0]
if self.n_samples_seen_ == 0:
X_batch -= col_mean
else:
col_batch_mean = np.mean(X_batch, axis=0)
X_batch -= col_batch_mean
mean_correction = np.sqrt(self.n_samples_seen_ / n_total_samples * n_samples) * (self.mean_ - col_batch_mean)
X_batch = np.vstack((self.singular_values_.reshape((-1, 1)) * self.components_, X_batch, mean_correction))
(U, S, Vt) = linalg.svd(X_batch, full_matrices=False, check_finite=False)
(U, Vt) = svd_flip(U, Vt, u_based_decision=False)
explained_variance = S ** 2 / (n_total_samples - 1)
explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[:self.n_components_]
self.singular_values_ = S[:self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[:self.n_components_]
self.explained_variance_ratio_ = explained_variance_ratio[:self.n_components_]
if self.n_components_ not in (n_samples, n_features):
self.noise_variance_ = explained_variance[self.n_components_:].mean()
else:
self.noise_variance_ = 0.0
return self
return self
|
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'lil'], copy=self.copy, dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(n_samples, self.batch_size_, min_batch_size=self.n_components or 0):
X_batch = X[batch]
if sparse.issparse(X_batch):
X_batch = X_batch.toarray()
<DeepExtract>
first_pass = not hasattr(self, 'components_')
if first_pass:
self._validate_params()
if False:
if sparse.issparse(X_batch):
raise TypeError('IncrementalPCA.partial_fit does not support sparse input. Either convert data to dense or use IncrementalPCA.fit to do so in batches.')
X_batch = self._validate_data(X_batch, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass)
(n_samples, n_features) = X_batch.shape
if first_pass:
self.components_ = None
if self.n_components is None:
if self.components_ is None:
self.n_components_ = min(n_samples, n_features)
else:
self.n_components_ = self.components_.shape[0]
elif not self.n_components <= n_features:
raise ValueError('n_components=%r invalid for n_features=%d, need more rows than columns for IncrementalPCA processing' % (self.n_components, n_features))
elif not self.n_components <= n_samples:
raise ValueError('n_components=%r must be less or equal to the batch number of samples %d.' % (self.n_components, n_samples))
else:
self.n_components_ = self.n_components
if self.components_ is not None and self.components_.shape[0] != self.n_components_:
raise ValueError('Number of input features has changed from %i to %i between calls to partial_fit! Try setting n_components to a fixed value.' % (self.components_.shape[0], self.n_components_))
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
(col_mean, col_var, n_total_samples) = _incremental_mean_and_var(X_batch, last_mean=self.mean_, last_variance=self.var_, last_sample_count=np.repeat(self.n_samples_seen_, X_batch.shape[1]))
n_total_samples = n_total_samples[0]
if self.n_samples_seen_ == 0:
X_batch -= col_mean
else:
col_batch_mean = np.mean(X_batch, axis=0)
X_batch -= col_batch_mean
mean_correction = np.sqrt(self.n_samples_seen_ / n_total_samples * n_samples) * (self.mean_ - col_batch_mean)
X_batch = np.vstack((self.singular_values_.reshape((-1, 1)) * self.components_, X_batch, mean_correction))
(U, S, Vt) = linalg.svd(X_batch, full_matrices=False, check_finite=False)
(U, Vt) = svd_flip(U, Vt, u_based_decision=False)
explained_variance = S ** 2 / (n_total_samples - 1)
explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[:self.n_components_]
self.singular_values_ = S[:self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[:self.n_components_]
self.explained_variance_ratio_ = explained_variance_ratio[:self.n_components_]
if self.n_components_ not in (n_samples, n_features):
self.noise_variance_ = explained_variance[self.n_components_:].mean()
else:
self.noise_variance_ = 0.0
return self
</DeepExtract>
return self
|
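A minimal usage sketch of the batched `fit` above, assuming the standard `sklearn.decomposition.IncrementalPCA` API; each batch of `batch_size` rows goes through one incremental SVD update as in the loop shown.

```python
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 20))
ipca = IncrementalPCA(n_components=5, batch_size=200).fit(X)  # 5 batches of 200 rows
print(ipca.explained_variance_ratio_.sum())
```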
def gradient_hessian_product(self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes a vector input of the same shape as gradient and
returns the matrix-vector product with the Hessian.
"""
((n_samples, n_features), n_classes) = (X.shape, self.base_loss.n_classes)
n_dof = n_features + int(self.fit_intercept)
(weights, intercept) = self.weight_intercept(coef)
if not self.base_loss.is_multiclass:
raw_prediction = X @ weights + intercept
else:
raw_prediction = X @ weights.T + intercept
(weights, intercept, raw_prediction) = (weights, intercept, raw_prediction)
if not self.base_loss.is_multiclass:
(grad_pointwise, hess_pointwise) = self.base_loss.gradient_hessian(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads)
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
hessian_sum = hess_pointwise.sum()
if sparse.issparse(X):
hX = sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples)) @ X
else:
hX = hess_pointwise[:, np.newaxis] * X
if self.fit_intercept:
hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
hX_sum = np.atleast_1d(hX_sum)
def hessp(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T @ (hX @ s[:n_features])
else:
ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
ret[:n_features] += l2_reg_strength * s[:n_features]
if self.fit_intercept:
ret[:n_features] += s[-1] * hX_sum
ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
return ret
else:
(grad_pointwise, proba) = self.base_loss.gradient_proba(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads)
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order='F')
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
def hessp(s):
s = s.reshape((n_classes, -1), order='F')
if self.fit_intercept:
s_intercept = s[:, -1]
s = s[:, :-1]
else:
s_intercept = 0
tmp = X @ s.T + s_intercept
tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis]
tmp *= proba
if sample_weight is not None:
tmp *= sample_weight[:, np.newaxis]
hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order='F')
hess_prod[:, :n_features] = tmp.T @ X + l2_reg_strength * s
if self.fit_intercept:
hess_prod[:, -1] = tmp.sum(axis=0)
if coef.ndim == 1:
return hess_prod.ravel(order='F')
else:
return hess_prod
if coef.ndim == 1:
return (grad.ravel(order='F'), hessp)
return (grad, hessp)
|
def gradient_hessian_product(self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes a vector input of the same shape as gradient and
returns the matrix-vector product with the Hessian.
"""
((n_samples, n_features), n_classes) = (X.shape, self.base_loss.n_classes)
n_dof = n_features + int(self.fit_intercept)
<DeepExtract>
(weights, intercept) = self.weight_intercept(coef)
if not self.base_loss.is_multiclass:
raw_prediction = X @ weights + intercept
else:
raw_prediction = X @ weights.T + intercept
(weights, intercept, raw_prediction) = (weights, intercept, raw_prediction)
</DeepExtract>
if not self.base_loss.is_multiclass:
(grad_pointwise, hess_pointwise) = self.base_loss.gradient_hessian(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads)
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
hessian_sum = hess_pointwise.sum()
if sparse.issparse(X):
hX = sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples)) @ X
else:
hX = hess_pointwise[:, np.newaxis] * X
if self.fit_intercept:
hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
hX_sum = np.atleast_1d(hX_sum)
def hessp(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T @ (hX @ s[:n_features])
else:
ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
ret[:n_features] += l2_reg_strength * s[:n_features]
if self.fit_intercept:
ret[:n_features] += s[-1] * hX_sum
ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
return ret
else:
(grad_pointwise, proba) = self.base_loss.gradient_proba(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads)
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order='F')
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
def hessp(s):
s = s.reshape((n_classes, -1), order='F')
if self.fit_intercept:
s_intercept = s[:, -1]
s = s[:, :-1]
else:
s_intercept = 0
tmp = X @ s.T + s_intercept
tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis]
tmp *= proba
if sample_weight is not None:
tmp *= sample_weight[:, np.newaxis]
hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order='F')
hess_prod[:, :n_features] = tmp.T @ X + l2_reg_strength * s
if self.fit_intercept:
hess_prod[:, -1] = tmp.sum(axis=0)
if coef.ndim == 1:
return hess_prod.ravel(order='F')
else:
return hess_prod
if coef.ndim == 1:
return (grad.ravel(order='F'), hessp)
return (grad, hessp)
|
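The `hessp` callable returned above is meant to be consumed by a Newton-type solver that only needs Hessian-vector products. A generic illustration with `scipy.optimize.minimize` on a toy quadratic (not the scikit-learn internals themselves):

```python
import numpy as np
from scipy.optimize import minimize

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, -1.0])

fun = lambda w: 0.5 * w @ A @ w - b @ w
grad = lambda w: A @ w - b
hessp = lambda w, s: A @ s  # Hessian-vector product; the full Hessian is never formed

res = minimize(fun, x0=np.zeros(2), jac=grad, hessp=hessp, method="trust-ncg")
print(res.x)  # solves A @ w = b
```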
def check_dataframe_column_names_consistency(name, estimator_orig):
try:
import pandas as pd
except ImportError:
raise SkipTest('pandas is not installed: not checking column name consistency for pandas')
tags = _safe_tags(estimator_orig)
is_supported_X_types = '2darray' in tags['X_types'] or 'categorical' in tags['X_types']
if not is_supported_X_types or tags['no_validation']:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
X_orig = rng.normal(size=(150, 8))
if '1darray' in _safe_tags(estimator, key='X_types'):
X_orig = X_orig[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X_orig = X_orig - X_orig.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X_orig = (X_orig - X_orig.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X_orig = X_orig - X_orig.min()
if _is_pairwise_metric(estimator):
X_orig = pairwise_distances(X_orig, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
X_orig = kernel(X_orig, X_orig)
X_orig = X_orig
(n_samples, n_features) = X_orig.shape
names = np.array([f'col_{i}' for i in range(n_features)])
X = pd.DataFrame(X_orig, columns=names)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
with warnings.catch_warnings():
warnings.filterwarnings('error', message='X does not have valid feature names', category=UserWarning, module='sklearn')
estimator.fit(X, y)
if not hasattr(estimator, 'feature_names_in_'):
raise ValueError('Estimator does not have a feature_names_in_ attribute after fitting with a dataframe')
assert isinstance(estimator.feature_names_in_, np.ndarray)
assert estimator.feature_names_in_.dtype == object
assert_array_equal(estimator.feature_names_in_, names)
module_name = estimator_orig.__module__
if module_name.startswith('sklearn.') and (not ('test_' in module_name or module_name.endswith('_testing'))) and ('feature_names_in_' not in estimator_orig.__doc__):
raise ValueError(f'Estimator {name} does not document its feature_names_in_ attribute')
check_methods = []
for method in ('predict', 'transform', 'decision_function', 'predict_proba', 'score', 'score_samples', 'predict_log_proba'):
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == 'score':
callable_method = partial(callable_method, y=y)
check_methods.append((method, callable_method))
for (_, method) in check_methods:
with warnings.catch_warnings():
warnings.filterwarnings('error', message='X does not have valid feature names', category=UserWarning, module='sklearn')
method(X)
invalid_names = [(names[::-1], 'Feature names must be in the same order as they were in fit.'), ([f'another_prefix_{i}' for i in range(n_features)], 'Feature names unseen at fit time:\n- another_prefix_0\n- another_prefix_1\n'), (names[:3], f'Feature names seen at fit time, yet now missing:\n- {min(names[3:])}\n')]
params = {key: value for (key, value) in estimator.get_params().items() if 'early_stopping' in key}
early_stopping_enabled = any((value is True for value in params.values()))
for (invalid_name, additional_message) in invalid_names:
X_bad = pd.DataFrame(X, columns=invalid_name)
expected_msg = re.escape(f'The feature names should match those that were passed during fit.\n{additional_message}')
for (name, method) in check_methods:
with raises(ValueError, match=expected_msg, err_msg=f'{name} did not raise'):
method(X_bad)
if not hasattr(estimator, 'partial_fit') or early_stopping_enabled:
continue
estimator = clone(estimator_orig)
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
with raises(ValueError, match=expected_msg):
estimator.partial_fit(X_bad, y)
|
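A minimal sketch of the behaviour this check enforces, assuming a recent scikit-learn (1.0 or later) with pandas installed: fitting on a DataFrame records `feature_names_in_`, and calling a method with mismatched column names triggers the feature-name error asserted above (a ValueError in recent releases).

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = pd.DataFrame(rng.normal(size=(20, 3)), columns=["col_0", "col_1", "col_2"])
y = rng.randint(0, 2, size=20)
clf = LogisticRegression().fit(X, y)
print(clf.feature_names_in_)  # ['col_0' 'col_1' 'col_2']
# clf.predict(X[["col_2", "col_1", "col_0"]]) would complain that the feature
# names do not match those seen during fit (an error in recent releases).
```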
def check_dataframe_column_names_consistency(name, estimator_orig):
try:
import pandas as pd
except ImportError:
raise SkipTest('pandas is not installed: not checking column name consistency for pandas')
tags = _safe_tags(estimator_orig)
is_supported_X_types = '2darray' in tags['X_types'] or 'categorical' in tags['X_types']
if not is_supported_X_types or tags['no_validation']:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
X_orig = rng.normal(size=(150, 8))
<DeepExtract>
if '1darray' in _safe_tags(estimator, key='X_types'):
X_orig = X_orig[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X_orig = X_orig - X_orig.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X_orig = (X_orig - X_orig.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X_orig = X_orig - X_orig.min()
if _is_pairwise_metric(estimator):
X_orig = pairwise_distances(X_orig, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
X_orig = kernel(X_orig, X_orig)
X_orig = X_orig
</DeepExtract>
(n_samples, n_features) = X_orig.shape
names = np.array([f'col_{i}' for i in range(n_features)])
X = pd.DataFrame(X_orig, columns=names)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
with warnings.catch_warnings():
warnings.filterwarnings('error', message='X does not have valid feature names', category=UserWarning, module='sklearn')
estimator.fit(X, y)
if not hasattr(estimator, 'feature_names_in_'):
raise ValueError('Estimator does not have a feature_names_in_ attribute after fitting with a dataframe')
assert isinstance(estimator.feature_names_in_, np.ndarray)
assert estimator.feature_names_in_.dtype == object
assert_array_equal(estimator.feature_names_in_, names)
module_name = estimator_orig.__module__
if module_name.startswith('sklearn.') and (not ('test_' in module_name or module_name.endswith('_testing'))) and ('feature_names_in_' not in estimator_orig.__doc__):
raise ValueError(f'Estimator {name} does not document its feature_names_in_ attribute')
check_methods = []
for method in ('predict', 'transform', 'decision_function', 'predict_proba', 'score', 'score_samples', 'predict_log_proba'):
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == 'score':
callable_method = partial(callable_method, y=y)
check_methods.append((method, callable_method))
for (_, method) in check_methods:
with warnings.catch_warnings():
warnings.filterwarnings('error', message='X does not have valid feature names', category=UserWarning, module='sklearn')
method(X)
invalid_names = [(names[::-1], 'Feature names must be in the same order as they were in fit.'), ([f'another_prefix_{i}' for i in range(n_features)], 'Feature names unseen at fit time:\n- another_prefix_0\n- another_prefix_1\n'), (names[:3], f'Feature names seen at fit time, yet now missing:\n- {min(names[3:])}\n')]
params = {key: value for (key, value) in estimator.get_params().items() if 'early_stopping' in key}
early_stopping_enabled = any((value is True for value in params.values()))
for (invalid_name, additional_message) in invalid_names:
X_bad = pd.DataFrame(X, columns=invalid_name)
expected_msg = re.escape(f'The feature names should match those that were passed during fit.\n{additional_message}')
for (name, method) in check_methods:
with raises(ValueError, match=expected_msg, err_msg=f'{name} did not raise'):
method(X_bad)
if not hasattr(estimator, 'partial_fit') or early_stopping_enabled:
continue
estimator = clone(estimator_orig)
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
with raises(ValueError, match=expected_msg):
estimator.partial_fit(X_bad, y)
|
def docutils_node_to_jinja(list_item, only_pages=False, numbered=False):
"""Convert a docutils node to a structure that can be read by Jinja.
Parameters
----------
list_item : docutils list_item node
A parent item, potentially with children, corresponding to the level
of a TocTree.
only_pages : bool
Only include items for full pages in the output dictionary. Exclude
anchor links (TOC items with a URL that starts with #)
numbered: bool
Whether to add section number to title
Returns
-------
nav : dict
The TocTree, converted into a dictionary with key/values that work
within Jinja.
"""
if not list_item.children:
return None
reference = list_item.children[0].children[0]
title = reference.astext()
url = reference.attributes['refuri']
active = 'current' in list_item.attributes['classes']
secnumber = reference.attributes.get('secnumber', None)
if numbered and secnumber is not None:
secnumber = '.'.join((str(n) for n in secnumber))
title = f'{secnumber}. {title}'
if only_pages and '#' in url:
return None
nav = {}
nav['title'] = title
nav['url'] = url
nav['active'] = active
nav['children'] = []
if len(list_item.children) > 1:
subpage_list = list_item.children[1].children
for sub_page in subpage_list:
if not sub_page.children:
child_nav = None
reference = sub_page.children[0].children[0]
title = reference.astext()
url = reference.attributes['refuri']
active = 'current' in sub_page.attributes['classes']
secnumber = reference.attributes.get('secnumber', None)
if numbered and secnumber is not None:
secnumber = '.'.join((str(n) for n in secnumber))
title = f'{secnumber}. {title}'
if only_pages and '#' in url:
child_nav = None
nav = {}
nav['title'] = title
nav['url'] = url
nav['active'] = active
nav['children'] = []
if len(sub_page.children) > 1:
subpage_list = sub_page.children[1].children
for sub_page in subpage_list:
child_nav = docutils_node_to_jinja(sub_page, only_pages=only_pages, numbered=numbered)
if child_nav is not None:
nav['children'].append(child_nav)
child_nav = nav
if child_nav is not None:
nav['children'].append(child_nav)
return nav
|
def docutils_node_to_jinja(list_item, only_pages=False, numbered=False):
"""Convert a docutils node to a structure that can be read by Jinja.
Parameters
----------
list_item : docutils list_item node
A parent item, potentially with children, corresponding to the level
of a TocTree.
only_pages : bool
Only include items for full pages in the output dictionary. Exclude
anchor links (TOC items with a URL that starts with #)
numbered: bool
Whether to add section number to title
Returns
-------
nav : dict
The TocTree, converted into a dictionary with key/values that work
within Jinja.
"""
if not list_item.children:
return None
reference = list_item.children[0].children[0]
title = reference.astext()
url = reference.attributes['refuri']
active = 'current' in list_item.attributes['classes']
secnumber = reference.attributes.get('secnumber', None)
if numbered and secnumber is not None:
secnumber = '.'.join((str(n) for n in secnumber))
title = f'{secnumber}. {title}'
if only_pages and '#' in url:
return None
nav = {}
nav['title'] = title
nav['url'] = url
nav['active'] = active
nav['children'] = []
if len(list_item.children) > 1:
subpage_list = list_item.children[1].children
for sub_page in subpage_list:
<DeepExtract>
if not sub_page.children:
child_nav = None
reference = sub_page.children[0].children[0]
title = reference.astext()
url = reference.attributes['refuri']
active = 'current' in sub_page.attributes['classes']
secnumber = reference.attributes.get('secnumber', None)
if numbered and secnumber is not None:
secnumber = '.'.join((str(n) for n in secnumber))
title = f'{secnumber}. {title}'
if only_pages and '#' in url:
child_nav = None
nav = {}
nav['title'] = title
nav['url'] = url
nav['active'] = active
nav['children'] = []
if len(sub_page.children) > 1:
subpage_list = sub_page.children[1].children
for sub_page in subpage_list:
child_nav = docutils_node_to_jinja(sub_page, only_pages=only_pages, numbered=numbered)
if child_nav is not None:
nav['children'].append(child_nav)
child_nav = nav
</DeepExtract>
if child_nav is not None:
nav['children'].append(child_nav)
return nav
|
@pytest.mark.parametrize('penalty', ['l2', 'l1', 'elasticnet'])
def test_large_regularization(penalty):
_update_kwargs(kwargs)
model = linear_model.SGDClassifier(**kwargs)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
@pytest.mark.parametrize('penalty', ['l2', 'l1', 'elasticnet'])
def test_large_regularization(penalty):
<DeepExtract>
_update_kwargs(kwargs)
model = linear_model.SGDClassifier(**kwargs)
</DeepExtract>
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in range(max_iter):
(gwtx, g_wtx) = self.fun(np.dot(w.T, X), **fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
w1 -= np.linalg.multi_dot([w1, W[:j].T, W[:j]])
return w1
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return (W, max(n_iter))
|
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in range(max_iter):
<DeepExtract>
(gwtx, g_wtx) = self.fun(np.dot(w.T, X), **fun_args)
</DeepExtract>
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
<DeepExtract>
w1 -= np.linalg.multi_dot([w1, W[:j].T, W[:j]])
return w1
</DeepExtract>
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return (W, max(n_iter))
|
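A minimal sketch of the public API that exercises this deflationary loop, assuming `sklearn.decomposition.FastICA` with `algorithm="deflation"`; the fitted `n_iter_` reports an iteration count analogous to `max(n_iter)` returned above.

```python
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
S = rng.laplace(size=(1000, 2))            # two independent non-Gaussian sources
A = np.array([[1.0, 0.5], [0.5, 1.0]])     # mixing matrix
X = S @ A.T                                # observed mixtures
ica = FastICA(n_components=2, algorithm="deflation", random_state=0).fit(X)
print(ica.n_iter_)
```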
def __enter__(self):
temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
has_prescott_openblas = any((True for info in threadpool_info() if info['internal_api'] == 'openblas' and info.get('architecture', 'prescott').lower() == 'prescott'))
if has_prescott_openblas:
aligned = True
if aligned:
memmap_backed_data = _create_aligned_memmap_backed_arrays(self.data, self.mmap_mode, temp_folder)
else:
filename = op.join(temp_folder, 'data.pkl')
joblib.dump(self.data, filename)
memmap_backed_data = joblib.load(filename, mmap_mode=self.mmap_mode)
result = memmap_backed_data if not True else (memmap_backed_data, temp_folder)
(data_read_only, self.temp_folder) = result
return data_read_only
|
def __enter__(self):
<DeepExtract>
temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
has_prescott_openblas = any((True for info in threadpool_info() if info['internal_api'] == 'openblas' and info.get('architecture', 'prescott').lower() == 'prescott'))
if has_prescott_openblas:
aligned = True
if aligned:
memmap_backed_data = _create_aligned_memmap_backed_arrays(self.data, self.mmap_mode, temp_folder)
else:
filename = op.join(temp_folder, 'data.pkl')
joblib.dump(self.data, filename)
memmap_backed_data = joblib.load(filename, mmap_mode=self.mmap_mode)
result = memmap_backed_data if not True else (memmap_backed_data, temp_folder)
(data_read_only, self.temp_folder) = result
</DeepExtract>
return data_read_only
|
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : ndarray of shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self)
v = self._validate_data(X, accept_sparse='csr', reset=False)
rng = check_random_state(self.random_state)
ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
if sp.issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_).sum(axis=1)
fe_ = -safe_sparse_dot(v_, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v_, self.components_.T) + self.intercept_hidden_).sum(axis=1)
return v.shape[1] * log_logistic(fe_ - fe)
|
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : ndarray of shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self)
v = self._validate_data(X, accept_sparse='csr', reset=False)
rng = check_random_state(self.random_state)
ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
if sp.issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
<DeepExtract>
fe = -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_).sum(axis=1)
</DeepExtract>
<DeepExtract>
fe_ = -safe_sparse_dot(v_, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v_, self.components_.T) + self.intercept_hidden_).sum(axis=1)
</DeepExtract>
return v.shape[1] * log_logistic(fe_ - fe)
|
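A minimal usage sketch, assuming this `score_samples` belongs to `sklearn.neural_network.BernoulliRBM` (consistent with the `components_` and intercept attributes used in the free-energy terms):

```python
import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(100, 16) > 0.5).astype(np.float64)   # binary visible units
rbm = BernoulliRBM(n_components=8, n_iter=5, random_state=0).fit(X)
print(rbm.score_samples(X)[:5])  # pseudo-likelihood proxy, one value per sample
```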
def decode_rows(self, stream, conversors):
(data, rows, cols) = ([], [], [])
for (i, row) in enumerate(stream):
if not _RE_NONTRIVIAL_DATA.search(row):
values = [None if row in ('?', '') else row for row in next(csv.reader([row]))]
(values, errors) = zip(*_RE_DENSE_VALUES.findall(',' + row))
if not any(errors):
values = [_unquote(v) for v in values]
if _RE_SPARSE_LINE.match(row):
try:
values = {int(k): _unquote(v) for (k, v) in _RE_SPARSE_KEY_VALUES.findall(row)}
except ValueError:
for match in _RE_SPARSE_KEY_VALUES.finditer(row):
if not match.group(1):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
else:
for match in _RE_DENSE_VALUES.finditer(row):
if match.group(2):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
if not isinstance(values, dict):
raise BadLayout()
if not values:
continue
(row_cols, values) = zip(*sorted(values.items()))
try:
values = [value if value is None else conversors[key](value) for (key, value) in zip(row_cols, values)]
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue()
raise
except IndexError:
raise BadDataFormat(row)
data.extend(values)
rows.extend([i] * len(values))
cols.extend(row_cols)
return (data, rows, cols)
|
def decode_rows(self, stream, conversors):
(data, rows, cols) = ([], [], [])
for (i, row) in enumerate(stream):
<DeepExtract>
if not _RE_NONTRIVIAL_DATA.search(row):
values = [None if row in ('?', '') else row for row in next(csv.reader([row]))]
(values, errors) = zip(*_RE_DENSE_VALUES.findall(',' + row))
if not any(errors):
values = [_unquote(v) for v in values]
if _RE_SPARSE_LINE.match(row):
try:
values = {int(k): _unquote(v) for (k, v) in _RE_SPARSE_KEY_VALUES.findall(row)}
except ValueError:
for match in _RE_SPARSE_KEY_VALUES.finditer(row):
if not match.group(1):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
else:
for match in _RE_DENSE_VALUES.finditer(row):
if match.group(2):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
</DeepExtract>
if not isinstance(values, dict):
raise BadLayout()
if not values:
continue
(row_cols, values) = zip(*sorted(values.items()))
try:
values = [value if value is None else conversors[key](value) for (key, value) in zip(row_cols, values)]
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue()
raise
except IndexError:
raise BadDataFormat(row)
data.extend(values)
rows.extend([i] * len(values))
cols.extend(row_cols)
return (data, rows, cols)
|
def test_simple_impute_pd_na():
pd = pytest.importorskip('pandas')
df = pd.DataFrame({'feature': pd.Series(['abc', None, 'de'], dtype='string')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='constant', fill_value='na')
assert_array_equal(imputer.fit_transform(df), np.array([['abc'], ['na'], ['de']], dtype=object))
assert imputer.fit_transform(df).dtype == np.array([['abc'], ['na'], ['de']], dtype=object).dtype
df = pd.DataFrame({'feature': pd.Series(['abc', 'de', 'fgh'], dtype='string')})
imputer = SimpleImputer(fill_value='ok', strategy='constant')
assert_array_equal(imputer.fit_transform(df), np.array([['abc'], ['de'], ['fgh']], dtype=object))
assert imputer.fit_transform(df).dtype == np.array([['abc'], ['de'], ['fgh']], dtype=object).dtype
df = pd.DataFrame({'feature': pd.Series([1, None, 3], dtype='Int64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='constant', fill_value=-1)
assert_allclose(imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [-1], [3]], dtype='float64').dtype
imputer = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=-1)
assert_allclose(imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [-1], [3]], dtype='float64').dtype
df = pd.DataFrame({'feature': pd.Series([1, None, 2, 3], dtype='Int64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='median')
assert_allclose(imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [2], [2], [3]], dtype='float64').dtype
df = pd.DataFrame({'feature': pd.Series([1, None, 2], dtype='Int64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='mean')
assert_allclose(imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [1.5], [2]], dtype='float64').dtype
df = pd.DataFrame({'feature': pd.Series([1.0, None, 3.0], dtype='float64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='constant', fill_value=-2.0)
assert_allclose(imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1.0], [-2.0], [3.0]], dtype='float64').dtype
df = pd.DataFrame({'feature': pd.Series([1.0, None, 2.0, 3.0], dtype='float64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='median')
assert_allclose(imputer.fit_transform(df), np.array([[1.0], [2.0], [2.0], [3.0]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1.0], [2.0], [2.0], [3.0]], dtype='float64').dtype
|
def test_simple_impute_pd_na():
pd = pytest.importorskip('pandas')
df = pd.DataFrame({'feature': pd.Series(['abc', None, 'de'], dtype='string')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='constant', fill_value='na')
<DeepExtract>
assert_array_equal(imputer.fit_transform(df), np.array([['abc'], ['na'], ['de']], dtype=object))
assert imputer.fit_transform(df).dtype == np.array([['abc'], ['na'], ['de']], dtype=object).dtype
</DeepExtract>
df = pd.DataFrame({'feature': pd.Series(['abc', 'de', 'fgh'], dtype='string')})
imputer = SimpleImputer(fill_value='ok', strategy='constant')
<DeepExtract>
assert_array_equal(imputer.fit_transform(df), np.array([['abc'], ['de'], ['fgh']], dtype=object))
assert imputer.fit_transform(df).dtype == np.array([['abc'], ['de'], ['fgh']], dtype=object).dtype
</DeepExtract>
df = pd.DataFrame({'feature': pd.Series([1, None, 3], dtype='Int64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='constant', fill_value=-1)
<DeepExtract>
assert_allclose(imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [-1], [3]], dtype='float64').dtype
</DeepExtract>
imputer = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=-1)
<DeepExtract>
assert_allclose(imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [-1], [3]], dtype='float64').dtype
</DeepExtract>
df = pd.DataFrame({'feature': pd.Series([1, None, 2, 3], dtype='Int64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='median')
<DeepExtract>
assert_allclose(imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [2], [2], [3]], dtype='float64').dtype
</DeepExtract>
df = pd.DataFrame({'feature': pd.Series([1, None, 2], dtype='Int64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='mean')
<DeepExtract>
assert_allclose(imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1], [1.5], [2]], dtype='float64').dtype
</DeepExtract>
df = pd.DataFrame({'feature': pd.Series([1.0, None, 3.0], dtype='float64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='constant', fill_value=-2.0)
<DeepExtract>
assert_allclose(imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1.0], [-2.0], [3.0]], dtype='float64').dtype
</DeepExtract>
df = pd.DataFrame({'feature': pd.Series([1.0, None, 2.0, 3.0], dtype='float64')})
imputer = SimpleImputer(missing_values=pd.NA, strategy='median')
<DeepExtract>
assert_allclose(imputer.fit_transform(df), np.array([[1.0], [2.0], [2.0], [3.0]], dtype='float64'))
assert imputer.fit_transform(df).dtype == np.array([[1.0], [2.0], [2.0], [3.0]], dtype='float64').dtype
</DeepExtract>
|
def test_missing_values_minmax_imputation():
class MinMaxImputer(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
mm = MinMaxScaler().fit(X)
self.data_min_ = mm.data_min_
self.data_max_ = mm.data_max_
return self
def transform(self, X):
(X_min, X_max) = (X.copy(), X.copy())
for feature_idx in range(X.shape[1]):
nan_mask = np.isnan(X[:, feature_idx])
X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
return np.concatenate([X_min, X_max], axis=1)
def make_missing_value_data(n_samples=int(10000.0), seed=0):
rng = np.random.RandomState(seed)
(X, y) = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
X = KBinsDiscretizer(n_bins=42, encode='ordinal').fit_transform(X)
rnd_mask = rng.rand(X.shape[0]) > 0.9
X[rnd_mask, 0] = np.nan
low_mask = X[:, 1] == 0
X[low_mask, 1] = np.nan
high_mask = X[:, 2] == X[:, 2].max()
X[high_mask, 2] = np.nan
y_max = np.percentile(y, 70)
y_max_mask = y >= y_max
y[y_max_mask] = y_max
X[y_max_mask, 3] = np.nan
for feature_idx in range(X.shape[1]):
assert any(np.isnan(X[:, feature_idx]))
return train_test_split(X, y, random_state=rng)
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=int(10000.0), n_features=4, random_state=rng)
X = KBinsDiscretizer(n_bins=42, encode='ordinal').fit_transform(X)
rnd_mask = rng.rand(X.shape[0]) > 0.9
X[rnd_mask, 0] = np.nan
low_mask = X[:, 1] == 0
X[low_mask, 1] = np.nan
high_mask = X[:, 2] == X[:, 2].max()
X[high_mask, 2] = np.nan
y_max = np.percentile(y, 70)
y_max_mask = y >= y_max
y[y_max_mask] = y_max
X[y_max_mask, 3] = np.nan
for feature_idx in range(X.shape[1]):
assert any(np.isnan(X[:, feature_idx]))
(X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=rng)
gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
gbm1.fit(X_train, y_train)
gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
gbm2.fit(X_train, y_train)
assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
|
def test_missing_values_minmax_imputation():
class MinMaxImputer(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
mm = MinMaxScaler().fit(X)
self.data_min_ = mm.data_min_
self.data_max_ = mm.data_max_
return self
def transform(self, X):
(X_min, X_max) = (X.copy(), X.copy())
for feature_idx in range(X.shape[1]):
nan_mask = np.isnan(X[:, feature_idx])
X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
return np.concatenate([X_min, X_max], axis=1)
def make_missing_value_data(n_samples=int(10000.0), seed=0):
rng = np.random.RandomState(seed)
(X, y) = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
X = KBinsDiscretizer(n_bins=42, encode='ordinal').fit_transform(X)
rnd_mask = rng.rand(X.shape[0]) > 0.9
X[rnd_mask, 0] = np.nan
low_mask = X[:, 1] == 0
X[low_mask, 1] = np.nan
high_mask = X[:, 2] == X[:, 2].max()
X[high_mask, 2] = np.nan
y_max = np.percentile(y, 70)
y_max_mask = y >= y_max
y[y_max_mask] = y_max
X[y_max_mask, 3] = np.nan
for feature_idx in range(X.shape[1]):
assert any(np.isnan(X[:, feature_idx]))
return train_test_split(X, y, random_state=rng)
<DeepExtract>
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=int(10000.0), n_features=4, random_state=rng)
X = KBinsDiscretizer(n_bins=42, encode='ordinal').fit_transform(X)
rnd_mask = rng.rand(X.shape[0]) > 0.9
X[rnd_mask, 0] = np.nan
low_mask = X[:, 1] == 0
X[low_mask, 1] = np.nan
high_mask = X[:, 2] == X[:, 2].max()
X[high_mask, 2] = np.nan
y_max = np.percentile(y, 70)
y_max_mask = y >= y_max
y[y_max_mask] = y_max
X[y_max_mask, 3] = np.nan
for feature_idx in range(X.shape[1]):
assert any(np.isnan(X[:, feature_idx]))
(X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=rng)
</DeepExtract>
gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
gbm1.fit(X_train, y_train)
gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
gbm2.fit(X_train, y_train)
assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
|
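A minimal standalone sketch of the min/max imputation trick the MinMaxImputer pair above relies on (numpy only; the array and shift values are illustrative): each NaN is pushed below the column minimum in one copy of the feature and above the column maximum in a second copy, so a tree-based model can route missing values to either side of a split.
import numpy as np
X = np.array([[1.0, np.nan],
              [2.0, 5.0],
              [np.nan, 7.0]])
col_min = np.nanmin(X, axis=0)
col_max = np.nanmax(X, axis=0)
X_min, X_max = X.copy(), X.copy()
rows, cols = np.nonzero(np.isnan(X))
X_min[rows, cols] = col_min[cols] - 1   # "missing goes left"
X_max[rows, cols] = col_max[cols] + 1   # "missing goes right"
X_imputed = np.concatenate([X_min, X_max], axis=1)   # shape (3, 4), no NaN left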
def fixed_classes_uniform_labelings_scores(score_func, n_samples, n_clusters_range, n_classes, n_runs=5):
scores = np.zeros((len(n_clusters_range), n_runs))
labels_a = rng.randint(low=0, high=n_classes, size=n_samples)
for (i, n_clusters) in enumerate(n_clusters_range):
for j in range(n_runs):
labels_b = rng.randint(low=0, high=n_clusters, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
|
def fixed_classes_uniform_labelings_scores(score_func, n_samples, n_clusters_range, n_classes, n_runs=5):
scores = np.zeros((len(n_clusters_range), n_runs))
<DeepExtract>
labels_a = rng.randint(low=0, high=n_classes, size=n_samples)
</DeepExtract>
for (i, n_clusters) in enumerate(n_clusters_range):
for j in range(n_runs):
<DeepExtract>
labels_b = rng.randint(low=0, high=n_clusters, size=n_samples)
</DeepExtract>
scores[i, j] = score_func(labels_a, labels_b)
return scores
|
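A usage sketch for the helper above. Note that `rng` is assumed to be a module-level RandomState (as in the scikit-learn example this snippet comes from), so it is defined explicitly here; adjusted_rand_score is just one possible score_func.
import numpy as np
from sklearn.metrics import adjusted_rand_score
rng = np.random.RandomState(0)
scores = fixed_classes_uniform_labelings_scores(
    adjusted_rand_score, n_samples=1000, n_clusters_range=[2, 5, 10], n_classes=10)
print(scores.mean(axis=1))   # close to 0: ARI is adjusted for chance on random labelings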
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=0.0001, verbose=0, X_offset=None, X_scale=None, sample_weight_sqrt=None):
if sample_weight_sqrt is None:
sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
(n_samples, n_features) = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
C = sp_linalg.LinearOperator((n_samples, n_samples), matvec=mv, dtype=X.dtype)
try:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator((n_features, n_features), matvec=mv, dtype=X.dtype)
try:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol, atol='legacy')
except TypeError:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError('Failed with error code %d' % info)
if max_iter is None and info > 0 and verbose:
warnings.warn('sparse_cg did not converge after %d iterations.' % info, ConvergenceWarning)
return coefs
|
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=0.0001, verbose=0, X_offset=None, X_scale=None, sample_weight_sqrt=None):
if sample_weight_sqrt is None:
sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
(n_samples, n_features) = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X_offset_scale = X_offset / X_scale
<DeepExtract>
def matvec(b):
return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
</DeepExtract>
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
<DeepExtract>
mv = create_mv(alpha[i])
</DeepExtract>
if n_features > n_samples:
C = sp_linalg.LinearOperator((n_samples, n_samples), matvec=mv, dtype=X.dtype)
try:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
(coef, info) = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator((n_features, n_features), matvec=mv, dtype=X.dtype)
try:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol, atol='legacy')
except TypeError:
(coefs[i], info) = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError('Failed with error code %d' % info)
if max_iter is None and info > 0 and verbose:
warnings.warn('sparse_cg did not converge after %d iterations.' % info, ConvergenceWarning)
return coefs
|
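A matrix-free sketch of the normal-equations operator that _solve_sparse_cg builds in its n_samples >= n_features branch, solving (X^T X + alpha I) w = X^T y with SciPy's conjugate gradient; the toy sizes and alpha are arbitrary.
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
rng = np.random.RandomState(0)
X = sparse.random(100, 20, density=0.3, random_state=rng, format='csr')
y = rng.randn(100)
alpha = 1.0
X1 = sp_linalg.aslinearoperator(X)
def mv(w):
    return X1.rmatvec(X1.matvec(w)) + alpha * w   # (X^T X + alpha I) @ w, never forming X^T X
C = sp_linalg.LinearOperator((20, 20), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, X1.rmatvec(y))
assert info == 0   # 0 means CG converged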
@ignore_warnings
def check_estimators_pickle(name, estimator_orig, readonly_memmap=False):
"""Test that we can pickle all estimators."""
check_methods = ['predict', 'transform', 'decision_function', 'predict_proba']
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = rbf_kernel(X, X)
X = X
tags = _safe_tags(estimator_orig)
if tags['allow_nan']:
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
set_random_state(estimator)
estimator.fit(X, y)
if readonly_memmap:
unpickled_estimator = create_memmap_backed_data(estimator)
else:
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith('sklearn.') and (not ('test_' in module_name or module_name.endswith('_testing'))):
assert b'version' in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
|
@ignore_warnings
def check_estimators_pickle(name, estimator_orig, readonly_memmap=False):
"""Test that we can pickle all estimators."""
check_methods = ['predict', 'transform', 'decision_function', 'predict_proba']
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = rbf_kernel(X, X)
X = X
</DeepExtract>
tags = _safe_tags(estimator_orig)
if tags['allow_nan']:
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
set_random_state(estimator)
estimator.fit(X, y)
if readonly_memmap:
unpickled_estimator = create_memmap_backed_data(estimator)
else:
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith('sklearn.') and (not ('test_' in module_name or module_name.endswith('_testing'))):
assert b'version' in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
|
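The core of the pickling check above, reduced to a minimal round-trip on one concrete estimator (LogisticRegression is just an example choice).
import pickle
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(n_samples=30, centers=2, random_state=0)
clf = LogisticRegression().fit(X, y)
clf_unpickled = pickle.loads(pickle.dumps(clf))
np.testing.assert_allclose(clf.predict_proba(X), clf_unpickled.predict_proba(X))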
@validate_params({'subset': [StrOptions({'train', 'test', 'all'})], 'remove': [tuple], 'data_home': [str, None], 'download_if_missing': ['boolean'], 'return_X_y': ['boolean'], 'normalize': ['boolean'], 'as_frame': ['boolean']})
def fetch_20newsgroups_vectorized(*, subset='train', remove=(), data_home=None, download_if_missing=True, return_X_y=False, normalize=True, as_frame=False):
"""Load and vectorize the 20 newsgroups dataset (classification).
Download it if necessary.
This is a convenience function; the transformation is done using the
default settings for
:class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom
:class:`~sklearn.feature_extraction.text.CountVectorizer`,
:class:`~sklearn.feature_extraction.text.HashingVectorizer`,
:class:`~sklearn.feature_extraction.text.TfidfTransformer` or
:class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
The resulting counts are normalized using
:func:`sklearn.preprocessing.normalize` unless normalize is set to False.
================= ==========
Classes 20
Samples total 18846
Dimensionality 130107
Features real
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
data_home : str, default=None
Specify an download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
normalize : bool, default=True
If True, normalizes each document's feature vector to unit norm using
:func:`sklearn.preprocessing.normalize`.
.. versionadded:: 0.22
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string, or categorical). The target is
a pandas DataFrame or Series depending on the number of
`target_columns`.
.. versionadded:: 0.24
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: {sparse matrix, dataframe} of shape (n_samples, n_features)
The input data matrix. If ``as_frame`` is `True`, ``data`` is
a pandas DataFrame with sparse columns.
target: {ndarray, series} of shape (n_samples,)
The target labels. If ``as_frame`` is `True`, ``target`` is a
pandas Series.
target_names: list of shape (n_classes,)
The names of target classes.
DESCR: str
The full description of the dataset.
frame: dataframe of shape (n_samples, n_features + 1)
Only present when `as_frame=True`. Pandas DataFrame with ``data``
and ``target``.
.. versionadded:: 0.24
(data, target) : tuple if ``return_X_y`` is True
`data` and `target` would be of the format defined in the `Bunch`
description above.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + '-'.join(remove)
target_file = _pkl_filepath(data_home, filebase + '.pkl')
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, '20news_home')
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info('Downloading 20news dataset. This may take a few minutes.')
cache = _download_20newsgroups(target_dir=twenty_home, cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if 'train' in ('train', 'test'):
data = cache['train']
elif 'train' == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
fdescr = load_descr('twenty_newsgroups.rst')
data.DESCR = fdescr
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if None is not None:
labels = [(data.target_names.index(cat), cat) for cat in None]
labels.sort()
(labels, _) = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
data.target = np.searchsorted(labels, data.target)
data.target_names = list(None)
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if True:
random_state = check_random_state(12)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
data_train = (data.data, data.target)
data_train = data
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, '20news_home')
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info('Downloading 20news dataset. This may take a few minutes.')
cache = _download_20newsgroups(target_dir=twenty_home, cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if 'test' in ('train', 'test'):
data = cache['test']
elif 'test' == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
fdescr = load_descr('twenty_newsgroups.rst')
data.DESCR = fdescr
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if None is not None:
labels = [(data.target_names.index(cat), cat) for cat in None]
labels.sort()
(labels, _) = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
data.target = np.searchsorted(labels, data.target)
data.target_names = list(None)
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if True:
random_state = check_random_state(12)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
data_test = (data.data, data.target)
data_test = data
if os.path.exists(target_file):
try:
(X_train, X_test, feature_names) = joblib.load(target_file)
except ValueError as e:
raise ValueError(f'The cached dataset located in {target_file} was fetched with an older scikit-learn version and it is not compatible with the scikit-learn version imported. You need to manually delete the file: {target_file}.') from e
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
feature_names = vectorizer.get_feature_names_out()
joblib.dump((X_train, X_test, feature_names), target_file, compress=9)
if normalize:
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
preprocessing.normalize(X_train, copy=False)
preprocessing.normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == 'train':
data = X_train
target = data_train.target
elif subset == 'test':
data = X_test
target = data_test.target
elif subset == 'all':
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
fdescr = load_descr('twenty_newsgroups.rst')
frame = None
target_name = ['category_class']
if as_frame:
(frame, data, target) = _convert_data_dataframe('fetch_20newsgroups_vectorized', data, target, feature_names, target_names=target_name, sparse_data=True)
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=fdescr)
|
@validate_params({'subset': [StrOptions({'train', 'test', 'all'})], 'remove': [tuple], 'data_home': [str, None], 'download_if_missing': ['boolean'], 'return_X_y': ['boolean'], 'normalize': ['boolean'], 'as_frame': ['boolean']})
def fetch_20newsgroups_vectorized(*, subset='train', remove=(), data_home=None, download_if_missing=True, return_X_y=False, normalize=True, as_frame=False):
"""Load and vectorize the 20 newsgroups dataset (classification).
Download it if necessary.
This is a convenience function; the transformation is done using the
default settings for
:class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom
:class:`~sklearn.feature_extraction.text.CountVectorizer`,
:class:`~sklearn.feature_extraction.text.HashingVectorizer`,
:class:`~sklearn.feature_extraction.text.TfidfTransformer` or
:class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
The resulting counts are normalized using
:func:`sklearn.preprocessing.normalize` unless normalize is set to False.
================= ==========
Classes 20
Samples total 18846
Dimensionality 130107
Features real
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
data_home : str, default=None
Specify an download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
normalize : bool, default=True
If True, normalizes each document's feature vector to unit norm using
:func:`sklearn.preprocessing.normalize`.
.. versionadded:: 0.22
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string, or categorical). The target is
a pandas DataFrame or Series depending on the number of
`target_columns`.
.. versionadded:: 0.24
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: {sparse matrix, dataframe} of shape (n_samples, n_features)
The input data matrix. If ``as_frame`` is `True`, ``data`` is
a pandas DataFrame with sparse columns.
target: {ndarray, series} of shape (n_samples,)
The target labels. If ``as_frame`` is `True`, ``target`` is a
pandas Series.
target_names: list of shape (n_classes,)
The names of target classes.
DESCR: str
The full description of the dataset.
frame: dataframe of shape (n_samples, n_features + 1)
Only present when `as_frame=True`. Pandas DataFrame with ``data``
and ``target``.
.. versionadded:: 0.24
(data, target) : tuple if ``return_X_y`` is True
`data` and `target` would be of the format defined in the `Bunch`
description above.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + '-'.join(remove)
target_file = _pkl_filepath(data_home, filebase + '.pkl')
<DeepExtract>
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, '20news_home')
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info('Downloading 20news dataset. This may take a few minutes.')
cache = _download_20newsgroups(target_dir=twenty_home, cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if 'train' in ('train', 'test'):
data = cache['train']
elif 'train' == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
fdescr = load_descr('twenty_newsgroups.rst')
data.DESCR = fdescr
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if None is not None:
labels = [(data.target_names.index(cat), cat) for cat in None]
labels.sort()
(labels, _) = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
data.target = np.searchsorted(labels, data.target)
data.target_names = list(None)
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if True:
random_state = check_random_state(12)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
data_train = (data.data, data.target)
data_train = data
</DeepExtract>
<DeepExtract>
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, '20news_home')
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info('Downloading 20news dataset. This may take a few minutes.')
cache = _download_20newsgroups(target_dir=twenty_home, cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if 'test' in ('train', 'test'):
data = cache['test']
elif 'test' == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
fdescr = load_descr('twenty_newsgroups.rst')
data.DESCR = fdescr
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if None is not None:
labels = [(data.target_names.index(cat), cat) for cat in None]
labels.sort()
(labels, _) = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
data.target = np.searchsorted(labels, data.target)
data.target_names = list(None)
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if True:
random_state = check_random_state(12)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
data_test = (data.data, data.target)
data_test = data
</DeepExtract>
if os.path.exists(target_file):
try:
(X_train, X_test, feature_names) = joblib.load(target_file)
except ValueError as e:
raise ValueError(f'The cached dataset located in {target_file} was fetched with an older scikit-learn version and it is not compatible with the scikit-learn version imported. You need to manually delete the file: {target_file}.') from e
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
feature_names = vectorizer.get_feature_names_out()
joblib.dump((X_train, X_test, feature_names), target_file, compress=9)
if normalize:
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
preprocessing.normalize(X_train, copy=False)
preprocessing.normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == 'train':
data = X_train
target = data_train.target
elif subset == 'test':
data = X_test
target = data_test.target
elif subset == 'all':
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
fdescr = load_descr('twenty_newsgroups.rst')
frame = None
target_name = ['category_class']
if as_frame:
(frame, data, target) = _convert_data_dataframe('fetch_20newsgroups_vectorized', data, target, feature_names, target_names=target_name, sparse_data=True)
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=fdescr)
|
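A quick usage sketch for the function above (the first call downloads and caches the data under data_home, so it needs network access; the reported shape is the one documented for the train subset).
from sklearn.datasets import fetch_20newsgroups_vectorized
X, y = fetch_20newsgroups_vectorized(subset='train', return_X_y=True)
print(X.shape)   # roughly (11314, 130107): sparse CSR matrix of normalized token counts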
def load_breast_cancer(*, return_X_y=False, as_frame=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Read more in the :ref:`User Guide <breast_cancer_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (569, 30)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target : {ndarray, Series} of shape (569,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names : list
The names of the dataset columns.
target_names : list
The names of target classes.
frame : DataFrame of shape (569, 31)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR : str
The full description of the dataset.
filename : str
The path to the location of the data.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarrays by default. The first contains a 2D ndarray of
shape (569, 30) with each row representing one sample and each column
representing the features. The second ndarray of shape (569,) contains
the target samples. If `as_frame=True`, both arrays are pandas objects,
i.e. `X` a dataframe and `y` a series.
.. versionadded:: 0.18
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
data_file_name = 'breast_cancer.csv'
with _open_text(DATA_MODULE, data_file_name) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=int)
for (i, ir) in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=int)
if 'breast_cancer.rst' is None:
(data, target, target_names, fdescr) = (data, target, target_names, None)
else:
descr = load_descr(descr_module=DESCR_MODULE, descr_file_name='breast_cancer.rst')
(data, target, target_names, fdescr) = (data, target, target_names, descr)
feature_names = np.array(['mean radius', 'mean texture', 'mean perimeter', 'mean area', 'mean smoothness', 'mean compactness', 'mean concavity', 'mean concave points', 'mean symmetry', 'mean fractal dimension', 'radius error', 'texture error', 'perimeter error', 'area error', 'smoothness error', 'compactness error', 'concavity error', 'concave points error', 'symmetry error', 'fractal dimension error', 'worst radius', 'worst texture', 'worst perimeter', 'worst area', 'worst smoothness', 'worst compactness', 'worst concavity', 'worst concave points', 'worst symmetry', 'worst fractal dimension'])
frame = None
target_columns = ['target']
if as_frame:
pd = check_pandas_support('{} with as_frame=True'.format('load_breast_cancer'))
sparse_data = False
if not sparse_data:
data_df = pd.DataFrame(data, columns=feature_names)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names)
target_df = pd.DataFrame(target, columns=target_columns)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[feature_names]
y = combined_df[target_columns]
if y.shape[1] == 1:
y = y.iloc[:, 0]
(frame, data, target) = (combined_df, X, y)
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE)
|
def load_breast_cancer(*, return_X_y=False, as_frame=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Read more in the :ref:`User Guide <breast_cancer_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (569, 30)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target : {ndarray, Series} of shape (569,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names : list
The names of the dataset columns.
target_names : list
The names of target classes.
frame : DataFrame of shape (569, 31)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR : str
The full description of the dataset.
filename : str
The path to the location of the data.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarrays by default. The first contains a 2D ndarray of
shape (569, 30) with each row representing one sample and each column
representing the features. The second ndarray of shape (569,) contains
the target samples. If `as_frame=True`, both arrays are pandas objects,
i.e. `X` a dataframe and `y` a series.
.. versionadded:: 0.18
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
data_file_name = 'breast_cancer.csv'
<DeepExtract>
with _open_text(DATA_MODULE, data_file_name) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=int)
for (i, ir) in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=int)
if 'breast_cancer.rst' is None:
(data, target, target_names, fdescr) = (data, target, target_names, None)
else:
descr = load_descr(descr_module=DESCR_MODULE, descr_file_name='breast_cancer.rst')
(data, target, target_names, fdescr) = (data, target, target_names, descr)
</DeepExtract>
feature_names = np.array(['mean radius', 'mean texture', 'mean perimeter', 'mean area', 'mean smoothness', 'mean compactness', 'mean concavity', 'mean concave points', 'mean symmetry', 'mean fractal dimension', 'radius error', 'texture error', 'perimeter error', 'area error', 'smoothness error', 'compactness error', 'concavity error', 'concave points error', 'symmetry error', 'fractal dimension error', 'worst radius', 'worst texture', 'worst perimeter', 'worst area', 'worst smoothness', 'worst compactness', 'worst concavity', 'worst concave points', 'worst symmetry', 'worst fractal dimension'])
frame = None
target_columns = ['target']
if as_frame:
<DeepExtract>
pd = check_pandas_support('{} with as_frame=True'.format('load_breast_cancer'))
sparse_data = False
if not sparse_data:
data_df = pd.DataFrame(data, columns=feature_names)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names)
target_df = pd.DataFrame(target, columns=target_columns)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[feature_names]
y = combined_df[target_columns]
if y.shape[1] == 1:
y = y.iloc[:, 0]
(frame, data, target) = (combined_df, X, y)
</DeepExtract>
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE)
|
def test_deprecated():
with pytest.warns(FutureWarning, match='qwerty'):
MockClass1()
with pytest.warns(FutureWarning, match='mockclass2_method'):
MockClass2().method()
with pytest.warns(FutureWarning, match='deprecated'):
MockClass3()
with pytest.warns(FutureWarning, match='qwerty'):
MockClass5(42)
with pytest.warns(FutureWarning, match='a message'):
MockClass6(42)
with pytest.warns(FutureWarning, match='deprecated'):
val = 10
assert val == 10
|
def test_deprecated():
with pytest.warns(FutureWarning, match='qwerty'):
MockClass1()
with pytest.warns(FutureWarning, match='mockclass2_method'):
MockClass2().method()
with pytest.warns(FutureWarning, match='deprecated'):
MockClass3()
with pytest.warns(FutureWarning, match='qwerty'):
MockClass5(42)
with pytest.warns(FutureWarning, match='a message'):
MockClass6(42)
with pytest.warns(FutureWarning, match='deprecated'):
<DeepExtract>
val = 10
</DeepExtract>
assert val == 10
|
def __init__(self, wordChars=printables):
super(WordStart, self).__init__()
cache[wordChars] = value
self.errmsg = 'Not at the start of a word'
|
def __init__(self, wordChars=printables):
super(WordStart, self).__init__()
<DeepExtract>
cache[wordChars] = value
</DeepExtract>
self.errmsg = 'Not at the start of a word'
|
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
@pytest.mark.parametrize('kernel', ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'])
@pytest.mark.parametrize('h', [0.01, 0.1, 1])
@pytest.mark.parametrize('rtol', [0, 1e-05])
@pytest.mark.parametrize('atol', [1e-06, 0.01])
@pytest.mark.parametrize('breadth_first', [True, False])
def test_kernel_density(Cls, kernel, h, rtol, atol, breadth_first, n_samples=100, n_features=3):
rng = check_random_state(1)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
dens_true = norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
dens_true = norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
dens_true = norm * ((1.0 - d * d / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
dens_true = norm * np.exp(-d / h).sum(-1)
elif kernel == 'linear':
dens_true = norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
dens_true = norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
tree = Cls(X, leaf_size=10)
dens = tree.kernel_density(Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-07))
|
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
@pytest.mark.parametrize('kernel', ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'])
@pytest.mark.parametrize('h', [0.01, 0.1, 1])
@pytest.mark.parametrize('rtol', [0, 1e-05])
@pytest.mark.parametrize('atol', [1e-06, 0.01])
@pytest.mark.parametrize('breadth_first', [True, False])
def test_kernel_density(Cls, kernel, h, rtol, atol, breadth_first, n_samples=100, n_features=3):
rng = check_random_state(1)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
<DeepExtract>
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
dens_true = norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
dens_true = norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
dens_true = norm * ((1.0 - d * d / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
dens_true = norm * np.exp(-d / h).sum(-1)
elif kernel == 'linear':
dens_true = norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
dens_true = norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
</DeepExtract>
tree = Cls(X, leaf_size=10)
dens = tree.kernel_density(Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-07))
|
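A cross-check of the brute-force Gaussian density above against KDTree.kernel_density on a tiny 1-D sample; sizes and bandwidth are arbitrary.
import numpy as np
from sklearn.neighbors import KDTree
rng = np.random.RandomState(0)
X = rng.random_sample((50, 1))
Y = rng.random_sample((10, 1))
h = 0.1
dens_tree = KDTree(X).kernel_density(Y, h=h, kernel='gaussian')
d = np.abs(Y - X.T)                          # (10, 50) pairwise distances in 1-D
norm = 1.0 / np.sqrt(2 * np.pi * h ** 2)     # kernel_norm(h, 1, 'gaussian')
dens_brute = norm * np.exp(-0.5 * (d / h) ** 2).sum(axis=1)
np.testing.assert_allclose(dens_tree, dens_brute, rtol=1e-5)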
def test_theil_sen_parallel():
random_state = np.random.RandomState(0)
n_samples = 100
X = random_state.normal(size=(n_samples, 2))
w = np.array([5.0, 10.0])
c = 1.0
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
(X, y, w, c) = (X, y, w, c)
lstq = LinearRegression().fit(X, y)
assert norm(lstq.coef_ - w) > 1.0
theil_sen = TheilSenRegressor(n_jobs=2, random_state=0, max_subpopulation=2000.0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
|
def test_theil_sen_parallel():
<DeepExtract>
random_state = np.random.RandomState(0)
n_samples = 100
X = random_state.normal(size=(n_samples, 2))
w = np.array([5.0, 10.0])
c = 1.0
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
(X, y, w, c) = (X, y, w, c)
</DeepExtract>
lstq = LinearRegression().fit(X, y)
assert norm(lstq.coef_ - w) > 1.0
theil_sen = TheilSenRegressor(n_jobs=2, random_state=0, max_subpopulation=2000.0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
|
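The robustness property the test above relies on, shown on a 1-D toy problem (a sketch; the corruption level and seed are arbitrary).
import numpy as np
from sklearn.linear_model import LinearRegression, TheilSenRegressor
rng = np.random.RandomState(0)
X = rng.normal(size=(100, 1))
y = 5.0 * X.ravel() + 1.0 + 0.1 * rng.normal(size=100)
y[:10] = 50 * rng.normal(size=10)                 # corrupt 10% of the targets
print(LinearRegression().fit(X, y).coef_)         # pulled away from the true slope 5.0
print(TheilSenRegressor(random_state=0).fit(X, y).coef_)   # stays close to 5.0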
def write_pip_requirements(build_metadata):
build_name = build_metadata['build_name']
template = environment.from_string("\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for CI builds:\n# build_tools/update_environments_and_lock_files.py\n{% for pip_dep in build_metadata['pip_dependencies'] %}\n{{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n{% endfor %}".strip())
content = template.render(build_metadata=build_metadata)
folder_path = Path(build_metadata['folder'])
output_path = folder_path / f'{build_name}_requirements.txt'
output_path.write_text(content)
|
def write_pip_requirements(build_metadata):
build_name = build_metadata['build_name']
<DeepExtract>
template = environment.from_string("\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for CI builds:\n# build_tools/update_environments_and_lock_files.py\n{% for pip_dep in build_metadata['pip_dependencies'] %}\n{{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n{% endfor %}".strip())
content = template.render(build_metadata=build_metadata)
</DeepExtract>
folder_path = Path(build_metadata['folder'])
output_path = folder_path / f'{build_name}_requirements.txt'
output_path.write_text(content)
|
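A minimal Jinja2 sketch of the rendering step above; the template string and dependency list are made up, and the real script builds `environment` with custom filters such as get_package_with_constraint.
from jinja2 import Environment
environment = Environment()
template = environment.from_string('{% for dep in deps %}{{ dep }}\n{% endfor %}')
print(template.render(deps=['numpy', 'scipy']))   # one requirement per line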
def test_lasso_dual_gap():
"""
Check that Lasso.dual_gap_ matches its objective formulation, with the
datafit normalized by n_samples
"""
random_state = np.random.RandomState(0)
(n_targets, n_informative_features) = (1, 10)  # presumed defaults of the extracted build_dataset helper
if n_targets > 1:
w = random_state.randn(30, n_targets)
else:
w = random_state.randn(30)
w[n_informative_features:] = 0.0
X = random_state.randn(10, 30)
y = np.dot(X, w)
X_test = random_state.randn(10, 30)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
n_samples = len(y)
alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples
clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y)
w = clf.coef_
R = y - X @ w
primal = 0.5 * np.mean(R ** 2) + clf.alpha * np.sum(np.abs(w))
R /= np.max(np.abs(X.T @ R) / (n_samples * alpha))
dual = 0.5 * (np.mean(y ** 2) - np.mean((y - R) ** 2))
assert_allclose(clf.dual_gap_, primal - dual)
|
def test_lasso_dual_gap():
"""
Check that Lasso.dual_gap_ matches its objective formulation, with the
datafit normalized by n_samples
"""
<DeepExtract>
random_state = np.random.RandomState(0)
(n_targets, n_informative_features) = (1, 10)  # presumed defaults of the extracted build_dataset helper
if n_targets > 1:
w = random_state.randn(30, n_targets)
else:
w = random_state.randn(30)
w[n_informative_features:] = 0.0
X = random_state.randn(10, 30)
y = np.dot(X, w)
X_test = random_state.randn(10, 30)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
n_samples = len(y)
alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples
clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y)
w = clf.coef_
R = y - X @ w
primal = 0.5 * np.mean(R ** 2) + clf.alpha * np.sum(np.abs(w))
R /= np.max(np.abs(X.T @ R) / (n_samples * alpha))
dual = 0.5 * (np.mean(y ** 2) - np.mean((y - R) ** 2))
assert_allclose(clf.dual_gap_, primal - dual)
|
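The primal and dual quantities checked above, written out for a freshly fitted Lasso with the same data-generation idea as the test; the two printed values should agree up to numerical tolerance.
import numpy as np
from sklearn.linear_model import Lasso
rng = np.random.RandomState(0)
X = rng.randn(10, 30)
w_true = rng.randn(30)
w_true[10:] = 0.0
y = X @ w_true
n_samples = len(y)
alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples
clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y)
w = clf.coef_
R = y - X @ w
primal = 0.5 * np.mean(R ** 2) + alpha * np.sum(np.abs(w))
R /= np.max(np.abs(X.T @ R) / (n_samples * alpha))   # rescale residual to a dual-feasible point
dual = 0.5 * (np.mean(y ** 2) - np.mean((y - R) ** 2))
print(clf.dual_gap_, primal - dual)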
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc'], reset=False)
activation = X
hidden_activation = ACTIVATIONS[self.activation]
for i in range(self.n_layers_ - 1):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if i != self.n_layers_ - 2:
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
y_pred = activation
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
|
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
<DeepExtract>
X = self._validate_data(X, accept_sparse=['csr', 'csc'], reset=False)
activation = X
hidden_activation = ACTIVATIONS[self.activation]
for i in range(self.n_layers_ - 1):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if i != self.n_layers_ - 2:
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
y_pred = activation
</DeepExtract>
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
|
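How the binary branch above turns a single sigmoid output column into a two-column probability matrix (values are illustrative).
import numpy as np
y_pred = np.array([0.1, 0.8, 0.5])            # P(class 1) from the output layer
proba = np.vstack([1 - y_pred, y_pred]).T     # columns ordered as in classes_
print(proba)                                  # each row sums to 1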
@ignore_warnings(category=FutureWarning)
def check_sample_weights_invariance(name, estimator_orig, kind='ones'):
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X1 = np.array([[1, 3], [1, 3], [1, 3], [1, 3], [2, 1], [2, 1], [2, 1], [2, 1], [3, 3], [3, 3], [3, 3], [3, 3], [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)
y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if kind == 'ones':
X2 = X1
y2 = y1
sw2 = np.ones(shape=len(y1))
err_msg = f'For {name} sample_weight=None is not equivalent to sample_weight=ones'
elif kind == 'zeros':
X2 = np.vstack([X1, X1 + 1])
y2 = np.hstack([y1, 3 - y1])
sw2 = np.ones(shape=len(y1) * 2)
sw2[len(y1):] = 0
(X2, y2, sw2) = shuffle(X2, y2, sw2, random_state=0)
err_msg = f'For {name}, a zero sample_weight is not equivalent to removing the sample'
else:
raise ValueError
if _safe_tags(estimator1, key='requires_positive_y'):
y1 += 1 + abs(y1.min())
if _safe_tags(estimator1, key='binary_only') and y1.size > 0:
y1 = np.where(y1 == y1.flat[0], y1, y1.flat[0] + 1)
if _safe_tags(estimator1, key='multioutput_only'):
y1 = np.reshape(y1, (-1, 1))
y1 = y1
if _safe_tags(estimator2, key='requires_positive_y'):
y2 += 1 + abs(y2.min())
if _safe_tags(estimator2, key='binary_only') and y2.size > 0:
y2 = np.where(y2 == y2.flat[0], y2, y2.flat[0] + 1)
if _safe_tags(estimator2, key='multioutput_only'):
y2 = np.reshape(y2, (-1, 1))
y2 = y2
estimator1.fit(X1, y=y1, sample_weight=None)
estimator2.fit(X2, y=y2, sample_weight=sw2)
for method in ['predict', 'predict_proba', 'decision_function', 'transform']:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X1)
X_pred2 = getattr(estimator2, method)(X1)
assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)
|
@ignore_warnings(category=FutureWarning)
def check_sample_weights_invariance(name, estimator_orig, kind='ones'):
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X1 = np.array([[1, 3], [1, 3], [1, 3], [1, 3], [2, 1], [2, 1], [2, 1], [2, 1], [3, 3], [3, 3], [3, 3], [3, 3], [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)
y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if kind == 'ones':
X2 = X1
y2 = y1
sw2 = np.ones(shape=len(y1))
err_msg = f'For {name} sample_weight=None is not equivalent to sample_weight=ones'
elif kind == 'zeros':
X2 = np.vstack([X1, X1 + 1])
y2 = np.hstack([y1, 3 - y1])
sw2 = np.ones(shape=len(y1) * 2)
sw2[len(y1):] = 0
(X2, y2, sw2) = shuffle(X2, y2, sw2, random_state=0)
err_msg = f'For {name}, a zero sample_weight is not equivalent to removing the sample'
else:
raise ValueError
<DeepExtract>
if _safe_tags(estimator1, key='requires_positive_y'):
y1 += 1 + abs(y1.min())
if _safe_tags(estimator1, key='binary_only') and y1.size > 0:
y1 = np.where(y1 == y1.flat[0], y1, y1.flat[0] + 1)
if _safe_tags(estimator1, key='multioutput_only'):
y1 = np.reshape(y1, (-1, 1))
y1 = y1
</DeepExtract>
<DeepExtract>
if _safe_tags(estimator2, key='requires_positive_y'):
y2 += 1 + abs(y2.min())
if _safe_tags(estimator2, key='binary_only') and y2.size > 0:
y2 = np.where(y2 == y2.flat[0], y2, y2.flat[0] + 1)
if _safe_tags(estimator2, key='multioutput_only'):
y2 = np.reshape(y2, (-1, 1))
y2 = y2
</DeepExtract>
estimator1.fit(X1, y=y1, sample_weight=None)
estimator2.fit(X2, y=y2, sample_weight=sw2)
for method in ['predict', 'predict_proba', 'decision_function', 'transform']:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X1)
X_pred2 = getattr(estimator2, method)(X1)
assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)
|
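The 'ones' case of the invariance above, spelled out for one concrete estimator; LogisticRegression is only an example, any estimator accepting sample_weight works.
import numpy as np
from sklearn.linear_model import LogisticRegression
X = np.array([[1.0, 3.0], [2.0, 1.0], [3.0, 3.0], [4.0, 1.0]])
y = np.array([1, 2, 1, 2])
clf_none = LogisticRegression(random_state=0).fit(X, y, sample_weight=None)
clf_ones = LogisticRegression(random_state=0).fit(X, y, sample_weight=np.ones(len(y)))
np.testing.assert_allclose(clf_none.predict_proba(X), clf_ones.predict_proba(X))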
def mahalanobis(self, X):
"""Compute the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
X = self._validate_data(X, reset=False)
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_, check_finite=False)
precision = precision
with config_context(assume_finite=True):
dist = pairwise_distances(X, self.location_[np.newaxis, :], metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
|
def mahalanobis(self, X):
"""Compute the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
X = self._validate_data(X, reset=False)
<DeepExtract>
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_, check_finite=False)
precision = precision
</DeepExtract>
with config_context(assume_finite=True):
dist = pairwise_distances(X, self.location_[np.newaxis, :], metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
|
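The same computation through a public estimator; EmpiricalCovariance is one concrete subclass exposing this mahalanobis method.
import numpy as np
from sklearn.covariance import EmpiricalCovariance
rng = np.random.RandomState(0)
X = rng.randn(100, 3)
cov = EmpiricalCovariance().fit(X)
d2 = cov.mahalanobis(X[:5])   # squared Mahalanobis distances to the fitted location_
print(d2.shape)               # (5,)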
@pytest.mark.parametrize('name', sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_sample_order_invariance(name):
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20,))
y_pred = random_state.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
offset = abs(min(y_true.min(), y_pred.min())) + 1
y_true += offset
y_pred += offset
(y_true, y_pred) = (y_true, y_pred)
(y_true_shuffle, y_pred_shuffle) = shuffle(y_true, y_pred, random_state=0)
with ignore_warnings():
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg='%s is not sample order invariant' % name)
|
@pytest.mark.parametrize('name', sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_sample_order_invariance(name):
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20,))
y_pred = random_state.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
<DeepExtract>
offset = abs(min(y_true.min(), y_pred.min())) + 1
y_true += offset
y_pred += offset
(y_true, y_pred) = (y_true, y_pred)
</DeepExtract>
(y_true_shuffle, y_pred_shuffle) = shuffle(y_true, y_pred, random_state=0)
with ignore_warnings():
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg='%s is not sample order invariant' % name)
|
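The invariance property checked directly for one metric, with accuracy_score as an arbitrary example.
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
y_true_s, y_pred_s = shuffle(y_true, y_pred, random_state=0)
assert accuracy_score(y_true, y_pred) == accuracy_score(y_true_s, y_pred_s)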
def track_same_prediction(self, *args):
path = Path(__file__).resolve().parent / 'cache'
path = path / 'estimators' / Benchmark.base_commit if True else path / 'tmp'
filename = self.__class__.__name__ + '_estimator_' + '_'.join(list(map(str, args))) + '.pkl'
est_path = path / filename
with est_path.open(mode='rb') as f:
estimator_base = pickle.load(f)
y_val_pred_base = estimator_base.predict(self.X_val)
y_val_pred = self.estimator.predict(self.X_val)
return np.allclose(y_val_pred_base, y_val_pred)
|
def track_same_prediction(self, *args):
<DeepExtract>
path = Path(__file__).resolve().parent / 'cache'
path = path / 'estimators' / Benchmark.base_commit if True else path / 'tmp'
filename = self.__class__.__name__ + '_estimator_' + '_'.join(list(map(str, args))) + '.pkl'
est_path = path / filename
</DeepExtract>
with est_path.open(mode='rb') as f:
estimator_base = pickle.load(f)
y_val_pred_base = estimator_base.predict(self.X_val)
y_val_pred = self.estimator.predict(self.X_val)
return np.allclose(y_val_pred_base, y_val_pred)
|
def bench_sample(sampling, n_population, n_samples):
gc.collect()
t_start = datetime.now()
sampling(n_population, n_samples)
delta = datetime.now() - t_start
mu_second = 0.0 + 10 ** 6
time = delta.seconds + delta.microseconds / mu_second
return time
|
def bench_sample(sampling, n_population, n_samples):
gc.collect()
t_start = datetime.now()
sampling(n_population, n_samples)
delta = datetime.now() - t_start
<DeepExtract>
mu_second = 0.0 + 10 ** 6
time = delta.seconds + delta.microseconds / mu_second
</DeepExtract>
return time
|
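# Side note on the elapsed-time arithmetic above: delta.seconds + delta.microseconds / 1e6
# ignores whole days, so it matches timedelta.total_seconds() only for runs shorter
# than a day; total_seconds() is the simpler, general spelling.
from datetime import timedelta

delta = timedelta(seconds=3, microseconds=250000)
assert delta.seconds + delta.microseconds / 10 ** 6 == delta.total_seconds() == 3.25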
def checks_generator():
for check in _yield_all_checks(estimator):
(should_be_skipped, reason) = _should_be_skipped_or_marked(estimator, check)
if not should_be_skipped:
check = check
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(f'Skipping {check_name} for {estimator.__class__.__name__}: {reason}')
check = wrapped
yield (estimator, partial(check, name))
|
def checks_generator():
for check in _yield_all_checks(estimator):
<DeepExtract>
(should_be_skipped, reason) = _should_be_skipped_or_marked(estimator, check)
if not should_be_skipped:
check = check
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(f'Skipping {check_name} for {estimator.__class__.__name__}: {reason}')
check = wrapped
</DeepExtract>
yield (estimator, partial(check, name))
|
def transform(self, X):
"""Impute all missing values in `X`.
    Note that this is stochastic; if `random_state` is not fixed, repeated
    calls or permuted input will yield different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self)
if is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
X = self._validate_data(X, dtype=FLOAT_DTYPES, order='F', reset=False, force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(missing_values=self.missing_values, strategy=self.initial_strategy, fill_value=self.fill_value, keep_empty_features=self.keep_empty_features)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
valid_mask = np.flatnonzero(np.logical_not(np.isnan(self.initial_imputer_.statistics_)))
if not self.keep_empty_features:
Xt = X[:, valid_mask]
mask_missing_values = mask_missing_values[:, valid_mask]
else:
mask_missing_values[:, valid_mask] = True
Xt = X
(X, Xt, mask_missing_values, complete_mask) = (Xt, X_filled, mask_missing_values, X_missing_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.n_iter_ == 0 or np.all(mask_missing_values):
return super()._concatenate_indicator(Xt, X_indicator)
imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
i_rnd = 0
if self.verbose > 0:
print('[IterativeImputer] Completing matrix with shape %s' % (X.shape,))
start_t = time()
for (it, estimator_triplet) in enumerate(self.imputation_sequence_):
if estimator_triplet.estimator is None and False is False:
raise ValueError('If fit_mode is False, then an already-fitted estimator should be passed in.')
if estimator_triplet.estimator is None:
estimator_triplet.estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, estimator_triplet.feat_idx]
if False:
X_train = _safe_indexing(_safe_indexing(Xt, estimator_triplet.neighbor_feat_idx, axis=1), ~missing_row_mask, axis=0)
y_train = _safe_indexing(_safe_indexing(Xt, estimator_triplet.feat_idx, axis=1), ~missing_row_mask, axis=0)
estimator_triplet.estimator.fit(X_train, y_train)
if np.sum(missing_row_mask) == 0:
(Xt, _) = (Xt, estimator_triplet.estimator)
X_test = _safe_indexing(_safe_indexing(Xt, estimator_triplet.neighbor_feat_idx, axis=1), missing_row_mask, axis=0)
if self.sample_posterior:
(mus, sigmas) = estimator_triplet.estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=Xt.dtype)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value[estimator_triplet.feat_idx]
imputed_values[mus_too_low] = self._min_value[estimator_triplet.feat_idx]
mus_too_high = mus > self._max_value[estimator_triplet.feat_idx]
imputed_values[mus_too_high] = self._max_value[estimator_triplet.feat_idx]
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value[estimator_triplet.feat_idx] - mus) / sigmas
b = (self._max_value[estimator_triplet.feat_idx] - mus) / sigmas
truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(random_state=self.random_state_)
else:
imputed_values = estimator_triplet.estimator.predict(X_test)
imputed_values = np.clip(imputed_values, self._min_value[estimator_triplet.feat_idx], self._max_value[estimator_triplet.feat_idx])
_safe_assign(Xt, imputed_values, row_indexer=missing_row_mask, column_indexer=estimator_triplet.feat_idx)
(Xt, _) = (Xt, estimator_triplet.estimator)
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print('[IterativeImputer] Ending imputation round %d/%d, elapsed time %0.2f' % (i_rnd + 1, self.n_iter_, time() - start_t))
i_rnd += 1
if hasattr(Xt, 'mask'):
Xt.mask(cond=~mask_missing_values, other=X, inplace=True)
else:
Xt[~mask_missing_values] = X[~mask_missing_values]
return super()._concatenate_indicator(Xt, X_indicator)
|
def transform(self, X):
"""Impute all missing values in `X`.
    Note that this is stochastic; if `random_state` is not fixed, repeated
    calls or permuted input will yield different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self)
<DeepExtract>
if is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
X = self._validate_data(X, dtype=FLOAT_DTYPES, order='F', reset=False, force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(missing_values=self.missing_values, strategy=self.initial_strategy, fill_value=self.fill_value, keep_empty_features=self.keep_empty_features)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
valid_mask = np.flatnonzero(np.logical_not(np.isnan(self.initial_imputer_.statistics_)))
if not self.keep_empty_features:
Xt = X[:, valid_mask]
mask_missing_values = mask_missing_values[:, valid_mask]
else:
mask_missing_values[:, valid_mask] = True
Xt = X
(X, Xt, mask_missing_values, complete_mask) = (Xt, X_filled, mask_missing_values, X_missing_mask)
</DeepExtract>
X_indicator = super()._transform_indicator(complete_mask)
if self.n_iter_ == 0 or np.all(mask_missing_values):
return super()._concatenate_indicator(Xt, X_indicator)
imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
i_rnd = 0
if self.verbose > 0:
print('[IterativeImputer] Completing matrix with shape %s' % (X.shape,))
start_t = time()
for (it, estimator_triplet) in enumerate(self.imputation_sequence_):
<DeepExtract>
if estimator_triplet.estimator is None and False is False:
raise ValueError('If fit_mode is False, then an already-fitted estimator should be passed in.')
if estimator_triplet.estimator is None:
estimator_triplet.estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, estimator_triplet.feat_idx]
if False:
X_train = _safe_indexing(_safe_indexing(Xt, estimator_triplet.neighbor_feat_idx, axis=1), ~missing_row_mask, axis=0)
y_train = _safe_indexing(_safe_indexing(Xt, estimator_triplet.feat_idx, axis=1), ~missing_row_mask, axis=0)
estimator_triplet.estimator.fit(X_train, y_train)
if np.sum(missing_row_mask) == 0:
(Xt, _) = (Xt, estimator_triplet.estimator)
X_test = _safe_indexing(_safe_indexing(Xt, estimator_triplet.neighbor_feat_idx, axis=1), missing_row_mask, axis=0)
if self.sample_posterior:
(mus, sigmas) = estimator_triplet.estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=Xt.dtype)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value[estimator_triplet.feat_idx]
imputed_values[mus_too_low] = self._min_value[estimator_triplet.feat_idx]
mus_too_high = mus > self._max_value[estimator_triplet.feat_idx]
imputed_values[mus_too_high] = self._max_value[estimator_triplet.feat_idx]
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value[estimator_triplet.feat_idx] - mus) / sigmas
b = (self._max_value[estimator_triplet.feat_idx] - mus) / sigmas
truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(random_state=self.random_state_)
else:
imputed_values = estimator_triplet.estimator.predict(X_test)
imputed_values = np.clip(imputed_values, self._min_value[estimator_triplet.feat_idx], self._max_value[estimator_triplet.feat_idx])
_safe_assign(Xt, imputed_values, row_indexer=missing_row_mask, column_indexer=estimator_triplet.feat_idx)
(Xt, _) = (Xt, estimator_triplet.estimator)
</DeepExtract>
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print('[IterativeImputer] Ending imputation round %d/%d, elapsed time %0.2f' % (i_rnd + 1, self.n_iter_, time() - start_t))
i_rnd += 1
<DeepExtract>
if hasattr(Xt, 'mask'):
Xt.mask(cond=~mask_missing_values, other=X, inplace=True)
else:
Xt[~mask_missing_values] = X[~mask_missing_values]
</DeepExtract>
return super()._concatenate_indicator(Xt, X_indicator)
|
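# A minimal usage sketch for the transform shown above, assuming the public
# sklearn.impute.IterativeImputer API (experimental, so it must be enabled
# before import); the exact imputed values depend on the default estimator.
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X_train = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0], [np.nan, 3.0], [7.0, np.nan]])
imp = IterativeImputer(max_iter=10, random_state=0).fit(X_train)
X_new = np.array([[np.nan, 2.0], [6.0, np.nan], [np.nan, 6.0]])
print(imp.transform(X_new))  # NaNs replaced by round-robin regression estimates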
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
return False
threshold = self.threshold
branching_factor = self.branching_factor
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.0
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
if not split_child:
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = self.subclusters_[closest_index].sq_norm_
return False
else:
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(threshold=threshold, branching_factor=branching_factor, is_leaf=closest_subcluster.child_.is_leaf, n_features=closest_subcluster.child_.n_features, dtype=closest_subcluster.child_.init_centroids_.dtype)
new_node2 = _CFNode(threshold=threshold, branching_factor=branching_factor, is_leaf=closest_subcluster.child_.is_leaf, n_features=closest_subcluster.child_.n_features, dtype=closest_subcluster.child_.init_centroids_.dtype)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if closest_subcluster.child_.is_leaf:
if closest_subcluster.child_.prev_leaf_ is not None:
closest_subcluster.child_.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = closest_subcluster.child_.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = closest_subcluster.child_.next_leaf_
if closest_subcluster.child_.next_leaf_ is not None:
closest_subcluster.child_.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(closest_subcluster.child_.centroids_, Y_norm_squared=closest_subcluster.child_.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
(node1_dist, node2_dist) = dist[farthest_idx,]
node1_closer = node1_dist < node2_dist
node1_closer[farthest_idx[0]] = True
for (idx, subcluster) in enumerate(closest_subcluster.child_.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
(new_subcluster1, new_subcluster2) = (new_subcluster1, new_subcluster2)
ind = self.subclusters_.index(closest_subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
else:
merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
return False
elif len(self.subclusters_) < self.branching_factor:
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
return False
else:
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
return True
|
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
<DeepExtract>
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
</DeepExtract>
return False
threshold = self.threshold
branching_factor = self.branching_factor
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.0
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
if not split_child:
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = self.subclusters_[closest_index].sq_norm_
return False
else:
<DeepExtract>
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(threshold=threshold, branching_factor=branching_factor, is_leaf=closest_subcluster.child_.is_leaf, n_features=closest_subcluster.child_.n_features, dtype=closest_subcluster.child_.init_centroids_.dtype)
new_node2 = _CFNode(threshold=threshold, branching_factor=branching_factor, is_leaf=closest_subcluster.child_.is_leaf, n_features=closest_subcluster.child_.n_features, dtype=closest_subcluster.child_.init_centroids_.dtype)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if closest_subcluster.child_.is_leaf:
if closest_subcluster.child_.prev_leaf_ is not None:
closest_subcluster.child_.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = closest_subcluster.child_.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = closest_subcluster.child_.next_leaf_
if closest_subcluster.child_.next_leaf_ is not None:
closest_subcluster.child_.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(closest_subcluster.child_.centroids_, Y_norm_squared=closest_subcluster.child_.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
(node1_dist, node2_dist) = dist[farthest_idx,]
node1_closer = node1_dist < node2_dist
node1_closer[farthest_idx[0]] = True
for (idx, subcluster) in enumerate(closest_subcluster.child_.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
(new_subcluster1, new_subcluster2) = (new_subcluster1, new_subcluster2)
</DeepExtract>
<DeepExtract>
ind = self.subclusters_.index(closest_subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
</DeepExtract>
if len(self.subclusters_) > self.branching_factor:
return True
return False
else:
merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
return False
elif len(self.subclusters_) < self.branching_factor:
<DeepExtract>
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
</DeepExtract>
return False
else:
<DeepExtract>
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
</DeepExtract>
return True
|
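# A small sketch of the distance shortcut used in insert_cf_subcluster above:
# argmin over centroids c of ||c||^2 - 2 c.x equals argmin of ||c - x||^2,
# because the dropped ||x||^2 term is the same for every candidate. Synthetic
# data only; the names here are illustrative, not part of the estimator.
import numpy as np

rng = np.random.RandomState(0)
centroids = rng.randn(8, 4)
x = rng.randn(4)
squared_norm = (centroids ** 2).sum(axis=1)
shortcut = squared_norm - 2.0 * centroids @ x   # mirrors dist_matrix above
full = ((centroids - x) ** 2).sum(axis=1)       # plain squared Euclidean distance
assert np.argmin(shortcut) == np.argmin(full)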
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'labels': ['array-like', None], 'pos_label': [Real, str, 'boolean', None], 'average': [StrOptions({'micro', 'macro', 'samples', 'weighted', 'binary'}), None], 'sample_weight': ['array-like', None], 'zero_division': [Options(Real, {0, 1}), StrOptions({'warn'})]})
def jaccard_score(y_true, y_pred, *, labels=None, pos_label=1, average='binary', sample_weight=None, zero_division='warn'):
"""Jaccard similarity coefficient score.
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array-like of shape (n_classes,), default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : int, float, bool or str, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, default='binary'
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", {0.0, 1.0}, default="warn"
        Sets the value to return when there is a zero division, i.e. when
        there are no negative values in predictions and labels. If set to
"warn", this acts like 0, but a warning is also raised.
Returns
-------
score : float or ndarray of shape (n_unique_labels,), dtype=np.float64
The Jaccard score. When `average` is not `None`, a single scalar is
returned.
See Also
--------
accuracy_score : Function for calculating the accuracy score.
f1_score : Function for calculating the F1 score.
multilabel_confusion_matrix : Function for computing a confusion matrix for each class or sample.
Notes
-----
:func:`jaccard_score` may be a poor metric if there are no
positives for some samples or classes. Jaccard is undefined if there are
no true or predicted labels, and our implementation will return a score
of 0 with a warning.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_score
>>> y_true = np.array([[0, 1, 1],
... [1, 1, 0]])
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
In the binary case:
>>> jaccard_score(y_true[0], y_pred[0])
0.6666...
In the 2D comparison case (e.g. image similarity):
>>> jaccard_score(y_true, y_pred, average="micro")
0.6
In the multilabel case:
>>> jaccard_score(y_true, y_pred, average='samples')
0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
In the multiclass case:
>>> y_pred = [0, 2, 1, 2]
>>> y_true = [0, 1, 2, 2]
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' + str(average_options))
(y_type, y_true, y_pred) = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred).tolist()
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) >= 2:
raise ValueError(f'pos_label={pos_label} is not a valid label. It should be one of {present_labels}')
labels = [pos_label]
else:
average_options = list(average_options)
if y_type == 'multiclass':
average_options.remove('samples')
raise ValueError("Target is %s but average='binary'. Please choose another average setting, one of %r." % (y_type, average_options))
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class." % (pos_label, average), UserWarning)
labels = labels
samplewise = average == 'samples'
(y_type, y_true, y_pred) = _check_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in ('binary', 'multiclass', 'multilabel-indicator'):
raise ValueError('%s is not supported' % y_type)
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)])
if y_true.ndim == 1:
if samplewise:
raise ValueError('Samplewise metrics are not available outside of multilabel classification.')
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels))
else:
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
else:
sum_axis = 1 if samplewise else 0
if not np.array_equal(labels, present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d > %d' % (np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d < 0' % np.min(labels))
if n_labels is not None:
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight)
fp = pred_sum - tp_sum
fn = true_sum - tp_sum
tp = tp_sum
if sample_weight is not None and samplewise:
sample_weight = np.array(sample_weight)
tp = np.array(tp)
fp = np.array(fp)
fn = np.array(fn)
tn = sample_weight * y_true.shape[1] - tp - fp - fn
elif sample_weight is not None:
tn = sum(sample_weight) - tp - fp - fn
elif samplewise:
tn = y_true.shape[1] - tp - fp - fn
else:
tn = y_true.shape[0] - tp - fp - fn
MCM = np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
numerator = MCM[:, 1, 1]
denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]
if average == 'micro':
numerator = np.array([numerator.sum()])
denominator = np.array([denominator.sum()])
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1
result = numerator / denominator
if not np.any(mask):
jaccard = result
zero_division_value = _check_zero_division(zero_division)
result[mask] = zero_division_value
if zero_division != 'warn' or 'jaccard' not in ('jaccard',):
jaccard = result
if 'jaccard' in ('jaccard',) and 'f-score' in ('jaccard',):
msg_start = '{0} and F-score are'.format('jaccard'.title())
elif 'jaccard' in ('jaccard',):
msg_start = '{0} is'.format('jaccard'.title())
elif 'f-score' in ('jaccard',):
msg_start = 'F-score is'
else:
jaccard = result
_warn_prf(average, 'true or predicted', msg_start, len(result))
jaccard = result
if average is None:
return jaccard
if average == 'weighted':
weights = MCM[:, 1, 0] + MCM[:, 1, 1]
if not np.any(weights):
weights = None
elif average == 'samples' and sample_weight is not None:
weights = sample_weight
else:
weights = None
return np.average(jaccard, weights=weights)
|
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'labels': ['array-like', None], 'pos_label': [Real, str, 'boolean', None], 'average': [StrOptions({'micro', 'macro', 'samples', 'weighted', 'binary'}), None], 'sample_weight': ['array-like', None], 'zero_division': [Options(Real, {0, 1}), StrOptions({'warn'})]})
def jaccard_score(y_true, y_pred, *, labels=None, pos_label=1, average='binary', sample_weight=None, zero_division='warn'):
"""Jaccard similarity coefficient score.
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array-like of shape (n_classes,), default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : int, float, bool or str, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, default='binary'
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", {0.0, 1.0}, default="warn"
        Sets the value to return when there is a zero division, i.e. when
        there are no negative values in predictions and labels. If set to
"warn", this acts like 0, but a warning is also raised.
Returns
-------
score : float or ndarray of shape (n_unique_labels,), dtype=np.float64
The Jaccard score. When `average` is not `None`, a single scalar is
returned.
See Also
--------
accuracy_score : Function for calculating the accuracy score.
f1_score : Function for calculating the F1 score.
multilabel_confusion_matrix : Function for computing a confusion matrix for each class or sample.
Notes
-----
:func:`jaccard_score` may be a poor metric if there are no
positives for some samples or classes. Jaccard is undefined if there are
no true or predicted labels, and our implementation will return a score
of 0 with a warning.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_score
>>> y_true = np.array([[0, 1, 1],
... [1, 1, 0]])
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
In the binary case:
>>> jaccard_score(y_true[0], y_pred[0])
0.6666...
In the 2D comparison case (e.g. image similarity):
>>> jaccard_score(y_true, y_pred, average="micro")
0.6
In the multilabel case:
>>> jaccard_score(y_true, y_pred, average='samples')
0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
In the multiclass case:
>>> y_pred = [0, 2, 1, 2]
>>> y_true = [0, 1, 2, 2]
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
"""
<DeepExtract>
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' + str(average_options))
(y_type, y_true, y_pred) = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred).tolist()
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) >= 2:
raise ValueError(f'pos_label={pos_label} is not a valid label. It should be one of {present_labels}')
labels = [pos_label]
else:
average_options = list(average_options)
if y_type == 'multiclass':
average_options.remove('samples')
raise ValueError("Target is %s but average='binary'. Please choose another average setting, one of %r." % (y_type, average_options))
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class." % (pos_label, average), UserWarning)
labels = labels
</DeepExtract>
samplewise = average == 'samples'
<DeepExtract>
(y_type, y_true, y_pred) = _check_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in ('binary', 'multiclass', 'multilabel-indicator'):
raise ValueError('%s is not supported' % y_type)
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)])
if y_true.ndim == 1:
if samplewise:
raise ValueError('Samplewise metrics are not available outside of multilabel classification.')
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels))
else:
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
else:
sum_axis = 1 if samplewise else 0
if not np.array_equal(labels, present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d > %d' % (np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d < 0' % np.min(labels))
if n_labels is not None:
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight)
fp = pred_sum - tp_sum
fn = true_sum - tp_sum
tp = tp_sum
if sample_weight is not None and samplewise:
sample_weight = np.array(sample_weight)
tp = np.array(tp)
fp = np.array(fp)
fn = np.array(fn)
tn = sample_weight * y_true.shape[1] - tp - fp - fn
elif sample_weight is not None:
tn = sum(sample_weight) - tp - fp - fn
elif samplewise:
tn = y_true.shape[1] - tp - fp - fn
else:
tn = y_true.shape[0] - tp - fp - fn
MCM = np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
</DeepExtract>
numerator = MCM[:, 1, 1]
denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]
if average == 'micro':
numerator = np.array([numerator.sum()])
denominator = np.array([denominator.sum()])
<DeepExtract>
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1
result = numerator / denominator
if not np.any(mask):
jaccard = result
zero_division_value = _check_zero_division(zero_division)
result[mask] = zero_division_value
if zero_division != 'warn' or 'jaccard' not in ('jaccard',):
jaccard = result
if 'jaccard' in ('jaccard',) and 'f-score' in ('jaccard',):
msg_start = '{0} and F-score are'.format('jaccard'.title())
elif 'jaccard' in ('jaccard',):
msg_start = '{0} is'.format('jaccard'.title())
elif 'f-score' in ('jaccard',):
msg_start = 'F-score is'
else:
jaccard = result
_warn_prf(average, 'true or predicted', msg_start, len(result))
jaccard = result
</DeepExtract>
if average is None:
return jaccard
if average == 'weighted':
weights = MCM[:, 1, 0] + MCM[:, 1, 1]
if not np.any(weights):
weights = None
elif average == 'samples' and sample_weight is not None:
weights = sample_weight
else:
weights = None
return np.average(jaccard, weights=weights)
|
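# A cross-check of the per-label computation above: the Jaccard score for each
# label is tp / (tp + fp + fn) read off the multilabel confusion matrix, which
# is what MCM[:, 1, 1] / (MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]) encodes.
import numpy as np
from sklearn.metrics import jaccard_score, multilabel_confusion_matrix

y_true = np.array([[0, 1, 1], [1, 1, 0]])
y_pred = np.array([[1, 1, 1], [1, 0, 0]])
MCM = multilabel_confusion_matrix(y_true, y_pred)
tp, fp, fn = MCM[:, 1, 1], MCM[:, 0, 1], MCM[:, 1, 0]
manual = tp / (tp + fp + fn)
assert np.allclose(manual, jaccard_score(y_true, y_pred, average=None))  # [0.5, 0.5, 1.0]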
def test_partial_roc_auc_score():
y_true = np.array([0, 0, 1, 1])
assert roc_auc_score(y_true, y_true, max_fpr=1) == 1
assert roc_auc_score(y_true, y_true, max_fpr=0.001) == 1
with pytest.raises(ValueError):
assert roc_auc_score(y_true, y_true, max_fpr=-0.1)
with pytest.raises(ValueError):
assert roc_auc_score(y_true, y_true, max_fpr=1.1)
with pytest.raises(ValueError):
assert roc_auc_score(y_true, y_true, max_fpr=0)
y_scores = np.array([0.1, 0, 0.1, 0.01])
roc_auc_with_max_fpr_one = roc_auc_score(y_true, y_scores, max_fpr=1)
unconstrained_roc_auc = roc_auc_score(y_true, y_scores)
assert roc_auc_with_max_fpr_one == unconstrained_roc_auc
assert roc_auc_score(y_true, y_scores, max_fpr=0.3) == 0.5
if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if True:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if True:
y_score = y_score[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, y_score)
for max_fpr in np.linspace(0.0001, 1, 5):
assert_almost_equal(roc_auc_score(y_true, y_pred, max_fpr=max_fpr), _partial_roc_auc_score(y_true, y_pred, max_fpr))
|
def test_partial_roc_auc_score():
y_true = np.array([0, 0, 1, 1])
assert roc_auc_score(y_true, y_true, max_fpr=1) == 1
assert roc_auc_score(y_true, y_true, max_fpr=0.001) == 1
with pytest.raises(ValueError):
assert roc_auc_score(y_true, y_true, max_fpr=-0.1)
with pytest.raises(ValueError):
assert roc_auc_score(y_true, y_true, max_fpr=1.1)
with pytest.raises(ValueError):
assert roc_auc_score(y_true, y_true, max_fpr=0)
y_scores = np.array([0.1, 0, 0.1, 0.01])
roc_auc_with_max_fpr_one = roc_auc_score(y_true, y_scores, max_fpr=1)
unconstrained_roc_auc = roc_auc_score(y_true, y_scores)
assert roc_auc_with_max_fpr_one == unconstrained_roc_auc
assert roc_auc_score(y_true, y_scores, max_fpr=0.3) == 0.5
<DeepExtract>
if dataset is None:
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if True:
(X, y) = (X[y < 2], y[y < 2])
(n_samples, n_features) = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
(X, y) = (X[p], y[p])
half = int(n_samples / 2)
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if True:
y_score = y_score[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
(y_true, y_pred, _) = (y_true, y_pred, y_score)
</DeepExtract>
for max_fpr in np.linspace(0.0001, 1, 5):
assert_almost_equal(roc_auc_score(y_true, y_pred, max_fpr=max_fpr), _partial_roc_auc_score(y_true, y_pred, max_fpr))
|
def _fit_best_piecewise(vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
model.fit(v.reshape(-1, 1))
centroid = model.cluster_centers_
labels = model.labels_
(centroid, labels) = (centroid, labels)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1, arr=vectors - piecewise_vectors)
result = vectors[np.argsort(dists)[:n_best]]
return result
|
def _fit_best_piecewise(vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
<DeepExtract>
if self.mini_batch:
model = MiniBatchKMeans(n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init, n_init=self.n_init, random_state=self.random_state)
model.fit(v.reshape(-1, 1))
centroid = model.cluster_centers_
labels = model.labels_
(centroid, labels) = (centroid, labels)
</DeepExtract>
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1, arr=vectors - piecewise_vectors)
result = vectors[np.argsort(dists)[:n_best]]
return result
|
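# A sketch of the per-row step inside make_piecewise above: k-means on a single
# 1-D vector gives a piecewise-constant approximation in which every entry is
# replaced by its cluster centre. Standalone synthetic example, independent of
# the biclustering estimator the method belongs to.
import numpy as np
from sklearn.cluster import KMeans

v = np.array([0.1, 0.0, 0.2, 5.1, 4.9, 5.0])
model = KMeans(n_clusters=2, n_init=10, random_state=0).fit(v.reshape(-1, 1))
piecewise = model.cluster_centers_[model.labels_].ravel()
print(piecewise)  # approximately [0.1, 0.1, 0.1, 5.0, 5.0, 5.0]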
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.0, beta=0.0, max_iter=1000, tol=0.001, verbose=0, random_state=None, check_input=True, max_squared_sum=None, warm_start_mem=None, is_saga=False):
"""SAG solver for Ridge and LogisticRegression.
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
loss : {'log', 'squared', 'multinomial'}, default='log'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, default=1.
L2 regularization term in the objective function
``(0.5 * alpha * || W ||_F^2)``.
beta : float, default=0.
L1 regularization term in the objective function
``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
max_iter : int, default=1000
The max number of passes over the training data if the stopping
criteria is not reached.
tol : float, default=0.001
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol.
verbose : int, default=0
The verbosity level.
random_state : int, RandomState instance or None, default=None
Used when shuffling the data. Pass an int for reproducible output
across multiple function calls.
See :term:`Glossary <random_state>`.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, default=None
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : bool, default=False
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : ndarray of shape (n_features,)
Weight vector.
n_iter_ : int
        The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(n_samples, n_features)
>>> y = rng.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
Ridge(solver='sag')
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(
... solver='sag', multi_class='multinomial')
>>> clf.fit(X, y)
LogisticRegression(multi_class='multinomial', solver='sag')
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
See Also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
if max_iter is None:
max_iter = 1000
if check_input:
_dtype = [np.float64, np.float32]
X = check_array(X, dtype=_dtype, accept_sparse='csr', order='C')
y = check_array(y, dtype=_dtype, ensure_2d=False, order='C')
(n_samples, n_features) = (X.shape[0], X.shape[1])
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order='C')
fit_intercept = coef_init.shape[0] == n_features + 1
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=X.dtype)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes), dtype=X.dtype, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
(dataset, intercept_decay) = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
if loss in ('log', 'multinomial'):
L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
elif loss == 'squared':
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'" % loss)
if is_saga:
mun = min(2 * n_samples * alpha_scaled, L)
step = 1.0 / (2 * L + mun)
else:
step = 1.0 / L
step_size = step
if step_size * alpha_scaled == 1:
raise ZeroDivisionError('Current sag implementation does not handle the case step_size * alpha_scaled == 1')
sag = sag64 if X.dtype == np.float64 else sag32
(num_seen, n_iter_) = sag(dataset, coef_init, intercept_init, n_samples, n_features, n_classes, tol, max_iter, loss, step_size, alpha_scaled, beta_scaled, sum_gradient_init, gradient_memory_init, seen_init, num_seen_init, fit_intercept, intercept_sum_gradient, intercept_decay, is_saga, verbose)
if n_iter_ == max_iter:
warnings.warn('The max_iter was reached which means the coef_ did not converge', ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init, 'intercept_sum_gradient': intercept_sum_gradient, 'gradient_memory': gradient_memory_init, 'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return (coef_, n_iter_, warm_start_mem)
|
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.0, beta=0.0, max_iter=1000, tol=0.001, verbose=0, random_state=None, check_input=True, max_squared_sum=None, warm_start_mem=None, is_saga=False):
"""SAG solver for Ridge and LogisticRegression.
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
loss : {'log', 'squared', 'multinomial'}, default='log'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, default=1.
L2 regularization term in the objective function
``(0.5 * alpha * || W ||_F^2)``.
beta : float, default=0.
L1 regularization term in the objective function
``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
max_iter : int, default=1000
The max number of passes over the training data if the stopping
criteria is not reached.
tol : float, default=0.001
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol.
verbose : int, default=0
The verbosity level.
random_state : int, RandomState instance or None, default=None
Used when shuffling the data. Pass an int for reproducible output
across multiple function calls.
See :term:`Glossary <random_state>`.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, default=None
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : bool, default=False
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : ndarray of shape (n_features,)
Weight vector.
n_iter_ : int
        The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(n_samples, n_features)
>>> y = rng.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
Ridge(solver='sag')
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(
... solver='sag', multi_class='multinomial')
>>> clf.fit(X, y)
LogisticRegression(multi_class='multinomial', solver='sag')
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
See Also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
if max_iter is None:
max_iter = 1000
if check_input:
_dtype = [np.float64, np.float32]
X = check_array(X, dtype=_dtype, accept_sparse='csr', order='C')
y = check_array(y, dtype=_dtype, ensure_2d=False, order='C')
(n_samples, n_features) = (X.shape[0], X.shape[1])
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order='C')
fit_intercept = coef_init.shape[0] == n_features + 1
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=X.dtype)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes), dtype=X.dtype, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
(dataset, intercept_decay) = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
<DeepExtract>
if loss in ('log', 'multinomial'):
L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
elif loss == 'squared':
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'" % loss)
if is_saga:
mun = min(2 * n_samples * alpha_scaled, L)
step = 1.0 / (2 * L + mun)
else:
step = 1.0 / L
step_size = step
</DeepExtract>
if step_size * alpha_scaled == 1:
raise ZeroDivisionError('Current sag implementation does not handle the case step_size * alpha_scaled == 1')
sag = sag64 if X.dtype == np.float64 else sag32
(num_seen, n_iter_) = sag(dataset, coef_init, intercept_init, n_samples, n_features, n_classes, tol, max_iter, loss, step_size, alpha_scaled, beta_scaled, sum_gradient_init, gradient_memory_init, seen_init, num_seen_init, fit_intercept, intercept_sum_gradient, intercept_decay, is_saga, verbose)
if n_iter_ == max_iter:
warnings.warn('The max_iter was reached which means the coef_ did not converge', ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init, 'intercept_sum_gradient': intercept_sum_gradient, 'gradient_memory': gradient_memory_init, 'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return (coef_, n_iter_, warm_start_mem)
|
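The inlined block in the record above derives the SAG/SAGA step size from a Lipschitz-constant estimate of the per-sample gradient. Below is a minimal standalone sketch of that same rule; the helper name `sag_step_size` and the toy data are illustrative, not part of scikit-learn's API.

```python
import numpy as np

def sag_step_size(max_squared_sum, alpha_scaled, n_samples,
                  loss="log", fit_intercept=True, is_saga=False):
    """Standalone restatement of the step-size rule inlined above."""
    if loss in ("log", "multinomial"):
        # 0.25 is the Lipschitz constant of the logistic loss gradient.
        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    elif loss == "squared":
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        raise ValueError("Unknown loss: %r" % loss)
    if is_saga:
        # mun plays the role of the strong-convexity constant min(2*n*alpha, L).
        mun = min(2 * n_samples * alpha_scaled, L)
        return 1.0 / (2 * L + mun)
    return 1.0 / L

rng = np.random.RandomState(0)
X = rng.randn(10, 5)
max_squared_sum = (X ** 2).sum(axis=1).max()
print(sag_step_size(max_squared_sum, alpha_scaled=1.0 / 10, n_samples=10,
                    is_saga=True))
```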
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
if f in singleArgBuiltins:
f = lambda s, l, t: f(t)
limit = [0]
foundArity = [False]
def extract_stack(limit=0):
offset = -2
frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
f = [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
f = [(frame_summary.filename, frame_summary.lineno)]
LINE_DIFF = 6
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = f(*args[limit[0]:])
foundArity[0] = True
f = ret
except TypeError:
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
func_name = '<parse action>'
try:
func_name = getattr(f, '__name__', getattr(f, '__class__').__name__)
except Exception:
func_name = str(f)
wrapper.__name__ = func_name
f = wrapper
def z(*paArgs):
thisFunc = f.__name__
(s, l, t) = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write('<<leaving %s (exception: %s)\n' % (thisFunc, exc))
raise
sys.stderr.write('<<leaving %s (ret: %r)\n' % (thisFunc, ret))
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
|
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
<DeepExtract>
if f in singleArgBuiltins:
f = lambda s, l, t: f(t)
limit = [0]
foundArity = [False]
def extract_stack(limit=0):
offset = -2
frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
f = [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
f = [(frame_summary.filename, frame_summary.lineno)]
LINE_DIFF = 6
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = f(*args[limit[0]:])
foundArity[0] = True
f = ret
except TypeError:
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
func_name = '<parse action>'
try:
func_name = getattr(f, '__name__', getattr(f, '__class__').__name__)
except Exception:
func_name = str(f)
wrapper.__name__ = func_name
f = wrapper
</DeepExtract>
def z(*paArgs):
thisFunc = f.__name__
(s, l, t) = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write('<<leaving %s (exception: %s)\n' % (thisFunc, exc))
raise
sys.stderr.write('<<leaving %s (ret: %r)\n' % (thisFunc, ret))
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
|
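The decorator's own docstring example, written out as a runnable snippet. It assumes a pyparsing version that exports `traceParseAction`; the entering/leaving trace goes to stderr.

```python
from pyparsing import OneOrMore, Word, alphas, traceParseAction

wd = Word(alphas)

@traceParseAction
def remove_duplicate_chars(tokens):
    # Collapse all matched words into their sorted set of unique characters.
    return ''.join(sorted(set(''.join(tokens))))

wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
# The ">>entering"/"<<leaving" trace is written to stderr; the result prints:
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))  # -> ['dfjkls']
```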
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * S ** (-0.5))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
values = -0.5 * (norm2 + u) + np.log(self.priors_)
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
|
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
"""
<DeepExtract>
check_is_fitted(self)
X = self._validate_data(X, reset=False)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * S ** (-0.5))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
values = -0.5 * (norm2 + u) + np.log(self.priors_)
</DeepExtract>
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
|
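The last two lines of `predict_proba` turn per-class discriminant values into posteriors with a max-shifted softmax for numerical stability. A tiny illustration of that normalization step; the decision values below are hypothetical, not actual QDA output.

```python
import numpy as np

# Hypothetical per-class discriminant values for two samples and three classes.
values = np.array([[-3.2, -1.1, -7.5],
                   [-0.4, -2.0, -0.9]])

# Subtracting the row-wise max before exponentiating avoids overflow and does
# not change the normalized result.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
proba = likelihood / likelihood.sum(axis=1)[:, np.newaxis]
print(proba)              # each row is a valid probability distribution
print(proba.sum(axis=1))  # -> [1. 1.]
```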
def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
"""Compute mean and variance along an axis on a CSR or CSC matrix.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It can be of CSR or CSC format.
axis : {0, 1}
        Axis along which the mean and variance are computed.
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if `axis=0` or each sample if `axis=1`.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,), dtype=floating
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=floating
Feature-wise variances.
sum_weights : ndarray of shape (n_features,), dtype=floating
Returned if `return_sum_weights` is `True`.
"""
if axis not in (0, 1):
raise ValueError('Unknown axis value: %d. Use 0 for rows, or 1 for columns' % axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csc_mean_var_axis0(X.T, weights=weights, return_sum_weights=return_sum_weights)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csr_mean_var_axis0(X.T, weights=weights, return_sum_weights=return_sum_weights)
else:
input_type = X.format if sp.issparse(X) else type(X)
err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type
raise TypeError(err)
|
def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
"""Compute mean and variance along an axis on a CSR or CSC matrix.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It can be of CSR or CSC format.
axis : {0, 1}
        Axis along which the mean and variance are computed.
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if `axis=0` or each sample if `axis=1`.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,), dtype=floating
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=floating
Feature-wise variances.
sum_weights : ndarray of shape (n_features,), dtype=floating
Returned if `return_sum_weights` is `True`.
"""
<DeepExtract>
if axis not in (0, 1):
raise ValueError('Unknown axis value: %d. Use 0 for rows, or 1 for columns' % axis)
</DeepExtract>
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csc_mean_var_axis0(X.T, weights=weights, return_sum_weights=return_sum_weights)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csr_mean_var_axis0(X.T, weights=weights, return_sum_weights=return_sum_weights)
else:
<DeepExtract>
input_type = X.format if sp.issparse(X) else type(X)
err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type
raise TypeError(err)
</DeepExtract>
|
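A short usage sketch of `mean_variance_axis` on a small CSR matrix, importing the public helper from `sklearn.utils.sparsefuncs`; the toy matrix is illustrative.

```python
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import mean_variance_axis

# Toy CSR matrix with 3 samples and 2 features.
X = sp.csr_matrix(np.array([[0.0, 1.0], [2.0, 0.0], [4.0, 3.0]]))

means, variances = mean_variance_axis(X, axis=0)
print(means)      # per-feature means
print(variances)  # per-feature variances
```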
def test_assert_radius_neighbors_results_quasi_equality():
rtol = 1e-07
eps = 1e-07
_1m = 1.0 - eps
_1p = 1.0 + eps
_6_1m = 6.1 - eps
_6_1p = 6.1 + eps
ref_dist = [np.array([1.2, 2.5, _6_1m, 6.1, _6_1p]), np.array([_1m, 1, _1p, _1p])]
ref_indices = [np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9])]
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(ref_dist) == len(ref_dist) == len(ref_indices) == len(ref_indices), 'Arrays of results have various lengths.'
n_queries = len(ref_dist)
for query_idx in range(n_queries):
ref_dist_row = ref_dist[query_idx]
dist_row = ref_dist[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = ref_indices[query_idx]
indices_row = ref_indices[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([1, 2, 4, 5, 3])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([1, 2, 4, 5, 3])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([_1m, _1m, 1, _1p, _1p])])) == len(np.array([np.array([_1m, _1m, 1, _1p, _1p])])) == len(np.array([np.array([6, 7, 8, 9, 10])])) == len(np.array([np.array([6, 9, 7, 8, 10])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([_1m, _1m, 1, _1p, _1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([_1m, _1m, 1, _1p, _1p])])[query_idx]
dist_row = np.array([np.array([_1m, _1m, 1, _1p, _1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([6, 7, 8, 9, 10])])[query_idx]
indices_row = np.array([np.array([6, 9, 7, 8, 10])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([2, 1, 3, 4, 5])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([2, 1, 3, 4, 5])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([1, 2, 3, 4])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([1, 2, 3, 4])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = re.escape("The last extra elements ([6.]) aren't in [radius ± rtol]=[6.1 ± 1e-07]")
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, 6])])) == len(np.array([np.array([1.2, 2.5])])) == len(np.array([np.array([1, 2, 3])])) == len(np.array([np.array([1, 2])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, 6])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, 6])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3])])[query_idx]
indices_row = np.array([np.array([1, 2])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([2, 1, 4, 5, 3])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([2, 1, 4, 5, 3])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = "Distances aren't sorted on row 0"
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([2, 1, 4, 5, 3])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([2, 1, 4, 5, 3])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
|
def test_assert_radius_neighbors_results_quasi_equality():
rtol = 1e-07
eps = 1e-07
_1m = 1.0 - eps
_1p = 1.0 + eps
_6_1m = 6.1 - eps
_6_1p = 6.1 + eps
ref_dist = [np.array([1.2, 2.5, _6_1m, 6.1, _6_1p]), np.array([_1m, 1, _1p, _1p])]
ref_indices = [np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9])]
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(ref_dist) == len(ref_dist) == len(ref_indices) == len(ref_indices), 'Arrays of results have various lengths.'
n_queries = len(ref_dist)
for query_idx in range(n_queries):
ref_dist_row = ref_dist[query_idx]
dist_row = ref_dist[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = ref_indices[query_idx]
indices_row = ref_indices[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([1, 2, 4, 5, 3])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([1, 2, 4, 5, 3])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([_1m, _1m, 1, _1p, _1p])])) == len(np.array([np.array([_1m, _1m, 1, _1p, _1p])])) == len(np.array([np.array([6, 7, 8, 9, 10])])) == len(np.array([np.array([6, 9, 7, 8, 10])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([_1m, _1m, 1, _1p, _1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([_1m, _1m, 1, _1p, _1p])])[query_idx]
dist_row = np.array([np.array([_1m, _1m, 1, _1p, _1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([6, 7, 8, 9, 10])])[query_idx]
indices_row = np.array([np.array([6, 9, 7, 8, 10])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([2, 1, 3, 4, 5])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([2, 1, 3, 4, 5])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([1, 2, 3, 4])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([1, 2, 3, 4])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = re.escape("The last extra elements ([6.]) aren't in [radius ± rtol]=[6.1 ± 1e-07]")
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, 6])])) == len(np.array([np.array([1.2, 2.5])])) == len(np.array([np.array([1, 2, 3])])) == len(np.array([np.array([1, 2])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, 6])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, 6])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3])])[query_idx]
indices_row = np.array([np.array([1, 2])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([2, 1, 4, 5, 3])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([2, 1, 4, 5, 3])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = "Distances aren't sorted on row 0"
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])])) == len(np.array([np.array([1, 2, 3, 4, 5])])) == len(np.array([np.array([2, 1, 4, 5, 3])])), 'Arrays of results have various lengths.'
n_queries = len(np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]))
for query_idx in range(n_queries):
ref_dist_row = np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])])[query_idx]
dist_row = np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
largest_row = ref_dist_row if len(ref_dist_row) > len(dist_row) else dist_row
min_length = min(len(ref_dist_row), len(dist_row))
last_extra_elements = largest_row[min_length:]
if last_extra_elements.size > 0:
assert np.all(6.1 - rtol <= last_extra_elements <= 6.1 + rtol), f"The last extra elements ({last_extra_elements}) aren't in [radius ± rtol]=[{6.1} ± {rtol}]"
ref_dist_row = ref_dist_row[:min_length]
dist_row = dist_row[:min_length]
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([np.array([1, 2, 3, 4, 5])])[query_idx]
indices_row = np.array([np.array([2, 1, 4, 5, 3])])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(min_length):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
|
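The assertion helper exercised above tolerates permutations of neighbor indices whose distances agree once rounded to `n_significant_digits` significant digits (6 digits for `rtol=1e-07`). The sketch below shows that grouping idea; `round_to_significant` is a hypothetical stand-in for the `relative_rounding` helper used in the test, not scikit-learn's implementation.

```python
from collections import defaultdict
from math import floor, log10

def round_to_significant(x, n_significant_digits):
    # Hypothetical stand-in for `relative_rounding`: keep only the leading
    # significant digits of x.
    if x == 0:
        return 0.0
    magnitude = int(floor(log10(abs(x)))) + 1
    return round(x, n_significant_digits - magnitude)

rtol = 1e-07
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)  # -> 6

dist = [1.2, 2.5, 6.1 - 1e-07, 6.1, 6.1 + 1e-07]
ref_indices, indices = [1, 2, 3, 4, 5], [1, 2, 4, 5, 3]

ref_groups, groups = defaultdict(set), defaultdict(set)
for d, i_ref, i in zip(dist, ref_indices, indices):
    key = round_to_significant(d, n_significant_digits)
    ref_groups[key].add(i_ref)
    groups[key].add(i)

# Indices 3, 4 and 5 share the same rounded distance, so permuting them
# still compares equal.
assert all(ref_groups[k] == groups[k] for k in ref_groups)
```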
def test_calinski_harabasz_score():
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match='Number of labels is'):
calinski_harabasz_score(rng.rand(10, 2), np.zeros(10))
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match='Number of labels is'):
calinski_harabasz_score(rng.rand(10, 2), np.arange(10))
assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5)
assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)
X = [[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 + [[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
pytest.approx(calinski_harabasz_score(X, labels), 45 * (40 - 4) / (5 * (4 - 1)))
|
def test_calinski_harabasz_score():
<DeepExtract>
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match='Number of labels is'):
calinski_harabasz_score(rng.rand(10, 2), np.zeros(10))
</DeepExtract>
<DeepExtract>
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match='Number of labels is'):
calinski_harabasz_score(rng.rand(10, 2), np.arange(10))
</DeepExtract>
assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5)
assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)
X = [[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 + [[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
pytest.approx(calinski_harabasz_score(X, labels), 45 * (40 - 4) / (5 * (4 - 1)))
|
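The expected value in the last assertion follows from the Calinski-Harabasz definition CH = (B / (k - 1)) / (W / (n - k)): the four 10-point blobs give a between-cluster dispersion of B = 180 and a within-cluster dispersion of W = 20, so with n = 40 samples and k = 4 clusters the score is (180 / 3) / (20 / 36) = 108, which is exactly 45 * (40 - 4) / (5 * (4 - 1)). A quick check with the public scorer:

```python
from sklearn.metrics import calinski_harabasz_score

# Same data as the test above: four well-separated 10-point blobs.
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5
     + [[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10

print(calinski_harabasz_score(X, labels))  # expected ~108.0 == 45 * 36 / 15
```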
def _safe_repr(object, context, maxlevels, level, changed_only=False):
"""Same as the builtin _safe_repr, with added support for Estimator
objects."""
typ = type(object)
if typ in pprint._builtin_scalars:
return (repr(object), True, False)
r = getattr(typ, '__repr__', None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return ('{}', True, False)
objid = id(object)
if maxlevels and level >= maxlevels:
return ('{...}', False, objid in context)
if objid in context:
return (pprint._recursion(object), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s: %s' % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return ('{%s}' % ', '.join(components), readable, recursive)
if issubclass(typ, list) and r is list.__repr__ or (issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return ('[]', True, False)
format = '[%s]'
elif len(object) == 1:
format = '(%s,)'
else:
if not object:
return ('()', True, False)
format = '(%s)'
objid = id(object)
if maxlevels and level >= maxlevels:
return (format % '...', False, objid in context)
if objid in context:
return (pprint._recursion(object), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
typ = type(o)
if typ in pprint._builtin_scalars:
(orepr, oreadable, orecur) = (repr(o), True, False)
r = getattr(typ, '__repr__', None)
if issubclass(typ, dict) and r is dict.__repr__:
if not o:
(orepr, oreadable, orecur) = ('{}', True, False)
objid = id(o)
if maxlevels and level >= maxlevels:
(orepr, oreadable, orecur) = ('{...}', False, objid in context)
if objid in context:
(orepr, oreadable, orecur) = (pprint._recursion(o), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(o.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s: %s' % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
(orepr, oreadable, orecur) = ('{%s}' % ', '.join(components), readable, recursive)
if issubclass(typ, list) and r is list.__repr__ or (issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not o:
(orepr, oreadable, orecur) = ('[]', True, False)
format = '[%s]'
elif len(o) == 1:
format = '(%s,)'
else:
if not o:
(orepr, oreadable, orecur) = ('()', True, False)
format = '(%s)'
objid = id(o)
if maxlevels and level >= maxlevels:
(orepr, oreadable, orecur) = (format % '...', False, objid in context)
if objid in context:
(orepr, oreadable, orecur) = (pprint._recursion(o), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in o:
(orepr, oreadable, orecur) = _safe_repr(o, context, maxlevels, level, changed_only=changed_only)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
(orepr, oreadable, orecur) = (format % ', '.join(components), readable, recursive)
if issubclass(typ, BaseEstimator):
objid = id(o)
if maxlevels and level >= maxlevels:
(orepr, oreadable, orecur) = ('{...}', False, objid in context)
if objid in context:
(orepr, oreadable, orecur) = (pprint._recursion(o), False, True)
context[objid] = 1
readable = True
recursive = False
if changed_only:
params = _changed_params(o)
else:
params = o.get_params(deep=False)
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(params.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s=%s' % (krepr.strip("'"), vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
(orepr, oreadable, orecur) = ('%s(%s)' % (typ.__name__, ', '.join(components)), readable, recursive)
rep = repr(o)
(orepr, oreadable, orecur) = (rep, rep and (not rep.startswith('<')), False)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return (format % ', '.join(components), readable, recursive)
if issubclass(typ, BaseEstimator):
objid = id(object)
if maxlevels and level >= maxlevels:
return ('{...}', False, objid in context)
if objid in context:
return (pprint._recursion(object), False, True)
context[objid] = 1
readable = True
recursive = False
if changed_only:
params = object.get_params(deep=False)
init_func = getattr(object.__init__, 'deprecated_original', object.__init__)
init_params = inspect.signature(init_func).parameters
init_params = {name: param.default for (name, param) in init_params.items()}
def has_changed(k, v):
if k not in init_params:
params = True
if init_params[k] == inspect._empty:
params = True
if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
params = True
if repr(v) != repr(init_params[k]) and (not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):
params = True
params = False
params = {k: v for (k, v) in params.items() if has_changed(k, v)}
else:
params = object.get_params(deep=False)
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(params.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s=%s' % (krepr.strip("'"), vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return ('%s(%s)' % (typ.__name__, ', '.join(components)), readable, recursive)
rep = repr(object)
return (rep, rep and (not rep.startswith('<')), False)
|
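The `changed_only` branch of `_safe_repr` (inlined in the record above, wrapped in DeepExtract in the record that follows) keeps only parameters whose value differs from the `__init__` default. Below is a simplified sketch of that idea, ignoring the NaN and nested-estimator special cases handled in the original; `changed_params` is our own name, not scikit-learn's private `_changed_params`.

```python
import inspect
from sklearn.linear_model import LogisticRegression

def changed_params(estimator):
    # Keep only parameters whose repr differs from the constructor default.
    params = estimator.get_params(deep=False)
    defaults = {
        name: p.default
        for name, p in inspect.signature(type(estimator).__init__).parameters.items()
    }
    return {k: v for k, v in params.items()
            if k in defaults and repr(v) != repr(defaults[k])}

print(changed_params(LogisticRegression(C=10.0, solver="saga")))
# typically -> {'C': 10.0, 'solver': 'saga'}
```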
def _safe_repr(object, context, maxlevels, level, changed_only=False):
"""Same as the builtin _safe_repr, with added support for Estimator
objects."""
typ = type(object)
if typ in pprint._builtin_scalars:
return (repr(object), True, False)
r = getattr(typ, '__repr__', None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return ('{}', True, False)
objid = id(object)
if maxlevels and level >= maxlevels:
return ('{...}', False, objid in context)
if objid in context:
return (pprint._recursion(object), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s: %s' % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return ('{%s}' % ', '.join(components), readable, recursive)
if issubclass(typ, list) and r is list.__repr__ or (issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return ('[]', True, False)
format = '[%s]'
elif len(object) == 1:
format = '(%s,)'
else:
if not object:
return ('()', True, False)
format = '(%s)'
objid = id(object)
if maxlevels and level >= maxlevels:
return (format % '...', False, objid in context)
if objid in context:
return (pprint._recursion(object), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
<DeepExtract>
typ = type(o)
if typ in pprint._builtin_scalars:
(orepr, oreadable, orecur) = (repr(o), True, False)
r = getattr(typ, '__repr__', None)
if issubclass(typ, dict) and r is dict.__repr__:
if not o:
(orepr, oreadable, orecur) = ('{}', True, False)
objid = id(o)
if maxlevels and level >= maxlevels:
(orepr, oreadable, orecur) = ('{...}', False, objid in context)
if objid in context:
(orepr, oreadable, orecur) = (pprint._recursion(o), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(o.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s: %s' % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
(orepr, oreadable, orecur) = ('{%s}' % ', '.join(components), readable, recursive)
if issubclass(typ, list) and r is list.__repr__ or (issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not o:
(orepr, oreadable, orecur) = ('[]', True, False)
format = '[%s]'
elif len(o) == 1:
format = '(%s,)'
else:
if not o:
(orepr, oreadable, orecur) = ('()', True, False)
format = '(%s)'
objid = id(o)
if maxlevels and level >= maxlevels:
(orepr, oreadable, orecur) = (format % '...', False, objid in context)
if objid in context:
(orepr, oreadable, orecur) = (pprint._recursion(o), False, True)
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in o:
(orepr, oreadable, orecur) = _safe_repr(o, context, maxlevels, level, changed_only=changed_only)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
(orepr, oreadable, orecur) = (format % ', '.join(components), readable, recursive)
if issubclass(typ, BaseEstimator):
objid = id(o)
if maxlevels and level >= maxlevels:
(orepr, oreadable, orecur) = ('{...}', False, objid in context)
if objid in context:
(orepr, oreadable, orecur) = (pprint._recursion(o), False, True)
context[objid] = 1
readable = True
recursive = False
if changed_only:
params = _changed_params(o)
else:
params = o.get_params(deep=False)
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(params.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s=%s' % (krepr.strip("'"), vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
(orepr, oreadable, orecur) = ('%s(%s)' % (typ.__name__, ', '.join(components)), readable, recursive)
rep = repr(o)
(orepr, oreadable, orecur) = (rep, rep and (not rep.startswith('<')), False)
</DeepExtract>
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return (format % ', '.join(components), readable, recursive)
if issubclass(typ, BaseEstimator):
objid = id(object)
if maxlevels and level >= maxlevels:
return ('{...}', False, objid in context)
if objid in context:
return (pprint._recursion(object), False, True)
context[objid] = 1
readable = True
recursive = False
if changed_only:
<DeepExtract>
params = object.get_params(deep=False)
init_func = getattr(object.__init__, 'deprecated_original', object.__init__)
init_params = inspect.signature(init_func).parameters
init_params = {name: param.default for (name, param) in init_params.items()}
def has_changed(k, v):
                if k not in init_params:
                    return True
                if init_params[k] == inspect._empty:
                    return True
                if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
                    return True
                if repr(v) != repr(init_params[k]) and (not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):
                    return True
                return False
params = {k: v for (k, v) in params.items() if has_changed(k, v)}
</DeepExtract>
else:
params = object.get_params(deep=False)
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(params.items(), key=pprint._safe_tuple)
for (k, v) in items:
(krepr, kreadable, krecur) = saferepr(k, context, maxlevels, level, changed_only=changed_only)
(vrepr, vreadable, vrecur) = saferepr(v, context, maxlevels, level, changed_only=changed_only)
append('%s=%s' % (krepr.strip("'"), vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return ('%s(%s)' % (typ.__name__, ', '.join(components)), readable, recursive)
rep = repr(object)
return (rep, rep and (not rep.startswith('<')), False)
|
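# Editor's note (hedged sketch, not part of the extracted pair above): the
# changed-only branch in _safe_repr/_changed_params is what lets estimator
# reprs show only non-default parameters.  With a released scikit-learn the
# effect can be toggled through the public config API:
from sklearn import set_config
from sklearn.linear_model import LogisticRegression

set_config(print_changed_only=True)
print(LogisticRegression(C=2.0))   # -> LogisticRegression(C=2.0)
set_config(print_changed_only=False)
print(LogisticRegression(C=2.0))   # -> full parameter list, defaults included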
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start:start + len(dist_chunk)]
for col in range(X.shape[1]):
if not valid_mask[col]:
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
continue
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][:, potential_donors_idx]
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(self._fit_X[:, col], mask=mask_fit_X[:, col]).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
continue
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][:, potential_donors_idx]
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
donors_idx = np.argpartition(dist_subset, n_neighbors - 1, axis=1)[:, :n_neighbors]
donors_dist = dist_subset[np.arange(donors_idx.shape[0])[:, None], donors_idx]
weight_matrix = _get_weights(donors_dist, self.weights)
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
donors = self._fit_X[potential_donors_idx, col].take(donors_idx)
donors_mask = mask_fit_X[potential_donors_idx, col].take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
value = np.ma.average(donors, axis=1, weights=weight_matrix).data
X[receivers_idx, col] = value
|
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start:start + len(dist_chunk)]
for col in range(X.shape[1]):
if not valid_mask[col]:
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
continue
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][:, potential_donors_idx]
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(self._fit_X[:, col], mask=mask_fit_X[:, col]).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
continue
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][:, potential_donors_idx]
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
<DeepExtract>
donors_idx = np.argpartition(dist_subset, n_neighbors - 1, axis=1)[:, :n_neighbors]
donors_dist = dist_subset[np.arange(donors_idx.shape[0])[:, None], donors_idx]
weight_matrix = _get_weights(donors_dist, self.weights)
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
donors = self._fit_X[potential_donors_idx, col].take(donors_idx)
donors_mask = mask_fit_X[potential_donors_idx, col].take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
value = np.ma.average(donors, axis=1, weights=weight_matrix).data
</DeepExtract>
X[receivers_idx, col] = value
|
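# Hedged usage sketch: process_chunk above is the per-chunk worker that
# KNNImputer.transform uses to fill missing entries from the nearest donors.
# Toy data below is illustrative only (assumes scikit-learn is installed):
import numpy as np
from sklearn.impute import KNNImputer

X_missing = np.array([[1.0, 2.0, np.nan],
                      [3.0, 4.0, 3.0],
                      [np.nan, 6.0, 5.0],
                      [8.0, 8.0, 7.0]])
imputer = KNNImputer(n_neighbors=2, weights='distance')
print(imputer.fit_transform(X_missing))  # NaNs replaced by weighted donor averages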
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
"""
X = self._validate_data(X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)
raw_predictions = self._raw_predict(X)
if raw_predictions.shape[1] == 1:
raw_predictions = raw_predictions.ravel()
raw_predictions = raw_predictions
try:
return self._loss._raw_prediction_to_proba(raw_predictions)
except NotFittedError:
raise
except AttributeError as e:
raise AttributeError('loss=%r does not support predict_proba' % self.loss) from e
|
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
"""
<DeepExtract>
X = self._validate_data(X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)
raw_predictions = self._raw_predict(X)
if raw_predictions.shape[1] == 1:
raw_predictions = raw_predictions.ravel()
raw_predictions = raw_predictions
</DeepExtract>
try:
return self._loss._raw_prediction_to_proba(raw_predictions)
except NotFittedError:
raise
except AttributeError as e:
raise AttributeError('loss=%r does not support predict_proba' % self.loss) from e
|
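# Minimal sketch of how the predict_proba above is reached through the public
# GradientBoostingClassifier API (toy data; assumes scikit-learn is installed):
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = GradientBoostingClassifier(n_estimators=10, random_state=0).fit(X, y)
proba = clf.predict_proba(X[:3])
print(proba.shape)        # (3, 2): one column per entry of clf.classes_
print(proba.sum(axis=1))  # each row sums to 1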
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : BernoulliRBM
The fitted model.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=(np.float64, np.float32))
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='F', dtype=X.dtype)
self._n_features_out = self.components_.shape[0]
self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples))
verbose = self.verbose
begin = time.time()
for iteration in range(1, self.n_iter + 1):
for batch_slice in batch_slices:
h_pos = self._mean_hiddens(X[batch_slice])
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / X[batch_slice].shape[0]
update = safe_sparse_dot(X[batch_slice].T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(X[batch_slice].sum(axis=0)).squeeze() - v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0
self.h_samples_ = np.floor(h_neg, h_neg)
if verbose:
end = time.time()
print('[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs' % (type(self).__name__, iteration, self.score_samples(X).mean(), end - begin))
begin = end
return self
|
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : BernoulliRBM
The fitted model.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=(np.float64, np.float32))
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='F', dtype=X.dtype)
self._n_features_out = self.components_.shape[0]
self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples))
verbose = self.verbose
begin = time.time()
for iteration in range(1, self.n_iter + 1):
for batch_slice in batch_slices:
<DeepExtract>
h_pos = self._mean_hiddens(X[batch_slice])
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / X[batch_slice].shape[0]
update = safe_sparse_dot(X[batch_slice].T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(X[batch_slice].sum(axis=0)).squeeze() - v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0
self.h_samples_ = np.floor(h_neg, h_neg)
</DeepExtract>
if verbose:
end = time.time()
print('[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs' % (type(self).__name__, iteration, self.score_samples(X).mean(), end - begin))
begin = end
return self
|
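# Hedged example of the contrastive-divergence fit loop above via the public
# BernoulliRBM API (toy binary data; assumes scikit-learn is installed):
import numpy as np
from sklearn.neural_network import BernoulliRBM

X_bin = np.random.RandomState(0).randint(0, 2, size=(50, 16)).astype(np.float64)
rbm = BernoulliRBM(n_components=4, learning_rate=0.05, batch_size=10,
                   n_iter=5, verbose=True, random_state=0)
rbm.fit(X_bin)                  # prints one pseudo-likelihood line per iteration
print(rbm.components_.shape)    # (4, 16)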
def _perplexity_precomp_distr(X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr is None:
(doc_topic_distr, _) = self._e_step(X, cal_sstats=False, random_init=False)
doc_topic_distr = doc_topic_distr
else:
(n_samples, n_components) = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if n_components != self.n_components:
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
def _loglikelihood(prior, distr, dirichlet_distr, size):
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
        return score
is_sparse_x = sp.issparse(X)
(n_samples, n_components) = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in range(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
score += _loglikelihood(doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components)
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
score += _loglikelihood(topic_word_prior, self.components_, dirichlet_component_, n_features)
bound = score
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
|
def _perplexity_precomp_distr(X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr is None:
<DeepExtract>
(doc_topic_distr, _) = self._e_step(X, cal_sstats=False, random_init=False)
doc_topic_distr = doc_topic_distr
</DeepExtract>
else:
(n_samples, n_components) = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if n_components != self.n_components:
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
<DeepExtract>
def _loglikelihood(prior, distr, dirichlet_distr, size):
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
        return score
is_sparse_x = sp.issparse(X)
(n_samples, n_components) = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in range(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
score += _loglikelihood(doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components)
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
score += _loglikelihood(topic_word_prior, self.components_, dirichlet_component_, n_features)
bound = score
</DeepExtract>
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
|
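# Sketch of the public entry point for the computation above: in scikit-learn,
# LatentDirichletAllocation.perplexity delegates to _perplexity_precomp_distr
# (toy count data; assumes scikit-learn is installed):
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

X_counts = np.random.RandomState(0).randint(0, 5, size=(20, 10))
lda = LatentDirichletAllocation(n_components=3, random_state=0).fit(X_counts)
print(lda.perplexity(X_counts))  # exp(-log-likelihood per word); lower is better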
def __init__(self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn('null string passed to Regex; use Empty() instead', SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn('invalid pattern (%s) passed to Regex' % pattern, SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = self.reString = str(pattern)
self.flags = flags
else:
raise ValueError('Regex may only be constructed with a string or a compiled RE object')
if isinstance(self, unicode):
self.name = self
try:
self.name = str(self)
except UnicodeEncodeError:
ret = unicode(self).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
self.name = xmlcharref.transformString(ret)
self.errmsg = 'Expected ' + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
|
def __init__(self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn('null string passed to Regex; use Empty() instead', SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn('invalid pattern (%s) passed to Regex' % pattern, SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = self.reString = str(pattern)
self.flags = flags
else:
raise ValueError('Regex may only be constructed with a string or a compiled RE object')
<DeepExtract>
if isinstance(self, unicode):
self.name = self
try:
self.name = str(self)
except UnicodeEncodeError:
ret = unicode(self).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
self.name = xmlcharref.transformString(ret)
</DeepExtract>
self.errmsg = 'Expected ' + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
|
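# Hedged usage sketch for the pyparsing Regex element defined above (assumes
# pyparsing is installed; parseString/setParseAction as used in the code above):
from pyparsing import Regex

integer = Regex(r'[+-]?\d+').setParseAction(lambda t: int(t[0]))
print(integer.parseString('42'))   # [42]
print(integer.parseString('-7'))   # [-7]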
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence, early stopping, and
learning rate adjustments should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : ndarray of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,), default=None
Classes across all calls to partial_fit.
        Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
if not hasattr(self, 'classes_'):
self._validate_params()
        if self.early_stopping:
raise ValueError('early_stopping should be False with partial_fit')
if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0:
raise ValueError('eta0 must be > 0')
if self.learning_rate == 'optimal' and self.alpha == 0:
raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.")
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.class_weight == 'balanced':
raise ValueError("class_weight '{0}' is not supported for partial_fit. In order to use 'balanced' weights, use compute_class_weight('{0}', classes=classes, y=y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None)
|
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence, early stopping, and
learning rate adjustments should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : ndarray of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,), default=None
Classes across all calls to partial_fit.
        Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
if not hasattr(self, 'classes_'):
self._validate_params()
<DeepExtract>
        if self.early_stopping:
raise ValueError('early_stopping should be False with partial_fit')
if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0:
raise ValueError('eta0 must be > 0')
if self.learning_rate == 'optimal' and self.alpha == 0:
raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.")
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
</DeepExtract>
if self.class_weight == 'balanced':
raise ValueError("class_weight '{0}' is not supported for partial_fit. In order to use 'balanced' weights, use compute_class_weight('{0}', classes=classes, y=y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None)
|
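# Minimal sketch of the partial_fit contract documented above: `classes` must
# be supplied on the first call, later calls stream further mini-batches
# (toy data; assumes scikit-learn is installed):
import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.random.RandomState(0).randn(60, 5)
y = np.tile([0, 1, 2], 20)
clf = SGDClassifier(loss='log_loss', random_state=0)
clf.partial_fit(X[:20], y[:20], classes=np.unique(y))          # first call
for start in range(20, 60, 20):
    clf.partial_fit(X[start:start + 20], y[start:start + 20])  # later batches
print(clf.predict(X[:5]))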
@ignore_warnings
def test_regression_scorer_sample_weight():
(X, y) = make_regression(n_samples=101, n_features=20, random_state=0)
offset = abs(y.min()) + 1
y = y + offset
y = y
(X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=0)
sample_weight = np.ones_like(y_test)
sample_weight[:11] = 0
reg = DecisionTreeRegressor(random_state=0)
reg.fit(X_train, y_train)
for name in get_scorer_names():
scorer = get_scorer(name)
if name not in REGRESSION_SCORERS:
continue
try:
weighted = scorer(reg, X_test, y_test, sample_weight=sample_weight)
ignored = scorer(reg, X_test[11:], y_test[11:])
unweighted = scorer(reg, X_test, y_test)
assert weighted != unweighted, f'scorer {name} behaves identically when called with sample weights: {weighted} vs {unweighted}'
assert_almost_equal(weighted, ignored, err_msg=f'scorer {name} behaves differently when ignoring samples and setting sample_weight to 0: {weighted} vs {ignored}')
except TypeError as e:
assert 'sample_weight' in str(e), f'scorer {name} raises unhelpful exception when called with sample weights: {str(e)}'
|
@ignore_warnings
def test_regression_scorer_sample_weight():
(X, y) = make_regression(n_samples=101, n_features=20, random_state=0)
<DeepExtract>
offset = abs(y.min()) + 1
y = y + offset
y = y
</DeepExtract>
(X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=0)
sample_weight = np.ones_like(y_test)
sample_weight[:11] = 0
reg = DecisionTreeRegressor(random_state=0)
reg.fit(X_train, y_train)
for name in get_scorer_names():
scorer = get_scorer(name)
if name not in REGRESSION_SCORERS:
continue
try:
weighted = scorer(reg, X_test, y_test, sample_weight=sample_weight)
ignored = scorer(reg, X_test[11:], y_test[11:])
unweighted = scorer(reg, X_test, y_test)
assert weighted != unweighted, f'scorer {name} behaves identically when called with sample weights: {weighted} vs {unweighted}'
assert_almost_equal(weighted, ignored, err_msg=f'scorer {name} behaves differently when ignoring samples and setting sample_weight to 0: {weighted} vs {ignored}')
except TypeError as e:
assert 'sample_weight' in str(e), f'scorer {name} raises unhelpful exception when called with sample weights: {str(e)}'
|
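# Sketch of the scorer/sample_weight interaction the test above exercises:
# zero-weighted samples are expected to behave like dropped samples
# (assumes scikit-learn is installed):
import numpy as np
from sklearn.datasets import make_regression
from sklearn.metrics import get_scorer
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(n_samples=50, random_state=0)
reg = DecisionTreeRegressor(random_state=0).fit(X, y)
w = np.ones_like(y)
w[:10] = 0
scorer = get_scorer('neg_mean_absolute_error')
print(scorer(reg, X, y, sample_weight=w))
print(scorer(reg, X[10:], y[10:]))  # expected to match the weighted score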
def get_contributors():
"""Get the list of contributor profiles. Require admin rights."""
core_devs = []
contributor_experience_team = []
comm_team = []
core_devs_slug = 'core-devs'
contributor_experience_team_slug = 'contributor-experience-team'
comm_team_slug = 'communication-team'
entry_point = 'https://api.github.com/orgs/scikit-learn/'
for (team_slug, lst) in zip((core_devs_slug, contributor_experience_team_slug, comm_team_slug), (core_devs, contributor_experience_team, comm_team)):
for page in [1, 2]:
for sleep_time in [10, 30, 0]:
reply = requests.get(f'{entry_point}teams/{team_slug}/members?page={page}', auth=auth)
api_limit = 'message' in reply.json() and 'API rate limit exceeded' in reply.json()['message']
if not api_limit:
break
print('API rate limit exceeded, waiting..')
time.sleep(sleep_time)
reply.raise_for_status()
reply = reply
lst.extend(reply.json())
members = []
for page in [1, 2, 3]:
for sleep_time in [10, 30, 0]:
reply = requests.get(f'{entry_point}members?page={page}', auth=auth)
api_limit = 'message' in reply.json() and 'API rate limit exceeded' in reply.json()['message']
if not api_limit:
break
print('API rate limit exceeded, waiting..')
time.sleep(sleep_time)
reply.raise_for_status()
reply = reply
members.extend(reply.json())
core_devs = set((c['login'] for c in core_devs))
contributor_experience_team = set((c['login'] for c in contributor_experience_team))
comm_team = set((c['login'] for c in comm_team))
members = set((c['login'] for c in members))
members |= {'dubourg', 'mbrucher', 'thouis', 'jarrodmillman'}
members |= {'Angel Soler Gollonet'}
members -= {'sklearn-ci', 'sklearn-wheels', 'sklearn-lgtm'}
contributor_experience_team -= core_devs
emeritus = members - core_devs - contributor_experience_team - comm_team
emeritus_comm_team = {'reshamas'}
comm_team -= {'reshamas'}
core_devs = [get_profile(login) for login in core_devs]
emeritus = [get_profile(login) for login in emeritus]
contributor_experience_team = [get_profile(login) for login in contributor_experience_team]
comm_team = [get_profile(login) for login in comm_team]
emeritus_comm_team = [get_profile(login) for login in emeritus_comm_team]
core_devs = sorted(core_devs, key=key)
emeritus = sorted(emeritus, key=key)
contributor_experience_team = sorted(contributor_experience_team, key=key)
comm_team = sorted(comm_team, key=key)
emeritus_comm_team = sorted(emeritus_comm_team, key=key)
return (core_devs, emeritus, contributor_experience_team, comm_team, emeritus_comm_team)
|
def get_contributors():
"""Get the list of contributor profiles. Require admin rights."""
core_devs = []
contributor_experience_team = []
comm_team = []
core_devs_slug = 'core-devs'
contributor_experience_team_slug = 'contributor-experience-team'
comm_team_slug = 'communication-team'
entry_point = 'https://api.github.com/orgs/scikit-learn/'
for (team_slug, lst) in zip((core_devs_slug, contributor_experience_team_slug, comm_team_slug), (core_devs, contributor_experience_team, comm_team)):
for page in [1, 2]:
<DeepExtract>
for sleep_time in [10, 30, 0]:
reply = requests.get(f'{entry_point}teams/{team_slug}/members?page={page}', auth=auth)
api_limit = 'message' in reply.json() and 'API rate limit exceeded' in reply.json()['message']
if not api_limit:
break
print('API rate limit exceeded, waiting..')
time.sleep(sleep_time)
reply.raise_for_status()
reply = reply
</DeepExtract>
lst.extend(reply.json())
members = []
for page in [1, 2, 3]:
<DeepExtract>
for sleep_time in [10, 30, 0]:
reply = requests.get(f'{entry_point}members?page={page}', auth=auth)
api_limit = 'message' in reply.json() and 'API rate limit exceeded' in reply.json()['message']
if not api_limit:
break
print('API rate limit exceeded, waiting..')
time.sleep(sleep_time)
reply.raise_for_status()
reply = reply
</DeepExtract>
members.extend(reply.json())
core_devs = set((c['login'] for c in core_devs))
contributor_experience_team = set((c['login'] for c in contributor_experience_team))
comm_team = set((c['login'] for c in comm_team))
members = set((c['login'] for c in members))
members |= {'dubourg', 'mbrucher', 'thouis', 'jarrodmillman'}
members |= {'Angel Soler Gollonet'}
members -= {'sklearn-ci', 'sklearn-wheels', 'sklearn-lgtm'}
contributor_experience_team -= core_devs
emeritus = members - core_devs - contributor_experience_team - comm_team
emeritus_comm_team = {'reshamas'}
comm_team -= {'reshamas'}
core_devs = [get_profile(login) for login in core_devs]
emeritus = [get_profile(login) for login in emeritus]
contributor_experience_team = [get_profile(login) for login in contributor_experience_team]
comm_team = [get_profile(login) for login in comm_team]
emeritus_comm_team = [get_profile(login) for login in emeritus_comm_team]
core_devs = sorted(core_devs, key=key)
emeritus = sorted(emeritus, key=key)
contributor_experience_team = sorted(contributor_experience_team, key=key)
comm_team = sorted(comm_team, key=key)
emeritus_comm_team = sorted(emeritus_comm_team, key=key)
return (core_devs, emeritus, contributor_experience_team, comm_team, emeritus_comm_team)
|
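# The repeated inline loop above is a retry-on-rate-limit fetch; a standalone
# sketch of the same pattern (requests and a GitHub `auth` tuple/token are
# assumed to be available, as in the original script):
import time
import requests

def get_with_retry(url, auth=None, waits=(10, 30, 0)):
    for sleep_time in waits:
        reply = requests.get(url, auth=auth)
        body = reply.json()
        if not ('message' in body and 'API rate limit exceeded' in body['message']):
            break
        print('API rate limit exceeded, waiting..')
        time.sleep(sleep_time)
    reply.raise_for_status()
    return reply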
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
p : ndarray, shape (n_samples, n_classes)
The class probabilities of the input samples.
"""
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X = self._validate_data(X, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X.shape[1], self._n_features))
n_samples = X.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
    n_threads = _openmp_effective_n_threads(None)  # fall back to the default OpenMP thread count
self._predict_iterations(X, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions = raw_predictions
return self._loss.predict_proba(raw_predictions)
|
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
p : ndarray, shape (n_samples, n_classes)
The class probabilities of the input samples.
"""
<DeepExtract>
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X = self._validate_data(X, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X.shape[1], self._n_features))
n_samples = X.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
    n_threads = _openmp_effective_n_threads(None)  # fall back to the default OpenMP thread count
self._predict_iterations(X, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions = raw_predictions
</DeepExtract>
return self._loss.predict_proba(raw_predictions)
|
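# Hedged sketch of the public call path for the predict_proba above
# (HistGradientBoostingClassifier; toy data, assumes scikit-learn is installed):
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier

X, y = make_classification(n_samples=200, random_state=0)
clf = HistGradientBoostingClassifier(max_iter=20, random_state=0).fit(X, y)
print(clf.predict_proba(X[:3]))  # shape (3, 2), columns ordered as clf.classes_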
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
        is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
    length_scale = np.squeeze(self.length_scale).astype(float)
    if np.ndim(length_scale) > 1:
        raise ValueError('length_scale cannot be of dimension greater than 1')
    if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
        raise ValueError('Anisotropic kernel must have the same number of dimensions as data (%d!=%d)' % (length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X / length_scale, Y / length_scale, metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1.0 + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1.0 + K + K ** 2 / 3.0) * np.exp(-K)
elif self.nu == np.inf:
K = np.exp(-dists ** 2 / 2.0)
else:
K = dists
K[K == 0.0] += np.finfo(float).eps
tmp = math.sqrt(2 * self.nu) * K
K.fill(2 ** (1.0 - self.nu) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return (K, K_gradient)
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2
else:
D = squareform(dists ** 2)[:, :, np.newaxis]
if self.nu == 0.5:
denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
divide_result = np.zeros_like(D)
np.divide(D, denominator, out=divide_result, where=denominator != 0)
K_gradient = K[..., np.newaxis] * divide_result
elif self.nu == 1.5:
K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
elif self.nu == np.inf:
K_gradient = D * K[..., np.newaxis]
else:
def f(theta):
return self.clone_with_theta(theta)(X, Y)
return (K, _approx_fprime(self.theta, f, 1e-10))
if not self.anisotropic:
return (K, K_gradient[:, :].sum(-1)[:, :, np.newaxis])
else:
return (K, K_gradient)
else:
return K
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
        is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
<DeepExtract>
    length_scale = np.squeeze(self.length_scale).astype(float)
    if np.ndim(length_scale) > 1:
        raise ValueError('length_scale cannot be of dimension greater than 1')
    if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
        raise ValueError('Anisotropic kernel must have the same number of dimensions as data (%d!=%d)' % (length_scale.shape[0], X.shape[1]))
</DeepExtract>
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X / length_scale, Y / length_scale, metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1.0 + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1.0 + K + K ** 2 / 3.0) * np.exp(-K)
elif self.nu == np.inf:
K = np.exp(-dists ** 2 / 2.0)
else:
K = dists
K[K == 0.0] += np.finfo(float).eps
tmp = math.sqrt(2 * self.nu) * K
K.fill(2 ** (1.0 - self.nu) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return (K, K_gradient)
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2
else:
D = squareform(dists ** 2)[:, :, np.newaxis]
if self.nu == 0.5:
denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
divide_result = np.zeros_like(D)
np.divide(D, denominator, out=divide_result, where=denominator != 0)
K_gradient = K[..., np.newaxis] * divide_result
elif self.nu == 1.5:
K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
elif self.nu == np.inf:
K_gradient = D * K[..., np.newaxis]
else:
def f(theta):
return self.clone_with_theta(theta)(X, Y)
return (K, _approx_fprime(self.theta, f, 1e-10))
if not self.anisotropic:
return (K, K_gradient[:, :].sum(-1)[:, :, np.newaxis])
else:
return (K, K_gradient)
else:
return K
|
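# Sketch of evaluating the Matern kernel above for its special-cased nu values
# (assumes scikit-learn is installed; nu=inf coincides with the RBF kernel):
import numpy as np
from sklearn.gaussian_process.kernels import Matern

X = np.array([[0.0], [0.5], [1.0]])
for nu in (0.5, 1.5, 2.5, np.inf):
    K = Matern(length_scale=1.0, nu=nu)(X)
    print(nu, np.round(K[0], 3))
K, K_grad = Matern(length_scale=1.0, nu=1.5)(X, eval_gradient=True)
print(K_grad.shape)  # (3, 3, 1): one gradient slice per free hyperparameter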
def test_importances_asymptotic():
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.0
for count in np.bincount(samples):
p = 1.0 * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
(n_samples, n_features) = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.0
for k in range(n_features):
coef = 1.0 / (binomial(k, n_features) * (n_features - k))
for B in combinations(features, k):
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
(X_, y_) = (X[mask_b, :], y[mask_b])
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += coef * (1.0 * n_samples_b / n_samples) * (entropy(y_) - sum([entropy(c) * len(c) / n_samples_b for c in children]))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 1, 1, 1, 0, 1, 2], [1, 0, 1, 1, 0, 1, 1, 3], [0, 1, 1, 1, 0, 1, 0, 4], [1, 1, 0, 1, 0, 1, 1, 5], [1, 1, 0, 1, 1, 1, 1, 6], [1, 0, 1, 0, 0, 1, 0, 7], [1, 1, 1, 1, 1, 1, 1, 8], [1, 1, 1, 1, 0, 1, 1, 9], [1, 1, 1, 0, 1, 1, 1, 0]])
(X, y) = (np.array(data[:, :7], dtype=bool), data[:, 7])
n_features = X.shape[1]
true_importances = np.zeros(n_features)
for i in range(n_features):
(n_samples, n_features) = X.shape
features = list(range(n_features))
features.pop(i)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.0
for k in range(n_features):
coef = 1.0 / (binomial(k, n_features) * (n_features - k))
for B in combinations(features, k):
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
(X_, y_) = (X[mask_b, :], y[mask_b])
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[i]:
mask_xi = X_[:, i] == xi
children.append(y_[mask_xi])
imp += coef * (1.0 * n_samples_b / n_samples) * (entropy(y_) - sum([entropy(c) * len(c) / n_samples_b for c in children]))
true_importances[i] = imp
clf = ExtraTreesClassifier(n_estimators=500, max_features=1, criterion='log_loss', random_state=0).fit(X, y)
importances = sum((tree.tree_.compute_feature_importances(normalize=False) for tree in clf.estimators_)) / clf.n_estimators
assert_almost_equal(entropy(y), sum(importances))
assert np.abs(true_importances - importances).mean() < 0.01
|
def test_importances_asymptotic():
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.0
for count in np.bincount(samples):
p = 1.0 * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
(n_samples, n_features) = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.0
for k in range(n_features):
coef = 1.0 / (binomial(k, n_features) * (n_features - k))
for B in combinations(features, k):
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
(X_, y_) = (X[mask_b, :], y[mask_b])
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += coef * (1.0 * n_samples_b / n_samples) * (entropy(y_) - sum([entropy(c) * len(c) / n_samples_b for c in children]))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 1, 1, 1, 0, 1, 2], [1, 0, 1, 1, 0, 1, 1, 3], [0, 1, 1, 1, 0, 1, 0, 4], [1, 1, 0, 1, 0, 1, 1, 5], [1, 1, 0, 1, 1, 1, 1, 6], [1, 0, 1, 0, 0, 1, 0, 7], [1, 1, 1, 1, 1, 1, 1, 8], [1, 1, 1, 1, 0, 1, 1, 9], [1, 1, 1, 0, 1, 1, 1, 0]])
(X, y) = (np.array(data[:, :7], dtype=bool), data[:, 7])
n_features = X.shape[1]
true_importances = np.zeros(n_features)
for i in range(n_features):
<DeepExtract>
(n_samples, n_features) = X.shape
features = list(range(n_features))
features.pop(i)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.0
for k in range(n_features):
coef = 1.0 / (binomial(k, n_features) * (n_features - k))
for B in combinations(features, k):
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
(X_, y_) = (X[mask_b, :], y[mask_b])
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[i]:
mask_xi = X_[:, i] == xi
children.append(y_[mask_xi])
imp += coef * (1.0 * n_samples_b / n_samples) * (entropy(y_) - sum([entropy(c) * len(c) / n_samples_b for c in children]))
true_importances[i] = imp
</DeepExtract>
clf = ExtraTreesClassifier(n_estimators=500, max_features=1, criterion='log_loss', random_state=0).fit(X, y)
importances = sum((tree.tree_.compute_feature_importances(normalize=False) for tree in clf.estimators_)) / clf.n_estimators
assert_almost_equal(entropy(y), sum(importances))
assert np.abs(true_importances - importances).mean() < 0.01
|
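# The test above checks that unnormalized MDI importances sum to the output
# entropy; the usual public surface is the normalized feature_importances_
# attribute (toy sketch, assumes scikit-learn is installed):
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier

X, y = make_classification(n_samples=200, n_features=6, n_informative=3, random_state=0)
clf = ExtraTreesClassifier(n_estimators=50, random_state=0).fit(X, y)
print(clf.feature_importances_)        # normalized MDI importances
print(clf.feature_importances_.sum())  # 1.0 (up to floating point)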
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_rotg(dtype):
rotg = _rotg_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
a = dtype(rng.randn())
b = dtype(rng.randn())
(c, s) = (0.0, 0.0)
def expected_rotg(a, b):
roe = a if abs(a) > abs(b) else b
if a == 0 and b == 0:
(c, s, r, z) = (1, 0, 0, 0)
else:
r = np.sqrt(a ** 2 + b ** 2) * (1 if roe >= 0 else -1)
(c, s) = (a / r, b / r)
z = s if roe == a else 1 if c == 0 else 1 / c
return (r, z, c, s)
roe = a if abs(a) > abs(b) else b
if a == 0 and b == 0:
(c, s, r, z) = (1, 0, 0, 0)
else:
r = np.sqrt(a ** 2 + b ** 2) * (1 if roe >= 0 else -1)
(c, s) = (a / r, b / r)
z = s if roe == a else 1 if c == 0 else 1 / c
expected = (r, z, c, s)
actual = rotg(a, b, c, s)
assert_allclose(actual, expected, rtol=RTOL[dtype])
|
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_rotg(dtype):
rotg = _rotg_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
a = dtype(rng.randn())
b = dtype(rng.randn())
(c, s) = (0.0, 0.0)
def expected_rotg(a, b):
roe = a if abs(a) > abs(b) else b
if a == 0 and b == 0:
(c, s, r, z) = (1, 0, 0, 0)
else:
r = np.sqrt(a ** 2 + b ** 2) * (1 if roe >= 0 else -1)
(c, s) = (a / r, b / r)
z = s if roe == a else 1 if c == 0 else 1 / c
return (r, z, c, s)
<DeepExtract>
roe = a if abs(a) > abs(b) else b
if a == 0 and b == 0:
(c, s, r, z) = (1, 0, 0, 0)
else:
r = np.sqrt(a ** 2 + b ** 2) * (1 if roe >= 0 else -1)
(c, s) = (a / r, b / r)
z = s if roe == a else 1 if c == 0 else 1 / c
expected = (r, z, c, s)
</DeepExtract>
actual = rotg(a, b, c, s)
assert_allclose(actual, expected, rtol=RTOL[dtype])
|
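# Quick numerical check of the Givens-rotation identity that expected_rotg
# encodes above: applying [[c, s], [-s, c]] to (a, b) zeroes the second entry.
import numpy as np

a, b = 3.0, 4.0
roe = a if abs(a) > abs(b) else b
r = np.hypot(a, b) * (1 if roe >= 0 else -1)
c, s = a / r, b / r
print(np.array([[c, s], [-s, c]]) @ np.array([a, b]))  # ~[5., 0.]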
def test_lda_dense_input():
rng = np.random.RandomState(0)
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
lda = LatentDirichletAllocation(n_components=n_components, learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
|
def test_lda_dense_input():
rng = np.random.RandomState(0)
<DeepExtract>
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
</DeepExtract>
lda = LatentDirichletAllocation(n_components=n_components, learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
|
def pair_confusion_matrix(labels_true, labels_pred):
"""Pair confusion matrix arising from two clusterings [1]_.
The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix
between two clusterings by considering all pairs of samples and counting
pairs that are assigned into the same or into different clusters under
the true and predicted clusterings.
Considering a pair of samples that is clustered together a positive pair,
then as in binary classification the count of true negatives is
:math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is
:math:`C_{11}` and false positives is :math:`C_{01}`.
Read more in the :ref:`User Guide <pair_confusion_matrix>`.
Parameters
----------
labels_true : array-like of shape (n_samples,), dtype=integral
Ground truth class labels to be used as a reference.
labels_pred : array-like of shape (n_samples,), dtype=integral
Cluster labels to evaluate.
Returns
-------
C : ndarray of shape (2, 2), dtype=np.int64
The contingency matrix.
See Also
--------
rand_score: Rand Score.
adjusted_rand_score: Adjusted Rand Score.
adjusted_mutual_info_score: Adjusted Mutual Information.
References
----------
.. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
Journal of Classification 2, 193–218 (1985).
<10.1007/BF01908075>`
Examples
--------
Perfectly matching labelings have all non-zero entries on the
diagonal regardless of actual label values:
>>> from sklearn.metrics.cluster import pair_confusion_matrix
>>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0])
array([[8, 0],
[0, 4]]...
Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized, and
have some off-diagonal non-zero entries:
>>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1])
array([[8, 2],
[0, 2]]...
Note that the matrix is not symmetric.
"""
labels_true = check_array(labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None)
labels_pred = check_array(labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None)
type_label = type_of_target(labels_true)
type_pred = type_of_target(labels_pred)
if 'continuous' in (type_pred, type_label):
msg = f'Clustering metrics expects discrete values but received {type_label} values for label, and {type_pred} values for target'
warnings.warn(msg, UserWarning)
if labels_true.ndim != 1:
raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))
check_consistent_length(labels_true, labels_pred)
(labels_true, labels_pred) = (labels_true, labels_pred)
n_samples = np.int64(labels_true.shape[0])
    eps = None  # the inlined contingency_matrix call used eps=None, sparse=True
    if eps is not None:
        raise ValueError("Cannot set 'eps' when sparse=True")
(classes, class_idx) = np.unique(labels_true, return_inverse=True)
(clusters, cluster_idx) = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=np.int64)
if True:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
contingency = contingency + eps
contingency = contingency
n_c = np.ravel(contingency.sum(axis=1))
n_k = np.ravel(contingency.sum(axis=0))
sum_squares = (contingency.data ** 2).sum()
C = np.empty((2, 2), dtype=np.int64)
C[1, 1] = sum_squares - n_samples
C[0, 1] = contingency.dot(n_k).sum() - sum_squares
C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares
C[0, 0] = n_samples ** 2 - C[0, 1] - C[1, 0] - sum_squares
return C
|
def pair_confusion_matrix(labels_true, labels_pred):
"""Pair confusion matrix arising from two clusterings [1]_.
The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix
between two clusterings by considering all pairs of samples and counting
pairs that are assigned into the same or into different clusters under
the true and predicted clusterings.
Considering a pair of samples that is clustered together a positive pair,
then as in binary classification the count of true negatives is
:math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is
:math:`C_{11}` and false positives is :math:`C_{01}`.
Read more in the :ref:`User Guide <pair_confusion_matrix>`.
Parameters
----------
labels_true : array-like of shape (n_samples,), dtype=integral
Ground truth class labels to be used as a reference.
labels_pred : array-like of shape (n_samples,), dtype=integral
Cluster labels to evaluate.
Returns
-------
C : ndarray of shape (2, 2), dtype=np.int64
The contingency matrix.
See Also
--------
rand_score: Rand Score.
adjusted_rand_score: Adjusted Rand Score.
adjusted_mutual_info_score: Adjusted Mutual Information.
References
----------
.. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
Journal of Classification 2, 193–218 (1985).
<10.1007/BF01908075>`
Examples
--------
Perfectly matching labelings have all non-zero entries on the
diagonal regardless of actual label values:
>>> from sklearn.metrics.cluster import pair_confusion_matrix
>>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0])
array([[8, 0],
[0, 4]]...
Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized, and
have some off-diagonal non-zero entries:
>>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1])
array([[8, 2],
[0, 2]]...
Note that the matrix is not symmetric.
"""
<DeepExtract>
labels_true = check_array(labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None)
labels_pred = check_array(labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None)
type_label = type_of_target(labels_true)
type_pred = type_of_target(labels_pred)
if 'continuous' in (type_pred, type_label):
msg = f'Clustering metrics expects discrete values but received {type_label} values for label, and {type_pred} values for target'
warnings.warn(msg, UserWarning)
if labels_true.ndim != 1:
raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))
check_consistent_length(labels_true, labels_pred)
(labels_true, labels_pred) = (labels_true, labels_pred)
</DeepExtract>
n_samples = np.int64(labels_true.shape[0])
<DeepExtract>
    eps = None  # the inlined contingency_matrix call used eps=None, sparse=True
    if eps is not None:
        raise ValueError("Cannot set 'eps' when sparse=True")
(classes, class_idx) = np.unique(labels_true, return_inverse=True)
(clusters, cluster_idx) = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=np.int64)
if True:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
contingency = contingency + eps
contingency = contingency
</DeepExtract>
n_c = np.ravel(contingency.sum(axis=1))
n_k = np.ravel(contingency.sum(axis=0))
sum_squares = (contingency.data ** 2).sum()
C = np.empty((2, 2), dtype=np.int64)
C[1, 1] = sum_squares - n_samples
C[0, 1] = contingency.dot(n_k).sum() - sum_squares
C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares
C[0, 0] = n_samples ** 2 - C[0, 1] - C[1, 0] - sum_squares
return C
|
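# The Rand index can be read directly off the pair confusion matrix: it is the
# fraction of agreeing pairs (the diagonal) among all ordered pairs. A hedged
# cross-check against rand_score (assumes scikit-learn is installed):
from sklearn.metrics import rand_score
from sklearn.metrics.cluster import pair_confusion_matrix

labels_a = [0, 0, 1, 2]
labels_b = [0, 0, 1, 1]
C = pair_confusion_matrix(labels_a, labels_b)
print((C[0, 0] + C[1, 1]) / C.sum())   # 0.833...
print(rand_score(labels_a, labels_b))  # same value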
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'alpha': [Interval(Real, 0, 1, closed='both')], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like']})
def mean_pinball_loss(y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput='uniform_average'):
"""Pinball loss for quantile regression.
Read more in the :ref:`User Guide <pinball_loss>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
alpha : float, slope of the pinball loss, default=0.5,
This loss is equivalent to :ref:`mean_absolute_error` when `alpha=0.5`,
`alpha=0.95` is minimized by estimators of the 95th percentile.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
        If multioutput is 'raw_values', then the pinball loss is returned
        for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
The pinball loss output is a non-negative floating point. The best
value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_pinball_loss
>>> y_true = [1, 2, 3]
>>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
0.03...
>>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
0.3...
>>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
0.3...
>>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
0.03...
>>> mean_pinball_loss(y_true, y_true, alpha=0.1)
0.0
>>> mean_pinball_loss(y_true, y_true, alpha=0.9)
0.0
"""
check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype='numeric')
    y_pred = check_array(y_pred, ensure_2d=False, dtype='numeric')
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError('Custom weights are useful only in multi-output cases.')
elif n_outputs != len(multioutput):
raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
(y_type, y_true, y_pred, multioutput) = (y_type, y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
diff = y_true - y_pred
sign = (diff >= 0).astype(diff.dtype)
loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff
output_errors = np.average(loss, weights=sample_weight, axis=0)
if isinstance(multioutput, str) and multioutput == 'raw_values':
return output_errors
if isinstance(multioutput, str) and multioutput == 'uniform_average':
multioutput = None
return np.average(output_errors, weights=multioutput)
|
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'alpha': [Interval(Real, 0, 1, closed='both')], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like']})
def mean_pinball_loss(y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput='uniform_average'):
"""Pinball loss for quantile regression.
Read more in the :ref:`User Guide <pinball_loss>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
    alpha : float, default=0.5
        Slope of the pinball loss. This loss is equivalent to
        :ref:`mean_absolute_error` when `alpha=0.5`; `alpha=0.95` is
        minimized by estimators of the 95th percentile.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
        If multioutput is 'raw_values', then the pinball loss is returned
        for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
The pinball loss output is a non-negative floating point. The best
value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_pinball_loss
>>> y_true = [1, 2, 3]
>>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
0.03...
>>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
0.3...
>>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
0.3...
>>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
0.03...
>>> mean_pinball_loss(y_true, y_true, alpha=0.1)
0.0
>>> mean_pinball_loss(y_true, y_true, alpha=0.9)
0.0
"""
<DeepExtract>
check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype='numeric')
    y_pred = check_array(y_pred, ensure_2d=False, dtype='numeric')
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError('Custom weights are useful only in multi-output cases.')
elif n_outputs != len(multioutput):
raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
(y_type, y_true, y_pred, multioutput) = (y_type, y_true, y_pred, multioutput)
</DeepExtract>
check_consistent_length(y_true, y_pred, sample_weight)
diff = y_true - y_pred
sign = (diff >= 0).astype(diff.dtype)
loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff
output_errors = np.average(loss, weights=sample_weight, axis=0)
if isinstance(multioutput, str) and multioutput == 'raw_values':
return output_errors
if isinstance(multioutput, str) and multioutput == 'uniform_average':
multioutput = None
return np.average(output_errors, weights=multioutput)
|
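The core of the computation above is the per-sample pinball loss alpha * max(y - y_hat, 0) + (1 - alpha) * max(y_hat - y, 0), averaged over samples. A plain-NumPy sketch that reproduces one of the doctest values (the helper name is mine):

import numpy as np

def pinball(y_true, y_pred, alpha):
    # Per-sample pinball loss, averaged; mirrors the vectorized expression above.
    diff = np.asarray(y_true, dtype=float) - np.asarray(y_pred, dtype=float)
    return float(np.mean(np.where(diff >= 0, alpha * diff, (alpha - 1) * diff)))

print(pinball([1, 2, 3], [0, 2, 3], alpha=0.1))  # 0.0333... as in the doctest
print(pinball([1, 2, 3], [1, 2, 4], alpha=0.1))  # 0.3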
def test_return_std():
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for (decimal, noise_mult) in enumerate([1, 0.1, 0.01]):
y = f(X) + np.random.randn(X.shape[0]) * noise_mult
m1 = BayesianRidge()
m1.fit(X, y)
(y_mean1, y_std1) = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
(y_mean2, y_std2) = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
|
def test_return_std():
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for (decimal, noise_mult) in enumerate([1, 0.1, 0.01]):
<DeepExtract>
y = f(X) + np.random.randn(X.shape[0]) * noise_mult
</DeepExtract>
m1 = BayesianRidge()
m1.fit(X, y)
(y_mean1, y_std1) = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
(y_mean2, y_std2) = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
|
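The test above checks that the predictive standard deviation returned by `predict(..., return_std=True)` tracks the injected noise level. A minimal usage sketch of the same API (the data generation here is illustrative):

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.random_sample((50, 5))
y = X @ np.array([1.0, 0.0, 1.0, -1.0, 0.0]) + 1.0 + 0.1 * rng.randn(50)

model = BayesianRidge().fit(X, y)
y_mean, y_std = model.predict(X[:3], return_std=True)
print(y_mean.shape, y_std.shape)  # (3,) (3,); y_std should sit near the 0.1 noise level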
def fit_transform(X, y=None, init=None):
"""
Fit the data from `X`, and returns the embedded coordinates.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
X transformed in the new space.
"""
self._validate_params()
X = self._validate_data(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != 'precomputed':
warnings.warn("The MDS API has changed. ``fit`` now constructs an dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimilarity='precomputed'``.")
if self.dissimilarity == 'precomputed':
self.dissimilarity_matrix_ = X
elif self.dissimilarity == 'euclidean':
self.dissimilarity_matrix_ = euclidean_distances(X)
self.dissimilarity_matrix_ = check_array(self.dissimilarity_matrix_)
self.random_state = check_random_state(self.random_state)
if self.normalized_stress == 'warn':
warnings.warn("The default value of `normalized_stress` will change to `'auto'` in version 1.4. To suppress this warning, manually set the value of `normalized_stress`.", FutureWarning)
self.normalized_stress = False
if self.normalized_stress == 'auto':
self.normalized_stress = not self.metric
if self.normalized_stress and self.metric:
raise ValueError('Normalized stress is not supported for metric MDS. Either set `normalized_stress=False` or use `metric=False`.')
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not self.n_init == 1:
warnings.warn('Explicit initial positions passed: performing only one init of the MDS instead of %d' % self.n_init)
self.n_init = 1
(best_pos, best_stress) = (None, None)
if effective_n_jobs(self.n_jobs) == 1:
for it in range(self.n_init):
(pos, stress, n_iter_) = _smacof_single(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, normalized_stress=self.normalized_stress)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = self.random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
results = Parallel(n_jobs=self.n_jobs, verbose=max(self.verbose - 1, 0))((delayed(_smacof_single)(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=seed, normalized_stress=self.normalized_stress) for seed in seeds))
(positions, stress, n_iters) = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if True:
(self.embedding_, self.stress_, self.n_iter_) = (best_pos, best_stress, best_iter)
else:
(self.embedding_, self.stress_, self.n_iter_) = (best_pos, best_stress)
return self.embedding_
|
def fit_transform(X, y=None, init=None):
"""
Fit the data from `X`, and returns the embedded coordinates.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
X transformed in the new space.
"""
self._validate_params()
X = self._validate_data(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != 'precomputed':
warnings.warn("The MDS API has changed. ``fit`` now constructs an dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimilarity='precomputed'``.")
if self.dissimilarity == 'precomputed':
self.dissimilarity_matrix_ = X
elif self.dissimilarity == 'euclidean':
self.dissimilarity_matrix_ = euclidean_distances(X)
<DeepExtract>
self.dissimilarity_matrix_ = check_array(self.dissimilarity_matrix_)
self.random_state = check_random_state(self.random_state)
if self.normalized_stress == 'warn':
warnings.warn("The default value of `normalized_stress` will change to `'auto'` in version 1.4. To suppress this warning, manually set the value of `normalized_stress`.", FutureWarning)
self.normalized_stress = False
if self.normalized_stress == 'auto':
self.normalized_stress = not self.metric
if self.normalized_stress and self.metric:
raise ValueError('Normalized stress is not supported for metric MDS. Either set `normalized_stress=False` or use `metric=False`.')
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not self.n_init == 1:
warnings.warn('Explicit initial positions passed: performing only one init of the MDS instead of %d' % self.n_init)
self.n_init = 1
(best_pos, best_stress) = (None, None)
if effective_n_jobs(self.n_jobs) == 1:
for it in range(self.n_init):
(pos, stress, n_iter_) = _smacof_single(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, normalized_stress=self.normalized_stress)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = self.random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
results = Parallel(n_jobs=self.n_jobs, verbose=max(self.verbose - 1, 0))((delayed(_smacof_single)(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=seed, normalized_stress=self.normalized_stress) for seed in seeds))
(positions, stress, n_iters) = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if True:
(self.embedding_, self.stress_, self.n_iter_) = (best_pos, best_stress, best_iter)
else:
(self.embedding_, self.stress_, self.n_iter_) = (best_pos, best_stress)
</DeepExtract>
return self.embedding_
|
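A minimal usage sketch of the two input modes handled above: passing raw features (Euclidean dissimilarities are computed internally) versus passing a precomputed dissimilarity matrix. `normalized_stress=False` is set explicitly to avoid the FutureWarning mentioned in the code.

import numpy as np
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import euclidean_distances

X = np.random.RandomState(42).rand(20, 5)

emb_raw = MDS(n_components=2, normalized_stress=False,
              random_state=0).fit_transform(X)

D = euclidean_distances(X)
emb_pre = MDS(n_components=2, dissimilarity='precomputed',
              normalized_stress=False, random_state=0).fit_transform(D)

print(emb_raw.shape, emb_pre.shape)  # (20, 2) (20, 2)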
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print('Failed to execute git to get revision')
revision = None
revision = revision.decode('utf-8')
return partial(_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt)
|
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
<DeepExtract>
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print('Failed to execute git to get revision')
revision = None
revision = revision.decode('utf-8')
</DeepExtract>
return partial(_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt)
|
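For context, the returned partial is meant to be assigned to the `linkcode_resolve` hook of `sphinx.ext.linkcode` in a Sphinx `conf.py`. A hedged sketch, assuming `make_linkcode_resolve` is importable there (e.g. from a local extension module); the URL format string is illustrative:

# conf.py fragment (sketch)
extensions = ['sphinx.ext.linkcode']

linkcode_resolve = make_linkcode_resolve(
    'sklearn',
    'https://github.com/scikit-learn/scikit-learn/'
    'blob/{revision}/{package}/{path}#L{lineno}',
)
# Sphinx calls linkcode_resolve('py', {'module': ..., 'fullname': ...}) for each
# documented object and links to the returned URL.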
def compute_sample_weight(class_weight, y, *, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)
Array of original class labels per sample.
indices : array-like of shape (n_subsample,), default=None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
sample weight will be calculated over the full sample. Only "balanced"
is supported for class_weight if this is provided.
Returns
-------
sample_weight_vect : ndarray of shape (n_samples,)
Array with sample weights as applied to the original y.
"""
if not sparse.issparse(y):
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, str):
if class_weight not in ['balanced']:
raise ValueError('The only valid preset for class_weight is "balanced". Given "%s".' % class_weight)
elif indices is not None and (not isinstance(class_weight, str)):
raise ValueError('The only valid class_weight for subsampling is "balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if not hasattr(class_weight, '__iter__') or isinstance(class_weight, dict):
raise ValueError('For multi-output, class_weight should be a list of dicts, or a valid string.')
if len(class_weight) != n_outputs:
raise ValueError('For multi-output, number of elements in class_weight should match number of outputs.')
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
if sparse.issparse(y_full):
y_full = y_full.toarray().flatten()
classes_full = np.unique(y_full)
classes_missing = None
if class_weight == 'balanced' or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
y_subsample = y_full[indices]
classes_subsample = np.unique(y_subsample)
weight_k = np.take(compute_class_weight(class_weight_k, classes=classes_subsample, y=y_subsample), np.searchsorted(classes_subsample, classes_full), mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
from ..preprocessing import LabelEncoder
if set(y_full) - set(classes_full):
raise ValueError('classes should include all valid labels that can be in y')
if class_weight_k is None or len(class_weight_k) == 0:
weight = np.ones(classes_full.shape[0], dtype=np.float64, order='C')
elif class_weight_k == 'balanced':
le = LabelEncoder()
y_ind = le.fit_transform(y_full)
if not all(np.in1d(classes_full, le.classes_)):
raise ValueError('classes should have valid labels that are in y')
recip_freq = len(y_full) / (len(le.classes_) * np.bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes_full)]
else:
weight = np.ones(classes_full.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight_k, dict):
raise ValueError("class_weight must be dict, 'balanced', or None, got: %r" % class_weight_k)
unweighted_classes = []
for (i, c) in enumerate(classes_full):
if c in class_weight_k:
weight[i] = class_weight_k[c]
else:
unweighted_classes.append(c)
n_weighted_classes = len(classes_full) - len(unweighted_classes)
if unweighted_classes and n_weighted_classes != len(class_weight_k):
raise ValueError(f'The classes, {unweighted_classes}, are not in class_weight')
weight_k = weight
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
weight_k[np.in1d(y_full, list(classes_missing))] = 0.0
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64)
return expanded_class_weight
|
def compute_sample_weight(class_weight, y, *, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)
Array of original class labels per sample.
indices : array-like of shape (n_subsample,), default=None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
sample weight will be calculated over the full sample. Only "balanced"
is supported for class_weight if this is provided.
Returns
-------
sample_weight_vect : ndarray of shape (n_samples,)
Array with sample weights as applied to the original y.
"""
if not sparse.issparse(y):
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, str):
if class_weight not in ['balanced']:
raise ValueError('The only valid preset for class_weight is "balanced". Given "%s".' % class_weight)
elif indices is not None and (not isinstance(class_weight, str)):
raise ValueError('The only valid class_weight for subsampling is "balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if not hasattr(class_weight, '__iter__') or isinstance(class_weight, dict):
raise ValueError('For multi-output, class_weight should be a list of dicts, or a valid string.')
if len(class_weight) != n_outputs:
raise ValueError('For multi-output, number of elements in class_weight should match number of outputs.')
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
if sparse.issparse(y_full):
y_full = y_full.toarray().flatten()
classes_full = np.unique(y_full)
classes_missing = None
if class_weight == 'balanced' or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
y_subsample = y_full[indices]
classes_subsample = np.unique(y_subsample)
weight_k = np.take(compute_class_weight(class_weight_k, classes=classes_subsample, y=y_subsample), np.searchsorted(classes_subsample, classes_full), mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
<DeepExtract>
from ..preprocessing import LabelEncoder
if set(y_full) - set(classes_full):
raise ValueError('classes should include all valid labels that can be in y')
if class_weight_k is None or len(class_weight_k) == 0:
weight = np.ones(classes_full.shape[0], dtype=np.float64, order='C')
elif class_weight_k == 'balanced':
le = LabelEncoder()
y_ind = le.fit_transform(y_full)
if not all(np.in1d(classes_full, le.classes_)):
raise ValueError('classes should have valid labels that are in y')
recip_freq = len(y_full) / (len(le.classes_) * np.bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes_full)]
else:
weight = np.ones(classes_full.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight_k, dict):
raise ValueError("class_weight must be dict, 'balanced', or None, got: %r" % class_weight_k)
unweighted_classes = []
for (i, c) in enumerate(classes_full):
if c in class_weight_k:
weight[i] = class_weight_k[c]
else:
unweighted_classes.append(c)
n_weighted_classes = len(classes_full) - len(unweighted_classes)
if unweighted_classes and n_weighted_classes != len(class_weight_k):
raise ValueError(f'The classes, {unweighted_classes}, are not in class_weight')
weight_k = weight
</DeepExtract>
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
weight_k[np.in1d(y_full, list(classes_missing))] = 0.0
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64)
return expanded_class_weight
|
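A small worked example of the "balanced" heuristic described above, `n_samples / (n_classes * np.bincount(y))`, next to an explicit per-class dict:

import numpy as np
from sklearn.utils.class_weight import compute_sample_weight

y = np.array([0, 0, 1, 1, 1, 1])

# balanced: 6 / (2 * [2, 4]) = [1.5, 0.75], broadcast back onto the samples
print(compute_sample_weight('balanced', y))
# [1.5  1.5  0.75 0.75 0.75 0.75]

print(compute_sample_weight({0: 2.0, 1: 1.0}, y))
# [2. 2. 1. 1. 1. 1.]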
def _fit_multiclass(X, y, alpha, C, learning_rate, sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
"""
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.bool_)
if not self.early_stopping:
validation_mask = validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(test_size=self.validation_fraction, random_state=self.random_state)
(idx_train, idx_val) = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
    if not np.any((sample_weight > 0)[idx_val]):
raise ValueError('The sample weights for validation set are all zero, consider using a different random state.')
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError('Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.' % (n_samples, self.validation_fraction, idx_train.shape[0], idx_val.shape[0]))
validation_mask[idx_val] = True
validation_mask = validation_mask
random_state = check_random_state(self.random_state)
seeds = random_state.randint(MAX_INT, size=len(self.classes_))
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, require='sharedmem')((delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[i], 1.0, sample_weight, validation_mask=validation_mask, random_state=seed) for (i, seed) in enumerate(seeds)))
n_iter_ = 0.0
for (i, (_, intercept, n_iter_i)) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self._average_coef
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef
self._standard_intercept = np.atleast_1d(self.intercept_)
self.intercept_ = self._standard_intercept
|
def _fit_multiclass(X, y, alpha, C, learning_rate, sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
"""
<DeepExtract>
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.bool_)
if not self.early_stopping:
validation_mask = validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(test_size=self.validation_fraction, random_state=self.random_state)
(idx_train, idx_val) = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
    if not np.any((sample_weight > 0)[idx_val]):
raise ValueError('The sample weights for validation set are all zero, consider using a different random state.')
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError('Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.' % (n_samples, self.validation_fraction, idx_train.shape[0], idx_val.shape[0]))
validation_mask[idx_val] = True
validation_mask = validation_mask
</DeepExtract>
random_state = check_random_state(self.random_state)
seeds = random_state.randint(MAX_INT, size=len(self.classes_))
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, require='sharedmem')((delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[i], 1.0, sample_weight, validation_mask=validation_mask, random_state=seed) for (i, seed) in enumerate(seeds)))
n_iter_ = 0.0
for (i, (_, intercept, n_iter_i)) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self._average_coef
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef
self._standard_intercept = np.atleast_1d(self.intercept_)
self.intercept_ = self._standard_intercept
|
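The end result of the OvR scheme above is one weight vector and one intercept per class. A short illustration through the public estimator (the dataset parameters are arbitrary):

from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=200, n_features=10, n_informative=5,
                           n_classes=3, random_state=0)
clf = SGDClassifier(max_iter=1000, random_state=0).fit(X, y)

# One binary problem per class, fit in parallel internally (one-vs-rest).
print(clf.coef_.shape, clf.intercept_.shape)  # (3, 10) (3,)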
def partial_fit(self, X, y=None, sample_weight=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
first_call = not hasattr(self, 'n_samples_seen_')
X = self._validate_data(X, accept_sparse=('csr', 'csc'), dtype=FLOAT_DTYPES, force_all_finite='allow-nan', reset=first_call)
n_features = X.shape[1]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
dtype = np.int64 if sample_weight is None else X.dtype
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(n_features, dtype=dtype)
elif np.size(self.n_samples_seen_) == 1:
self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1])
self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False)
if sparse.issparse(X):
if self.with_mean:
raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
sparse_constructor = sparse.csr_matrix if X.format == 'csr' else sparse.csc_matrix
if self.with_std:
if not hasattr(self, 'scale_'):
(self.mean_, self.var_, self.n_samples_seen_) = mean_variance_axis(X, axis=0, weights=sample_weight, return_sum_weights=True)
else:
(self.mean_, self.var_, self.n_samples_seen_) = incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_, weights=sample_weight)
self.mean_ = self.mean_.astype(np.float64, copy=False)
self.var_ = self.var_.astype(np.float64, copy=False)
else:
self.mean_ = None
self.var_ = None
weights = _check_sample_weight(sample_weight, X)
sum_weights_nan = weights @ sparse_constructor((np.isnan(X.data), X.indices, X.indptr), shape=X.shape)
self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype(dtype)
else:
if not hasattr(self, 'scale_'):
self.mean_ = 0.0
if self.with_std:
self.var_ = 0.0
else:
self.var_ = None
if not self.with_mean and (not self.with_std):
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
(self.mean_, self.var_, self.n_samples_seen_) = _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_, sample_weight=sample_weight)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
eps = np.finfo(np.float64).eps
upper_bound = self.n_samples_seen_ * eps * self.var_ + (self.n_samples_seen_ * self.mean_ * eps) ** 2
constant_mask = self.var_ <= upper_bound
        scale = np.sqrt(self.var_)
        if np.isscalar(scale):
            if scale == 0.0:
                scale = 1.0
            self.scale_ = scale
        elif isinstance(scale, np.ndarray):
            if constant_mask is None:
                constant_mask = scale < 10 * np.finfo(scale.dtype).eps
            # the inlined helper was called with copy=False, so scale is modified in place
            scale[constant_mask] = 1.0
            self.scale_ = scale
else:
self.scale_ = None
return self
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
first_call = not hasattr(self, 'n_samples_seen_')
X = self._validate_data(X, accept_sparse=('csr', 'csc'), dtype=FLOAT_DTYPES, force_all_finite='allow-nan', reset=first_call)
n_features = X.shape[1]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
dtype = np.int64 if sample_weight is None else X.dtype
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(n_features, dtype=dtype)
elif np.size(self.n_samples_seen_) == 1:
self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1])
self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False)
if sparse.issparse(X):
if self.with_mean:
raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
sparse_constructor = sparse.csr_matrix if X.format == 'csr' else sparse.csc_matrix
if self.with_std:
if not hasattr(self, 'scale_'):
(self.mean_, self.var_, self.n_samples_seen_) = mean_variance_axis(X, axis=0, weights=sample_weight, return_sum_weights=True)
else:
(self.mean_, self.var_, self.n_samples_seen_) = incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_, weights=sample_weight)
self.mean_ = self.mean_.astype(np.float64, copy=False)
self.var_ = self.var_.astype(np.float64, copy=False)
else:
self.mean_ = None
self.var_ = None
weights = _check_sample_weight(sample_weight, X)
sum_weights_nan = weights @ sparse_constructor((np.isnan(X.data), X.indices, X.indptr), shape=X.shape)
self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype(dtype)
else:
if not hasattr(self, 'scale_'):
self.mean_ = 0.0
if self.with_std:
self.var_ = 0.0
else:
self.var_ = None
if not self.with_mean and (not self.with_std):
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
(self.mean_, self.var_, self.n_samples_seen_) = _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_, sample_weight=sample_weight)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
<DeepExtract>
eps = np.finfo(np.float64).eps
upper_bound = self.n_samples_seen_ * eps * self.var_ + (self.n_samples_seen_ * self.mean_ * eps) ** 2
constant_mask = self.var_ <= upper_bound
</DeepExtract>
<DeepExtract>
        scale = np.sqrt(self.var_)
        if np.isscalar(scale):
            if scale == 0.0:
                scale = 1.0
            self.scale_ = scale
        elif isinstance(scale, np.ndarray):
            if constant_mask is None:
                constant_mask = scale < 10 * np.finfo(scale.dtype).eps
            # the inlined helper was called with copy=False, so scale is modified in place
            scale[constant_mask] = 1.0
            self.scale_ = scale
</DeepExtract>
else:
self.scale_ = None
return self
|
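A minimal sketch of the streaming use case the method is written for: feeding the data in batches and recovering (up to numerical error) the same statistics as a single `fit`. The batch split and distribution parameters are arbitrary.

import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(1000, 3) * [1.0, 2.0, 3.0] + [0.0, 5.0, -5.0]

scaler = StandardScaler()
for batch in np.array_split(X, 10):
    scaler.partial_fit(batch)            # incremental mean/var updates

print(np.round(scaler.mean_, 1))         # close to [ 0.  5. -5.]
print(np.round(scaler.scale_, 1))        # close to [1. 2. 3.]
print(np.allclose(scaler.mean_, StandardScaler().fit(X).mean_))  # True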
def davies_bouldin_score(X, labels):
"""Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
"""
(X, labels) = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
(n_samples, _) = X.shape
n_labels = len(le.classes_)
if not 1 < n_labels < n_samples:
raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % n_labels)
intra_dists = np.zeros(n_labels)
centroids = np.zeros((n_labels, len(X[0])), dtype=float)
for k in range(n_labels):
cluster_k = _safe_indexing(X, labels == k)
centroid = cluster_k.mean(axis=0)
centroids[k] = centroid
intra_dists[k] = np.average(pairwise_distances(cluster_k, [centroid]))
centroid_distances = pairwise_distances(centroids)
if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
return 0.0
centroid_distances[centroid_distances == 0] = np.inf
combined_intra_dists = intra_dists[:, None] + intra_dists
scores = np.max(combined_intra_dists / centroid_distances, axis=1)
return np.mean(scores)
|
def davies_bouldin_score(X, labels):
"""Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
"""
(X, labels) = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
(n_samples, _) = X.shape
n_labels = len(le.classes_)
<DeepExtract>
if not 1 < n_labels < n_samples:
raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % n_labels)
</DeepExtract>
intra_dists = np.zeros(n_labels)
centroids = np.zeros((n_labels, len(X[0])), dtype=float)
for k in range(n_labels):
cluster_k = _safe_indexing(X, labels == k)
centroid = cluster_k.mean(axis=0)
centroids[k] = centroid
intra_dists[k] = np.average(pairwise_distances(cluster_k, [centroid]))
centroid_distances = pairwise_distances(centroids)
if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
return 0.0
centroid_distances[centroid_distances == 0] = np.inf
combined_intra_dists = intra_dists[:, None] + intra_dists
scores = np.max(combined_intra_dists / centroid_distances, axis=1)
return np.mean(scores)
|
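A minimal usage sketch: tight, well-separated blobs should give a small Davies-Bouldin score (lower is better); the dataset parameters are arbitrary.

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import davies_bouldin_score

X, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.5, random_state=0)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
print(davies_bouldin_score(X, labels))  # small value for compact, separated clusters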
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
X = make_nonnegative(X)
        X_scaled = X
        # max_iter and tol were parameters of the inlined normalization helper;
        # 1000 and 1e-05 are assumed defaults
        max_iter, tol = 1000, 1e-05
        for _ in range(max_iter):
(X_new, _, _) = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
normalized_data = X_scaled
n_sv += 1
elif self.method == 'scale':
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
(n_rows, n_cols) = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
(normalized_data, _, _) = (an, row_diag, col_diag)
n_sv += 1
elif self.method == 'log':
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError('Cannot compute log of a sparse matrix, because log(x) diverges to -infinity as x goes to 0.')
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
normalized_data = L - row_avg - col_avg + avg
n_discard = 0 if self.method == 'log' else 1
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
(u, _, vt) = randomized_svd(normalized_data, n_sv, random_state=self.random_state, **kwargs)
elif self.svd_method == 'arpack':
(u, _, vt) = svds(normalized_data, k=n_sv, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
A = safe_sparse_dot(normalized_data.T, normalized_data)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, v) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(normalized_data, normalized_data.T)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, u) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
(u, v) = (u, vt.T)
ut = u.T
vt = v.T
try:
(n_row_clusters, n_col_clusters) = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
def make_piecewise(v):
(centroid, labels) = self._k_means(v.reshape(-1, 1), n_row_clusters)
        return centroid[labels].ravel()  # piecewise-constant approximation of v
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=ut)
dists = np.apply_along_axis(norm, axis=1, arr=ut - piecewise_vectors)
result = ut[np.argsort(dists)[:self.n_best]]
best_ut = result
def make_piecewise(v):
(centroid, labels) = self._k_means(v.reshape(-1, 1), n_col_clusters)
        return centroid[labels].ravel()  # piecewise-constant approximation of v
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vt)
dists = np.apply_along_axis(norm, axis=1, arr=vt - piecewise_vectors)
result = vt[np.argsort(dists)[:self.n_best]]
best_vt = result
projected = safe_sparse_dot(X, best_vt.T)
(_, labels) = self._k_means(projected, n_row_clusters)
self.row_labels_ = labels
projected = safe_sparse_dot(X.T, best_ut.T)
(_, labels) = self._k_means(projected, n_col_clusters)
self.column_labels_ = labels
self.rows_ = np.vstack([self.row_labels_ == label for label in range(n_row_clusters) for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label for _ in range(n_row_clusters) for label in range(n_col_clusters)])
|
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
<DeepExtract>
X = make_nonnegative(X)
        X_scaled = X
        # max_iter and tol were parameters of the inlined normalization helper;
        # 1000 and 1e-05 are assumed defaults
        max_iter, tol = 1000, 1e-05
        for _ in range(max_iter):
(X_new, _, _) = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
normalized_data = X_scaled
</DeepExtract>
n_sv += 1
elif self.method == 'scale':
<DeepExtract>
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
(n_rows, n_cols) = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
(normalized_data, _, _) = (an, row_diag, col_diag)
</DeepExtract>
n_sv += 1
elif self.method == 'log':
<DeepExtract>
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError('Cannot compute log of a sparse matrix, because log(x) diverges to -infinity as x goes to 0.')
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
normalized_data = L - row_avg - col_avg + avg
</DeepExtract>
n_discard = 0 if self.method == 'log' else 1
<DeepExtract>
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
(u, _, vt) = randomized_svd(normalized_data, n_sv, random_state=self.random_state, **kwargs)
elif self.svd_method == 'arpack':
(u, _, vt) = svds(normalized_data, k=n_sv, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
A = safe_sparse_dot(normalized_data.T, normalized_data)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, v) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(normalized_data, normalized_data.T)
random_state = check_random_state(self.random_state)
v0 = random_state.uniform(-1, 1, A.shape[0])
(_, u) = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
(u, v) = (u, vt.T)
</DeepExtract>
ut = u.T
vt = v.T
try:
(n_row_clusters, n_col_clusters) = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
<DeepExtract>
def make_piecewise(v):
(centroid, labels) = self._k_means(v.reshape(-1, 1), n_row_clusters)
        return centroid[labels].ravel()  # piecewise-constant approximation of v
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=ut)
dists = np.apply_along_axis(norm, axis=1, arr=ut - piecewise_vectors)
result = ut[np.argsort(dists)[:self.n_best]]
best_ut = result
</DeepExtract>
<DeepExtract>
def make_piecewise(v):
(centroid, labels) = self._k_means(v.reshape(-1, 1), n_col_clusters)
        return centroid[labels].ravel()  # piecewise-constant approximation of v
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vt)
dists = np.apply_along_axis(norm, axis=1, arr=vt - piecewise_vectors)
result = vt[np.argsort(dists)[:self.n_best]]
best_vt = result
</DeepExtract>
<DeepExtract>
projected = safe_sparse_dot(X, best_vt.T)
(_, labels) = self._k_means(projected, n_row_clusters)
self.row_labels_ = labels
</DeepExtract>
<DeepExtract>
projected = safe_sparse_dot(X.T, best_ut.T)
(_, labels) = self._k_means(projected, n_col_clusters)
self.column_labels_ = labels
</DeepExtract>
self.rows_ = np.vstack([self.row_labels_ == label for label in range(n_row_clusters) for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label for _ in range(n_row_clusters) for label in range(n_col_clusters)])
|
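The attributes set at the end of `_fit` (`row_labels_`, `column_labels_`, `rows_`, `columns_`) are easiest to see through the public estimator. A short sketch on checkerboard-structured data (parameters arbitrary, shapes annotated):

from sklearn.cluster import SpectralBiclustering
from sklearn.datasets import make_checkerboard

data, rows, cols = make_checkerboard(shape=(300, 300), n_clusters=(4, 3),
                                     noise=10, random_state=0)
model = SpectralBiclustering(n_clusters=(4, 3), method='log',
                             random_state=0).fit(data)

print(model.row_labels_.shape, model.column_labels_.shape)  # (300,) (300,)
print(model.rows_.shape, model.columns_.shape)              # (12, 300) (12, 300) -- 4*3 biclusters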
@pytest.mark.skipif(True, reason='skipping mini_batch_fit_transform.')
def test_mini_batch_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
n_features = (8, 8)[0] * (8, 8)[1]
rng = check_random_state(rng)
U = rng.randn(10, 3)
V = rng.randn(3, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(3):
img = np.zeros((8, 8))
(xmin, xmax) = (centers[k][0] - sz[k], centers[k][0] + sz[k])
(ymin, ymax) = (centers[k][1] - sz[k], centers[k][1] + sz[k])
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])
(Y, _, _) = (Y, U, V)
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
if sys.platform == 'win32':
import joblib
_mp = joblib.parallel.multiprocessing
joblib.parallel.multiprocessing = None
try:
spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0)
U2 = spca.fit(Y).transform(Y)
finally:
joblib.parallel.multiprocessing = _mp
else:
spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0)
U2 = spca.fit(Y).transform(Y)
assert not np.all(spca_lars.components_ == 0)
assert_array_almost_equal(U1, U2)
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha, random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
@pytest.mark.skipif(True, reason='skipping mini_batch_fit_transform.')
def test_mini_batch_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
<DeepExtract>
n_features = (8, 8)[0] * (8, 8)[1]
rng = check_random_state(rng)
U = rng.randn(10, 3)
V = rng.randn(3, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(3):
img = np.zeros((8, 8))
(xmin, xmax) = (centers[k][0] - sz[k], centers[k][0] + sz[k])
(ymin, ymax) = (centers[k][1] - sz[k], centers[k][1] + sz[k])
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])
(Y, _, _) = (Y, U, V)
</DeepExtract>
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
if sys.platform == 'win32':
import joblib
_mp = joblib.parallel.multiprocessing
joblib.parallel.multiprocessing = None
try:
spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0)
U2 = spca.fit(Y).transform(Y)
finally:
joblib.parallel.multiprocessing = _mp
else:
spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0)
U2 = spca.fit(Y).transform(Y)
assert not np.all(spca_lars.components_ == 0)
assert_array_almost_equal(U1, U2)
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha, random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
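A minimal usage sketch of the estimator exercised by the (skipped) test above; random data stands in for the structured images, so the exact sparsity pattern is not meaningful here.

import numpy as np
from sklearn.decomposition import MiniBatchSparsePCA

rng = np.random.RandomState(0)
Y = rng.randn(30, 64)

spca = MiniBatchSparsePCA(n_components=3, alpha=1, random_state=0)
codes = spca.fit_transform(Y)

print(codes.shape, spca.components_.shape)  # (30, 3) (3, 64)
print(np.mean(spca.components_ == 0))       # fraction of exactly-zero loadings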
def test_classifier_matching():
n_samples = 20
(X, y) = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
fit_intercept = True
if classification:
step_size = 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha)
else:
step_size = 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
for solver in ['sag', 'saga']:
if solver == 'sag':
n_iter = 80
else:
n_iter = 300
clf = LogisticRegression(solver=solver, fit_intercept=fit_intercept, tol=1e-11, C=1.0 / alpha / n_samples, max_iter=n_iter, random_state=10, multi_class='ovr')
clf.fit(X, y)
if step_size * alpha == 1.0:
raise ZeroDivisionError('Sparse sag does not handle the case step_size * alpha == 1')
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=int)
gradient_memory = np.zeros(n_samples)
rng = check_random_state(random_state)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
if sparse:
decay = 0.01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter
p = wscale * np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - gradient_memory[idx] * entry
sum_gradient += gradient_correction
if solver == 'saga':
for j in range(n_features):
weights[j] -= gradient_correction[j] * step_size * (1 - 1.0 / len(seen)) / wscale
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if solver == 'saga':
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
gradient_memory[idx] = gradient
wscale *= 1.0 - alpha * step_size
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))
if counter >= 1 and wscale < 1e-09:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
weights *= wscale
(weights, intercept) = (weights, intercept)
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
if sparse:
decay = 0.01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
gradient_correction = update - gradient_memory[idx]
sum_gradient += gradient_correction
gradient_memory[idx] = update
if solver == 'saga':
weights -= gradient_correction * step_size * (1 - 1.0 / len(seen))
if fit_intercept:
gradient_correction = gradient - intercept_gradient_memory[idx]
intercept_gradient_memory[idx] = gradient
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if solver == 'saga':
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
weights -= step_size * sum_gradient / len(seen)
(weights2, intercept2) = (weights, intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=9)
assert_array_almost_equal(intercept, clf.intercept_, decimal=9)
assert_array_almost_equal(weights2, clf.coef_, decimal=9)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=9)
|
def test_classifier_matching():
n_samples = 20
(X, y) = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
fit_intercept = True
<DeepExtract>
if classification:
step_size = 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha)
else:
step_size = 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
</DeepExtract>
for solver in ['sag', 'saga']:
if solver == 'sag':
n_iter = 80
else:
n_iter = 300
clf = LogisticRegression(solver=solver, fit_intercept=fit_intercept, tol=1e-11, C=1.0 / alpha / n_samples, max_iter=n_iter, random_state=10, multi_class='ovr')
clf.fit(X, y)
<DeepExtract>
if step_size * alpha == 1.0:
raise ZeroDivisionError('Sparse sag does not handle the case step_size * alpha == 1')
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=int)
gradient_memory = np.zeros(n_samples)
rng = check_random_state(random_state)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
if sparse:
decay = 0.01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter
p = wscale * np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - gradient_memory[idx] * entry
sum_gradient += gradient_correction
if solver == 'saga':
for j in range(n_features):
weights[j] -= gradient_correction[j] * step_size * (1 - 1.0 / len(seen)) / wscale
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if solver == 'saga':
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
gradient_memory[idx] = gradient
wscale *= 1.0 - alpha * step_size
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))
if counter >= 1 and wscale < 1e-09:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= (c_sum[counter - 1] - c_sum[last_updated[j] - 1]) * sum_gradient[j]
weights *= wscale
(weights, intercept) = (weights, intercept)
</DeepExtract>
<DeepExtract>
(n_samples, n_features) = (X.shape[0], X.shape[1])
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
if sparse:
decay = 0.01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = log_dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
gradient_correction = update - gradient_memory[idx]
sum_gradient += gradient_correction
gradient_memory[idx] = update
if solver == 'saga':
weights -= gradient_correction * step_size * (1 - 1.0 / len(seen))
if fit_intercept:
gradient_correction = gradient - intercept_gradient_memory[idx]
intercept_gradient_memory[idx] = gradient
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
if solver == 'saga':
intercept -= step_size * intercept_sum_gradient / len(seen) * decay + gradient_correction
else:
intercept -= step_size * intercept_sum_gradient / len(seen) * decay
weights -= step_size * sum_gradient / len(seen)
(weights2, intercept2) = (weights, intercept)
</DeepExtract>
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=9)
assert_array_almost_equal(intercept, clf.intercept_, decimal=9)
assert_array_almost_equal(weights2, clf.coef_, decimal=9)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=9)
|
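# The pair above checks scikit-learn's 'sag'/'saga' solvers against a slow NumPy
# reference. A minimal, self-contained sketch of the SAGA update rule being
# exercised, applied to unregularized least squares (function and variable names
# are illustrative, not part of scikit-learn):
import numpy as np

def saga_least_squares(X, y, step=0.02, n_epochs=200, seed=0):
    """SAGA on f(w) = 0.5 * mean((X @ w - y) ** 2)."""
    rng = np.random.RandomState(seed)
    n_samples, n_features = X.shape
    w = np.zeros(n_features)
    memory = X * (X @ w - y)[:, None]      # stored per-sample gradients (at w = 0)
    grad_avg = memory.mean(axis=0)         # running average of the stored gradients
    for _ in range(n_epochs * n_samples):
        i = rng.randint(n_samples)
        g_new = X[i] * (X[i] @ w - y[i])   # fresh gradient for sample i
        w -= step * (g_new - memory[i] + grad_avg)   # variance-reduced step
        grad_avg += (g_new - memory[i]) / n_samples  # keep the average in sync
        memory[i] = g_new
    return w

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
w_true = np.array([1.0, -2.0, 0.5])
print(np.round(saga_least_squares(X, X @ w_true), 2))  # converges to ~[1. -2. 0.5]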
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
    Used internally by FastICA (main loop).
"""
    # symmetric decorrelation of the initial unmixing matrix
    (s, u) = linalg.eigh(np.dot(w_init, w_init.T))
    s = np.clip(s, a_min=np.finfo(w_init.dtype).tiny, a_max=None)
    W = np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, w_init])
    del w_init
    p_ = float(X.shape[1])
    for ii in range(max_iter):
        (gwtx, g_wtx) = g(np.dot(W, X), fun_args)
        # fixed-point update followed by symmetric decorrelation of the result
        W_next = np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W
        (s, u) = linalg.eigh(np.dot(W_next, W_next.T))
        s = np.clip(s, a_min=np.finfo(W_next.dtype).tiny, a_max=None)
        W1 = np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W_next])
del gwtx, g_wtx
lim = max(abs(abs(np.einsum('ij,ij->i', W1, W)) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.', ConvergenceWarning)
return (W, ii + 1)
|
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
    Used internally by FastICA (main loop).
"""
<DeepExtract>
(s, u) = linalg.eigh(np.dot(w_init, w_init.T))
s = np.clip(s, a_min=np.finfo(w_init.dtype).tiny, a_max=None)
w_init = np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, w_init])
</DeepExtract>
del w_init
p_ = float(X.shape[1])
for ii in range(max_iter):
<DeepExtract>
(gwtx, g_wtx) = self.fun(np.dot(W, X), **fun_args)
</DeepExtract>
<DeepExtract>
(s, u) = linalg.eigh(np.dot(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W, np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W.T))
s = np.clip(s, a_min=np.finfo(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W.dtype).tiny, a_max=None)
W1 = np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W])
</DeepExtract>
del gwtx, g_wtx
lim = max(abs(abs(np.einsum('ij,ij->i', W1, W)) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.', ConvergenceWarning)
return (W, ii + 1)
|
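# The blocks inlined above are FastICA's symmetric decorrelation,
# W <- (W W^T)^{-1/2} W, computed through an eigendecomposition of W W^T.
# The same step as a standalone sketch (sym_decorrelate is an illustrative name):
import numpy as np
from scipy import linalg

def sym_decorrelate(W):
    s, u = linalg.eigh(np.dot(W, W.T))
    s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)
    return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])

W = np.random.RandomState(0).randn(4, 4)
Wd = sym_decorrelate(W)
# after decorrelation the rows form an orthonormal set: Wd @ Wd.T ~ identity
assert np.allclose(Wd @ Wd.T, np.eye(4), atol=1e-8)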
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * self._max_features, max_n_rows=n_samples)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order='f')
for sl in slices:
        chunk_depths = np.zeros(X[sl].shape[0], order='f')
        average_path_length_max_samples = _average_path_length([self._max_samples])
        for (tree_idx, (tree, features)) in enumerate(zip(self.estimators_, self.estimators_features_)):
            X_subset = X[sl][:, features] if subsample_features else X[sl]
            leaves_index = tree.apply(X_subset, check_input=False)
            chunk_depths += self._decision_path_lengths[tree_idx][leaves_index] + self._average_path_length_per_tree[tree_idx][leaves_index] - 1.0
        denominator = len(self.estimators_) * average_path_length_max_samples
        scores[sl] = 2 ** (-np.divide(chunk_depths, denominator, out=np.ones_like(chunk_depths), where=denominator != 0))
return scores
|
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * self._max_features, max_n_rows=n_samples)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order='f')
for sl in slices:
<DeepExtract>
n_samples = X[sl].shape[0]
depths = np.zeros(n_samples, order='f')
average_path_length_max_samples = _average_path_length([self._max_samples])
for (tree_idx, (tree, features)) in enumerate(zip(self.estimators_, self.estimators_features_)):
X_subset = X[sl][:, features] if subsample_features else X[sl]
leaves_index = tree.apply(X_subset, check_input=False)
depths += self._decision_path_lengths[tree_idx][leaves_index] + self._average_path_length_per_tree[tree_idx][leaves_index] - 1.0
denominator = len(self.estimators_) * average_path_length_max_samples
scores = 2 ** (-np.divide(depths, denominator, out=np.ones_like(depths), where=denominator != 0))
scores[sl] = scores
</DeepExtract>
return scores
|
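# The chunked scorer above averages per-tree path depths and converts them with
# the isolation-forest formula s(x) = 2 ** (-E[h(x)] / c(n)), where c(n) is the
# expected path length of an unsuccessful binary-search-tree lookup over n
# samples. A hedged sketch of c(n) and of how the score behaves around it:
import numpy as np

def average_path_length(n):
    """c(n) = 2 * H(n - 1) - 2 * (n - 1) / n, with the harmonic number
    approximated by log(n - 1) plus the Euler-Mascheroni constant."""
    n = float(n)
    if n <= 1:
        return 0.0
    if n == 2:
        return 1.0
    return 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n

c = average_path_length(256)
# samples isolated in fewer splits than c(n) score above 0.5 (more anomalous),
# deeper-than-average samples score below 0.5
print(2 ** (-(c - 3) / c), 2 ** (-(c + 3) / c))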
def check_cv_coverage(cv, X, y, groups, expected_n_splits):
n_samples = _num_samples(X)
assert cv.get_n_splits(X, y, groups) == expected_n_splits
collected_test_samples = set()
iterations = 0
for (train, test) in cv.split(X, y, groups):
(train, test) = (set(train), set(test))
assert train.intersection(test) == set()
if n_samples is not None:
assert train.union(test) == set(range(n_samples))
iterations += 1
collected_test_samples.update(test)
assert iterations == expected_n_splits
if n_samples is not None:
assert collected_test_samples == set(range(n_samples))
|
def check_cv_coverage(cv, X, y, groups, expected_n_splits):
n_samples = _num_samples(X)
assert cv.get_n_splits(X, y, groups) == expected_n_splits
collected_test_samples = set()
iterations = 0
for (train, test) in cv.split(X, y, groups):
<DeepExtract>
(train, test) = (set(train), set(test))
assert train.intersection(test) == set()
if n_samples is not None:
assert train.union(test) == set(range(n_samples))
</DeepExtract>
iterations += 1
collected_test_samples.update(test)
assert iterations == expected_n_splits
if n_samples is not None:
assert collected_test_samples == set(range(n_samples))
|
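# A usage sketch of the helper above (assuming check_cv_coverage is in scope):
# KFold with 5 splits partitions 10 samples into disjoint test folds whose
# union covers every index, so all of the assertions hold.
import numpy as np
from sklearn.model_selection import KFold

X = np.arange(20).reshape(10, 2)
y = np.arange(10) % 2
check_cv_coverage(KFold(n_splits=5), X, y, groups=None, expected_n_splits=5)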
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
n_features = (8, 8)[0] * (8, 8)[1]
rng = check_random_state(rng)
U = rng.randn(10, 3)
V = rng.randn(3, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(3):
img = np.zeros((8, 8))
(xmin, xmax) = (centers[k][0] - sz[k], centers[k][0] + sz[k])
(ymin, ymax) = (centers[k][1] - sz[k], centers[k][1] + sz[k])
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])
(Y, _, _) = (Y, U, V)
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha, random_state=0).fit(Y)
U2 = spca.transform(Y)
assert not np.all(spca_lars.components_ == 0)
assert_array_almost_equal(U1, U2)
|
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
<DeepExtract>
n_features = (8, 8)[0] * (8, 8)[1]
rng = check_random_state(rng)
U = rng.randn(10, 3)
V = rng.randn(3, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(3):
img = np.zeros((8, 8))
(xmin, xmax) = (centers[k][0] - sz[k], centers[k][0] + sz[k])
(ymin, ymax) = (centers[k][1] - sz[k], centers[k][1] + sz[k])
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])
(Y, _, _) = (Y, U, V)
</DeepExtract>
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha, random_state=0).fit(Y)
U2 = spca.transform(Y)
assert not np.all(spca_lars.components_ == 0)
assert_array_almost_equal(U1, U2)
|
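# The helper inlined above builds a rank-3 toy "image dictionary": three 8x8
# atoms, each a small square of ones, mixed by random codes U plus Gaussian
# noise. A compact sketch of the same construction (toy_image_data is an
# illustrative name, not part of the test module):
import numpy as np

def toy_image_data(n_samples=10, img_shape=(8, 8), seed=0):
    rng = np.random.RandomState(seed)
    n_features = img_shape[0] * img_shape[1]
    U = rng.randn(n_samples, 3)
    V = np.zeros((3, n_features))
    for k, ((cx, cy), s) in enumerate(zip([(3, 3), (6, 7), (8, 1)], [1, 2, 1])):
        img = np.zeros(img_shape)
        img[cx - s:cx + s, cy - s:cy + s] = 1.0   # one localized square per atom
        V[k] = img.ravel()
    Y = U @ V + 0.1 * rng.randn(n_samples, n_features)
    return Y, U, V

Y, U, V = toy_image_data()
print(np.linalg.matrix_rank(V))  # 3 independent atoms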
@fails_if_pypy
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
@pytest.mark.parametrize('target_column', ['petalwidth', ['petalwidth', 'petallength']])
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
"""Check that we can force the target to not be the default target."""
pd = pytest.importorskip('pandas')
data_id = 61
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
bunch_forcing_target = fetch_openml(data_id=data_id, as_frame=True, cache=False, target_column=target_column, parser=parser)
bunch_default = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser=parser)
pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)
if isinstance(target_column, list):
pd.testing.assert_index_equal(bunch_forcing_target.target.columns, pd.Index(target_column))
assert bunch_forcing_target.data.shape == (150, 3)
else:
assert bunch_forcing_target.target.name == target_column
assert bunch_forcing_target.data.shape == (150, 4)
|
@fails_if_pypy
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
@pytest.mark.parametrize('target_column', ['petalwidth', ['petalwidth', 'petallength']])
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
"""Check that we can force the target to not be the default target."""
pd = pytest.importorskip('pandas')
data_id = 61
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
bunch_forcing_target = fetch_openml(data_id=data_id, as_frame=True, cache=False, target_column=target_column, parser=parser)
bunch_default = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser=parser)
pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)
if isinstance(target_column, list):
pd.testing.assert_index_equal(bunch_forcing_target.target.columns, pd.Index(target_column))
assert bunch_forcing_target.data.shape == (150, 3)
else:
assert bunch_forcing_target.target.name == target_column
assert bunch_forcing_target.data.shape == (150, 4)
|
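# The fixture above keeps fetch_openml fully offline by monkeypatching the
# urlopen symbol inside sklearn.datasets._openml with a function that serves
# pre-recorded gzip files. The same idea in miniature; _CannedResponse and its
# attribute set are illustrative stand-ins for the _MockHTTPResponse helper used
# in the real tests, and the payload below is fake:
import gzip
from io import BytesIO

class _CannedResponse:
    def __init__(self, payload):
        self._fp = BytesIO(payload)
    def read(self, amt=-1):
        return self._fp.read(amt)
    def close(self):
        self._fp.close()

def canned_urlopen(request, *args, **kwargs):
    wants_gzip = request.get_header("Accept-encoding") == "gzip"
    raw = b'{"data_set_description": {"id": "61"}}'
    return _CannedResponse(gzip.compress(raw) if wants_gzip else raw)

# in a test: monkeypatch.setattr(sklearn.datasets._openml, "urlopen", canned_urlopen)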
def test_ignore_warning():
def _warning_function():
warnings.warn('deprecation warning', DeprecationWarning)
def _multiple_warning_function():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function, category=DeprecationWarning))
with pytest.warns(DeprecationWarning):
ignore_warnings(_warning_function, category=UserWarning)()
with pytest.warns(UserWarning):
ignore_warnings(_multiple_warning_function, category=FutureWarning)()
with pytest.warns(DeprecationWarning):
ignore_warnings(_multiple_warning_function, category=UserWarning)()
assert_no_warnings(ignore_warnings(_warning_function, category=(DeprecationWarning, UserWarning)))
@ignore_warnings
def decorator_no_warning():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
warnings.warn('deprecation warning', DeprecationWarning)
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
warnings.warn('deprecation warning', DeprecationWarning)
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
with pytest.warns(DeprecationWarning):
_warning_function()
with pytest.warns(UserWarning):
_multiple_warning_function()
with pytest.warns(DeprecationWarning):
_multiple_warning_function()
def context_manager_no_warning():
with ignore_warnings():
warnings.warn('deprecation warning', DeprecationWarning)
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
warnings.warn('deprecation warning', DeprecationWarning)
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
warnings.warn('deprecation warning', DeprecationWarning)
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
with pytest.warns(DeprecationWarning):
with ignore_warnings(category=UserWarning):
_warning_function()
with pytest.warns(UserWarning):
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
with pytest.warns(DeprecationWarning):
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
warning_class = UserWarning
match = "'obj' should be a callable.+you should use 'category=UserWarning'"
with pytest.raises(ValueError, match=match):
silence_warnings_func = ignore_warnings(warning_class)(_warning_function)
silence_warnings_func()
with pytest.raises(ValueError, match=match):
@ignore_warnings(warning_class)
def test():
pass
|
def test_ignore_warning():
def _warning_function():
warnings.warn('deprecation warning', DeprecationWarning)
def _multiple_warning_function():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function, category=DeprecationWarning))
with pytest.warns(DeprecationWarning):
ignore_warnings(_warning_function, category=UserWarning)()
with pytest.warns(UserWarning):
ignore_warnings(_multiple_warning_function, category=FutureWarning)()
with pytest.warns(DeprecationWarning):
ignore_warnings(_multiple_warning_function, category=UserWarning)()
assert_no_warnings(ignore_warnings(_warning_function, category=(DeprecationWarning, UserWarning)))
@ignore_warnings
def decorator_no_warning():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
</DeepExtract>
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
</DeepExtract>
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
</DeepExtract>
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
with pytest.warns(DeprecationWarning):
<DeepExtract>
_warning_function()
</DeepExtract>
with pytest.warns(UserWarning):
<DeepExtract>
_multiple_warning_function()
</DeepExtract>
with pytest.warns(DeprecationWarning):
<DeepExtract>
_multiple_warning_function()
</DeepExtract>
def context_manager_no_warning():
with ignore_warnings():
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
</DeepExtract>
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
</DeepExtract>
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
</DeepExtract>
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
<DeepExtract>
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
</DeepExtract>
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
with pytest.warns(DeprecationWarning):
<DeepExtract>
with ignore_warnings(category=UserWarning):
_warning_function()
</DeepExtract>
with pytest.warns(UserWarning):
<DeepExtract>
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
</DeepExtract>
with pytest.warns(DeprecationWarning):
<DeepExtract>
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
</DeepExtract>
warning_class = UserWarning
match = "'obj' should be a callable.+you should use 'category=UserWarning'"
with pytest.raises(ValueError, match=match):
silence_warnings_func = ignore_warnings(warning_class)(_warning_function)
silence_warnings_func()
with pytest.raises(ValueError, match=match):
@ignore_warnings(warning_class)
def test():
pass
|
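# ignore_warnings (from sklearn's private testing utilities) works both as a
# decorator and as a context manager, and `category` restricts what it silences.
# A minimal usage sketch of the behaviour the test above exercises:
import warnings
from sklearn.utils._testing import ignore_warnings

@ignore_warnings(category=DeprecationWarning)
def noisy():
    warnings.warn("old API", DeprecationWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    noisy()                                        # silenced by the decorator
    with ignore_warnings(category=UserWarning):
        warnings.warn("user facing", UserWarning)  # silenced by the context manager
print(len(caught))  # expected: 0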
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except URLError:
raise
except Exception as exc:
if no_retry_exception is not None and isinstance(exc, no_retry_exception):
raise
warn('Invalid cache, redownloading file', RuntimeWarning)
local_path = os.path.join(data_home, 'openml.org', openml_path + '.gz')
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
|
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except URLError:
raise
except Exception as exc:
if no_retry_exception is not None and isinstance(exc, no_retry_exception):
raise
warn('Invalid cache, redownloading file', RuntimeWarning)
<DeepExtract>
local_path = os.path.join(data_home, 'openml.org', openml_path + '.gz')
</DeepExtract>
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
|
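# The wrapper above implements "retry once after wiping a corrupt cache file":
# URLError propagates unchanged, a configured exception class is never retried,
# and any other failure deletes the cached file and calls the function again.
# A simplified standalone form of the same control flow (it omits the URLError
# and data_home special cases; the decorator name is hypothetical):
import os
from functools import wraps

def retry_after_clearing_cache(path, no_retry_exception=None):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception as exc:
                if no_retry_exception is not None and isinstance(exc, no_retry_exception):
                    raise
                if os.path.exists(path):
                    os.unlink(path)    # drop the possibly corrupt cache entry
                return f(*args, **kw)  # second and final attempt
        return wrapper
    return decorator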
def test_scaling_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
n_features = (8, 8)[0] * (8, 8)[1]
rng = check_random_state(rng)
U = rng.randn(1000, 3)
V = rng.randn(3, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(3):
img = np.zeros((8, 8))
(xmin, xmax) = (centers[k][0] - sz[k], centers[k][0] + sz[k])
(ymin, ymax) = (centers[k][1] - sz[k], centers[k][1] + sz[k])
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])
(Y, _, _) = (Y, U, V)
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=rng)
results_train = spca_lars.fit_transform(Y)
results_test = spca_lars.transform(Y[:10])
assert_allclose(results_train[0], results_test[0])
|
def test_scaling_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
<DeepExtract>
n_features = (8, 8)[0] * (8, 8)[1]
rng = check_random_state(rng)
U = rng.randn(1000, 3)
V = rng.randn(3, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(3):
img = np.zeros((8, 8))
(xmin, xmax) = (centers[k][0] - sz[k], centers[k][0] + sz[k])
(ymin, ymax) = (centers[k][1] - sz[k], centers[k][1] + sz[k])
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])
(Y, _, _) = (Y, U, V)
</DeepExtract>
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=rng)
results_train = spca_lars.fit_transform(Y)
results_test = spca_lars.transform(Y[:10])
assert_allclose(results_train[0], results_test[0])
|
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', copy=self.copy_x, accept_large_sparse=False)
if X.shape[0] < self.n_clusters:
raise ValueError(f'n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}.')
self._tol = _tolerance(X, self.tol)
self._n_init = self.n_init
if self._n_init == 'warn':
warnings.warn(f"The default value of `n_init` will change from {default_n_init} to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning", FutureWarning)
self._n_init = default_n_init
if self._n_init == 'auto':
if self.init == 'k-means++':
self._n_init = 1
else:
self._n_init = default_n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(f'Explicit initial center position passed: performing only one init in {self.__class__.__name__} instead of n_init={self._n_init}.', RuntimeWarning, stacklevel=2)
self._n_init = 1
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
init = self.init
init_is_array_like = _is_arraylike_not_scalar(init)
if init_is_array_like:
init = check_array(init, dtype=X.dtype, copy=True, order='C')
if init.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of clusters {self.n_clusters}.')
if init.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of features of the data {X.shape[1]}.')
if not sp.issparse(X):
X_mean = X.mean(axis=0)
X -= X_mean
if init_is_array_like:
init -= X_mean
x_squared_norms = row_norms(X, squared=True)
if self._algorithm == 'elkan':
kmeans_single = _kmeans_single_elkan
else:
kmeans_single = _kmeans_single_lloyd
if sp.issparse(X):
return
n_active_threads = int(np.ceil(X.shape[0] / CHUNK_SIZE))
if n_active_threads < self._n_threads:
modules = threadpool_info()
has_vcomp = 'vcomp' in [module['prefix'] for module in modules]
has_mkl = ('mkl', 'intel') in [(module['internal_api'], module.get('threading_layer', None)) for module in modules]
if has_vcomp and has_mkl:
self._warn_mkl_vcomp(n_active_threads)
(best_inertia, best_labels) = (None, None)
for i in range(self._n_init):
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
(centers, _) = _kmeans_plusplus(X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms, sample_weight=sample_weight)
elif isinstance(init, str) and init == 'random':
seeds = random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
self._validate_center_shape(X, centers)
if sp.issparse(centers):
centers = centers.toarray()
centers_init = centers
if self.verbose:
print('Initialization complete')
(labels, inertia, centers, n_iter_) = kmeans_single(X, sample_weight, centers_init, max_iter=self.max_iter, verbose=self.verbose, tol=self._tol, n_threads=self._n_threads)
if best_inertia is None or (inertia < best_inertia and (not _is_same_clustering(labels, best_labels, self.n_clusters))):
best_labels = labels
best_centers = centers
best_inertia = inertia
best_n_iter = n_iter_
if not sp.issparse(X):
if not self.copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < self.n_clusters:
warnings.warn('Number of distinct clusters ({}) found smaller than n_clusters ({}). Possibly due to duplicate points in X.'.format(distinct_clusters, self.n_clusters), ConvergenceWarning, stacklevel=2)
self.cluster_centers_ = best_centers
self._n_features_out = self.cluster_centers_.shape[0]
self.labels_ = best_labels
self.inertia_ = best_inertia
self.n_iter_ = best_n_iter
return self
|
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C', copy=self.copy_x, accept_large_sparse=False)
<DeepExtract>
if X.shape[0] < self.n_clusters:
raise ValueError(f'n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}.')
self._tol = _tolerance(X, self.tol)
self._n_init = self.n_init
if self._n_init == 'warn':
warnings.warn(f"The default value of `n_init` will change from {default_n_init} to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning", FutureWarning)
self._n_init = default_n_init
if self._n_init == 'auto':
if self.init == 'k-means++':
self._n_init = 1
else:
self._n_init = default_n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(f'Explicit initial center position passed: performing only one init in {self.__class__.__name__} instead of n_init={self._n_init}.', RuntimeWarning, stacklevel=2)
self._n_init = 1
</DeepExtract>
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
init = self.init
init_is_array_like = _is_arraylike_not_scalar(init)
if init_is_array_like:
init = check_array(init, dtype=X.dtype, copy=True, order='C')
<DeepExtract>
if init.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of clusters {self.n_clusters}.')
if init.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {init.shape} does not match the number of features of the data {X.shape[1]}.')
</DeepExtract>
if not sp.issparse(X):
X_mean = X.mean(axis=0)
X -= X_mean
if init_is_array_like:
init -= X_mean
x_squared_norms = row_norms(X, squared=True)
if self._algorithm == 'elkan':
kmeans_single = _kmeans_single_elkan
else:
kmeans_single = _kmeans_single_lloyd
<DeepExtract>
if sp.issparse(X):
return
n_active_threads = int(np.ceil(X.shape[0] / CHUNK_SIZE))
if n_active_threads < self._n_threads:
modules = threadpool_info()
has_vcomp = 'vcomp' in [module['prefix'] for module in modules]
has_mkl = ('mkl', 'intel') in [(module['internal_api'], module.get('threading_layer', None)) for module in modules]
if has_vcomp and has_mkl:
self._warn_mkl_vcomp(n_active_threads)
</DeepExtract>
(best_inertia, best_labels) = (None, None)
for i in range(self._n_init):
<DeepExtract>
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
(centers, _) = _kmeans_plusplus(X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms, sample_weight=sample_weight)
elif isinstance(init, str) and init == 'random':
seeds = random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
self._validate_center_shape(X, centers)
if sp.issparse(centers):
centers = centers.toarray()
centers_init = centers
</DeepExtract>
if self.verbose:
print('Initialization complete')
(labels, inertia, centers, n_iter_) = kmeans_single(X, sample_weight, centers_init, max_iter=self.max_iter, verbose=self.verbose, tol=self._tol, n_threads=self._n_threads)
if best_inertia is None or (inertia < best_inertia and (not _is_same_clustering(labels, best_labels, self.n_clusters))):
best_labels = labels
best_centers = centers
best_inertia = inertia
best_n_iter = n_iter_
if not sp.issparse(X):
if not self.copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < self.n_clusters:
warnings.warn('Number of distinct clusters ({}) found smaller than n_clusters ({}). Possibly due to duplicate points in X.'.format(distinct_clusters, self.n_clusters), ConvergenceWarning, stacklevel=2)
self.cluster_centers_ = best_centers
self._n_features_out = self.cluster_centers_.shape[0]
self.labels_ = best_labels
self.inertia_ = best_inertia
self.n_iter_ = best_n_iter
return self
|
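# fit() above runs a single-pass routine (Lloyd or Elkan) n_init times and keeps
# the run with the lowest inertia. A bare-bones version of the Lloyd iteration it
# dispatches to, for dense data and precomputed initial centers (an illustrative
# sketch, not scikit-learn's implementation):
import numpy as np

def lloyd_single(X, centers, max_iter=300, tol=1e-4):
    centers = np.asarray(centers, dtype=float)
    for _ in range(max_iter):
        # assignment step: each sample goes to its nearest center
        d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
        labels = d2.argmin(axis=1)
        # update step: move each center to the mean of its assigned samples
        new_centers = np.array([
            X[labels == k].mean(axis=0) if np.any(labels == k) else centers[k]
            for k in range(len(centers))
        ])
        shift = ((new_centers - centers) ** 2).sum()
        centers = new_centers
        if shift <= tol:
            break
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    labels = d2.argmin(axis=1)
    inertia = d2[np.arange(len(X)), labels].sum()
    return labels, inertia, centers

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + c for c in ([0, 0], [5, 5], [0, 5])])
labels, inertia, centers = lloyd_single(X, X[[0, 20, 40]])
print(np.bincount(labels))  # roughly 20 samples per cluster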
def apportion(v, default_ancestor, distance):
w = v.lbrother()
if w is not None:
vir = vor = v
vil = w
vol = v.lmost_sibling
sir = sor = v.mod
sil = vil.mod
sol = vol.mod
while vil.right() and vir.left():
vil = vil.right()
vir = vir.left()
vol = vol.left()
vor = vor.right()
vor.ancestor = v
shift = vil.x + sil - (vir.x + sir) + distance
if shift > 0:
subtrees = v.number - ancestor(vil, v, default_ancestor).number
v.change -= shift / subtrees
v.shift += shift
ancestor(vil, v, default_ancestor).change += shift / subtrees
v.x += shift
v.mod += shift
sir = sir + shift
sor = sor + shift
sil += vil.mod
sir += vir.mod
sol += vol.mod
sor += vor.mod
if vil.right() and (not vor.right()):
vor.thread = vil.right()
vor.mod += sil - sor
else:
if vir.left() and (not vol.left()):
vol.thread = vir.left()
vol.mod += sir - sol
default_ancestor = v
return default_ancestor
|
def apportion(v, default_ancestor, distance):
w = v.lbrother()
if w is not None:
vir = vor = v
vil = w
vol = v.lmost_sibling
sir = sor = v.mod
sil = vil.mod
sol = vol.mod
while vil.right() and vir.left():
vil = vil.right()
vir = vir.left()
vol = vol.left()
vor = vor.right()
vor.ancestor = v
shift = vil.x + sil - (vir.x + sir) + distance
if shift > 0:
<DeepExtract>
subtrees = v.number - ancestor(vil, v, default_ancestor).number
v.change -= shift / subtrees
v.shift += shift
ancestor(vil, v, default_ancestor).change += shift / subtrees
v.x += shift
v.mod += shift
</DeepExtract>
sir = sir + shift
sor = sor + shift
sil += vil.mod
sir += vir.mod
sol += vol.mod
sor += vor.mod
if vil.right() and (not vor.right()):
vor.thread = vil.right()
vor.mod += sil - sor
else:
if vir.left() and (not vol.left()):
vol.thread = vir.left()
vol.mod += sir - sol
default_ancestor = v
return default_ancestor
|
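# apportion() is the contour-combination step of the Buchheim/Walker tidy tree
# layout used for plotting trees: it walks the right contour of the left subtree
# and the left contour of the new subtree in lockstep and, when they would
# overlap, shifts the new subtree to the right. The inlined block is the
# move_subtree step, which also records change/shift bookkeeping so the siblings
# between the two subtrees are later spread out evenly. The same step in a
# standalone form, with a minimal stand-in node class (illustrative only):
class _Node:
    def __init__(self, number):
        self.number = number              # 1-based position among the siblings
        self.x = self.mod = 0.0
        self.change = self.shift = 0.0

def move_subtree(wl, wr, shift):
    subtrees = wr.number - wl.number      # spread the shift across the gap
    wr.change -= shift / subtrees
    wr.shift += shift
    wl.change += shift / subtrees
    wr.x += shift
    wr.mod += shift

left, right = _Node(1), _Node(4)
move_subtree(left, right, 6.0)
print(right.x, right.change, left.change)  # 6.0 -2.0 2.0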
def track_same_transform(self, *args):
path = Path(__file__).resolve().parent / 'cache'
path = path / 'estimators' / Benchmark.base_commit if True else path / 'tmp'
filename = self.__class__.__name__ + '_estimator_' + '_'.join(list(map(str, args))) + '.pkl'
est_path = path / filename
with est_path.open(mode='rb') as f:
estimator_base = pickle.load(f)
X_val_t_base = estimator_base.transform(self.X_val)
X_val_t = self.estimator.transform(self.X_val)
return np.allclose(X_val_t_base, X_val_t)
|
def track_same_transform(self, *args):
<DeepExtract>
path = Path(__file__).resolve().parent / 'cache'
path = path / 'estimators' / Benchmark.base_commit if True else path / 'tmp'
filename = self.__class__.__name__ + '_estimator_' + '_'.join(list(map(str, args))) + '.pkl'
est_path = path / filename
</DeepExtract>
with est_path.open(mode='rb') as f:
estimator_base = pickle.load(f)
X_val_t_base = estimator_base.transform(self.X_val)
X_val_t = self.estimator.transform(self.X_val)
return np.allclose(X_val_t_base, X_val_t)
|
def test_sgd_oneclass():
X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
X_test = np.array([[0.5, -2], [2, 2]])
_update_kwargs(kwargs)
clf = linear_model.SGDOneClassSVM(**kwargs)
clf.fit(X_train)
assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
assert clf.offset_[0] == -0.5
scores = clf.score_samples(X_test)
assert_allclose(scores, np.array([-0.9375, 0.625]))
dec = clf.score_samples(X_test) - clf.offset_
assert_allclose(clf.decision_function(X_test), dec)
pred = clf.predict(X_test)
assert_array_equal(pred, np.array([-1, 1]))
|
def test_sgd_oneclass():
X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
X_test = np.array([[0.5, -2], [2, 2]])
<DeepExtract>
_update_kwargs(kwargs)
clf = linear_model.SGDOneClassSVM(**kwargs)
</DeepExtract>
clf.fit(X_train)
assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
assert clf.offset_[0] == -0.5
scores = clf.score_samples(X_test)
assert_allclose(scores, np.array([-0.9375, 0.625]))
dec = clf.score_samples(X_test) - clf.offset_
assert_allclose(clf.decision_function(X_test), dec)
pred = clf.predict(X_test)
assert_array_equal(pred, np.array([-1, 1]))
|
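# The test above pins down how SGDOneClassSVM's prediction APIs relate:
# decision_function(X) == score_samples(X) - offset_, and predict() is the sign
# of the decision values mapped to {-1, +1}. A quick check on random data
# (illustrative, not part of the test suite):
import numpy as np
from sklearn import linear_model

X = np.random.RandomState(0).randn(30, 2)
clf = linear_model.SGDOneClassSVM(random_state=0).fit(X)
assert np.allclose(clf.decision_function(X), clf.score_samples(X) - clf.offset_)
assert set(np.unique(clf.predict(X))) <= {-1, 1}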
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'labels': ['array-like', None], 'pos_label': [Real, str, 'boolean', None], 'average': [StrOptions({'micro', 'macro', 'samples', 'weighted', 'binary'}), None], 'sample_weight': ['array-like', None], 'zero_division': [Options(Real, {0.0, 1.0, np.nan}), StrOptions({'warn'})]})
def precision_score(y_true, y_pred, *, labels=None, pos_label=1, average='binary', sample_weight=None, zero_division='warn'):
"""Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : int, float, bool or str, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn"
Sets the value to return when there is a zero division.
Notes:
- If set to "warn", this acts like 0, but a warning is also raised.
- If set to `np.nan`, such values will be excluded from the average.
.. versionadded:: 1.3
`np.nan` option was added.
Returns
-------
precision : float (if average is not None) or array of float of shape (n_unique_labels,)
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support : Compute precision, recall, F-measure and
support for each class.
recall_score : Compute the ratio ``tp / (tp + fn)`` where ``tp`` is the
number of true positives and ``fn`` the number of false negatives.
PrecisionRecallDisplay.from_estimator : Plot precision-recall curve given
an estimator and some data.
PrecisionRecallDisplay.from_predictions : Plot precision-recall curve given
binary class predictions.
multilabel_confusion_matrix : Compute a confusion matrix for each class or
sample.
Notes
-----
When ``true positive + false positive == 0``, precision returns 0 and
raises ``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro')
0.22...
>>> precision_score(y_true, y_pred, average='micro')
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
0.22...
>>> precision_score(y_true, y_pred, average=None)
array([0.66..., 0. , 0. ])
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> precision_score(y_true, y_pred, average=None)
array([0.33..., 0. , 0. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=1)
array([0.33..., 1. , 1. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=np.nan)
array([0.33..., nan, nan])
>>> # multilabel classification
>>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]
>>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]
>>> precision_score(y_true, y_pred, average=None)
array([0.5, 1. , 1. ])
"""
zero_division_value = _check_zero_division(zero_division)
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
    beta = 1.0  # default F-beta of the underlying precision/recall helper
    beta2 = beta ** 2
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, ('precision',), zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, ('precision',), zero_division)
if zero_division == 'warn' and ('f-score',) == ('precision',):
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, 'true nor predicted', 'F-score is', len(true_sum))
if np.isposinf(beta):
f_score = recall
elif beta == 0:
f_score = precision
else:
denom = beta2 * precision + recall
mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0)
denom[mask] = 1
f_score = (1 + beta2) * precision * recall / denom
f_score[mask] = zero_division_value
if average == 'weighted':
weights = true_sum
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = _nanaverage(precision, weights=weights)
recall = _nanaverage(recall, weights=weights)
f_score = _nanaverage(f_score, weights=weights)
true_sum = None
(p, _, _, _) = (precision, recall, f_score, true_sum)
return p
|
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'labels': ['array-like', None], 'pos_label': [Real, str, 'boolean', None], 'average': [StrOptions({'micro', 'macro', 'samples', 'weighted', 'binary'}), None], 'sample_weight': ['array-like', None], 'zero_division': [Options(Real, {0.0, 1.0, np.nan}), StrOptions({'warn'})]})
def precision_score(y_true, y_pred, *, labels=None, pos_label=1, average='binary', sample_weight=None, zero_division='warn'):
"""Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : int, float, bool or str, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn"
Sets the value to return when there is a zero division.
Notes:
- If set to "warn", this acts like 0, but a warning is also raised.
- If set to `np.nan`, such values will be excluded from the average.
.. versionadded:: 1.3
`np.nan` option was added.
Returns
-------
precision : float (if average is not None) or array of float of shape (n_unique_labels,)
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support : Compute precision, recall, F-measure and
support for each class.
recall_score : Compute the ratio ``tp / (tp + fn)`` where ``tp`` is the
number of true positives and ``fn`` the number of false negatives.
PrecisionRecallDisplay.from_estimator : Plot precision-recall curve given
an estimator and some data.
PrecisionRecallDisplay.from_predictions : Plot precision-recall curve given
binary class predictions.
multilabel_confusion_matrix : Compute a confusion matrix for each class or
sample.
Notes
-----
When ``true positive + false positive == 0``, precision returns 0 and
raises ``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro')
0.22...
>>> precision_score(y_true, y_pred, average='micro')
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
0.22...
>>> precision_score(y_true, y_pred, average=None)
array([0.66..., 0. , 0. ])
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> precision_score(y_true, y_pred, average=None)
array([0.33..., 0. , 0. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=1)
array([0.33..., 1. , 1. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=np.nan)
array([0.33..., nan, nan])
>>> # multilabel classification
>>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]
>>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]
>>> precision_score(y_true, y_pred, average=None)
array([0.5, 1. , 1. ])
"""
<DeepExtract>
zero_division_value = _check_zero_division(zero_division)
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
beta2 = beta ** 2
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, ('precision',), zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, ('precision',), zero_division)
if zero_division == 'warn' and ('f-score',) == ('precision',):
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, 'true nor predicted', 'F-score is', len(true_sum))
if np.isposinf(beta):
f_score = recall
elif beta == 0:
f_score = precision
else:
denom = beta2 * precision + recall
mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0)
denom[mask] = 1
f_score = (1 + beta2) * precision * recall / denom
f_score[mask] = zero_division_value
if average == 'weighted':
weights = true_sum
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = _nanaverage(precision, weights=weights)
recall = _nanaverage(recall, weights=weights)
f_score = _nanaverage(f_score, weights=weights)
true_sum = None
(p, _, _, _) = (precision, recall, f_score, true_sum)
</DeepExtract>
return p
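# A minimal sketch (assuming scikit-learn >= 1.3 for the np.nan option) showing how the
# zero_division parameter described in the docstring above interacts with macro averaging:
# classes that are never predicted contribute 0.0 by default, but are dropped from the
# mean when zero_division=np.nan. This is an illustration, not part of the function above.
import numpy as np
from sklearn.metrics import precision_score

y_true = [0, 1, 2, 0, 1, 2]
y_pred = [0, 0, 0, 0, 0, 0]          # classes 1 and 2 are never predicted
print(precision_score(y_true, y_pred, average='macro', zero_division=0.0))     # (1/3 + 0 + 0) / 3
print(precision_score(y_true, y_pred, average='macro', zero_division=np.nan))  # nan scores excluded -> 1/3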
|
def __init__(self, exprs, savelist=True):
super(And, self).__init__(exprs, savelist)
self.mayReturnEmpty = all((e.mayReturnEmpty for e in self.exprs))
self.skipWhitespace = True
self.whiteChars = self.exprs[0].whiteChars
self.copyDefaultWhiteChars = False
return self
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
|
def __init__(self, exprs, savelist=True):
super(And, self).__init__(exprs, savelist)
self.mayReturnEmpty = all((e.mayReturnEmpty for e in self.exprs))
<DeepExtract>
self.skipWhitespace = True
self.whiteChars = self.exprs[0].whiteChars
self.copyDefaultWhiteChars = False
return self
</DeepExtract>
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
|
def test_predict_2_classes():
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(random_state=0).fit(X, Y1).predict(X)
assert_array_equal(LogisticRegression(random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(random_state=0).predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(random_state=0).fit(X_sp, Y1).predict(X_sp)
assert_array_equal(LogisticRegression(random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(random_state=0).predict_proba(X_sp)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(C=100, random_state=0).fit(X, Y1).predict(X)
assert_array_equal(LogisticRegression(C=100, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(C=100, random_state=0).predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(C=100, random_state=0).fit(X_sp, Y1).predict(X_sp)
assert_array_equal(LogisticRegression(C=100, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(C=100, random_state=0).predict_proba(X_sp)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(fit_intercept=False, random_state=0).fit(X, Y1).predict(X)
assert_array_equal(LogisticRegression(fit_intercept=False, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(fit_intercept=False, random_state=0).predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(fit_intercept=False, random_state=0).fit(X_sp, Y1).predict(X_sp)
assert_array_equal(LogisticRegression(fit_intercept=False, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(fit_intercept=False, random_state=0).predict_proba(X_sp)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
|
def test_predict_2_classes():
<DeepExtract>
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(random_state=0).fit(X, Y1).predict(X)
assert_array_equal(LogisticRegression(random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(random_state=0).predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
</DeepExtract>
<DeepExtract>
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(random_state=0).fit(X_sp, Y1).predict(X_sp)
assert_array_equal(LogisticRegression(random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(random_state=0).predict_proba(X_sp)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
</DeepExtract>
<DeepExtract>
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(C=100, random_state=0).fit(X, Y1).predict(X)
assert_array_equal(LogisticRegression(C=100, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(C=100, random_state=0).predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
</DeepExtract>
<DeepExtract>
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(C=100, random_state=0).fit(X_sp, Y1).predict(X_sp)
assert_array_equal(LogisticRegression(C=100, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(C=100, random_state=0).predict_proba(X_sp)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
</DeepExtract>
<DeepExtract>
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(fit_intercept=False, random_state=0).fit(X, Y1).predict(X)
assert_array_equal(LogisticRegression(fit_intercept=False, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(fit_intercept=False, random_state=0).predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
</DeepExtract>
<DeepExtract>
n_samples = len(Y1)
classes = np.unique(Y1)
n_classes = classes.shape[0]
predicted = LogisticRegression(fit_intercept=False, random_state=0).fit(X_sp, Y1).predict(X_sp)
assert_array_equal(LogisticRegression(fit_intercept=False, random_state=0).classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, Y1)
probabilities = LogisticRegression(fit_intercept=False, random_state=0).predict_proba(X_sp)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), Y1)
</DeepExtract>
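# The block repeated six times above reads like an inlined test helper. A possible
# factored-out form is sketched below; the name `check_predictions` and its exact
# signature are assumptions, not necessarily what the original test suite uses.
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal

def check_predictions(clf, X, y):
    """Fit `clf` on (X, y) and check predict / predict_proba consistency."""
    n_samples = len(y)
    classes = np.unique(y)
    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, classes)
    assert predicted.shape == (n_samples,)
    assert_array_equal(predicted, y)
    probabilities = clf.predict_proba(X)
    assert probabilities.shape == (n_samples, classes.shape[0])
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probabilities.argmax(axis=1), y)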
|
def write_pip_lock_file(build_metadata):
build_name = build_metadata['build_name']
python_version = build_metadata['python_version']
environment_name = f'pip-tools-python{python_version}'
command = f'conda create -c conda-forge -n pip-tools-python{python_version} python={python_version} pip-tools -y'
proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
(out, err) = (out.decode(), err.decode())
if proc.returncode != 0:
command_str = ' '.join(shlex.split(command))
raise RuntimeError('Command exited with non-zero exit code.\nExit code: {}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}\n'.format(proc.returncode, command_str, out, err))
return out
proc = subprocess.Popen(shlex.split('conda info --json'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
(out, err) = (out.decode(), err.decode())
if proc.returncode != 0:
command_str = ' '.join(shlex.split('conda info --json'))
raise RuntimeError('Command exited with non-zero exit code.\nExit code: {}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}\n'.format(proc.returncode, command_str, out, err))
json_output = out
conda_info = json.loads(json_output)
environment_folder = [each for each in conda_info['envs'] if each.endswith(environment_name)][0]
environment_path = Path(environment_folder)
pip_compile_path = environment_path / 'bin' / 'pip-compile'
folder_path = Path(build_metadata['folder'])
requirement_path = folder_path / f'{build_name}_requirements.txt'
lock_file_path = folder_path / f'{build_name}_lock.txt'
command = f'{pip_compile_path} --upgrade {requirement_path} -o {lock_file_path}'
logger.debug('pip-compile command: %s', command)
execute_command(shlex.split(command))
|
def write_pip_lock_file(build_metadata):
build_name = build_metadata['build_name']
python_version = build_metadata['python_version']
environment_name = f'pip-tools-python{python_version}'
command = f'conda create -c conda-forge -n pip-tools-python{python_version} python={python_version} pip-tools -y'
<DeepExtract>
proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
(out, err) = (out.decode(), err.decode())
if proc.returncode != 0:
command_str = ' '.join(shlex.split(command))
raise RuntimeError('Command exited with non-zero exit code.\nExit code: {}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}\n'.format(proc.returncode, command_str, out, err))
return out
</DeepExtract>
<DeepExtract>
proc = subprocess.Popen(shlex.split('conda info --json'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
(out, err) = (out.decode(), err.decode())
if proc.returncode != 0:
command_str = ' '.join(shlex.split('conda info --json'))
raise RuntimeError('Command exited with non-zero exit code.\nExit code: {}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}\n'.format(proc.returncode, command_str, out, err))
json_output = out
</DeepExtract>
conda_info = json.loads(json_output)
environment_folder = [each for each in conda_info['envs'] if each.endswith(environment_name)][0]
environment_path = Path(environment_folder)
pip_compile_path = environment_path / 'bin' / 'pip-compile'
folder_path = Path(build_metadata['folder'])
requirement_path = folder_path / f'{build_name}_requirements.txt'
lock_file_path = folder_path / f'{build_name}_lock.txt'
<DeepExtract>
command = f'{pip_compile_path} --upgrade {requirement_path} -o {lock_file_path}'
logger.debug('pip-compile command: %s', command)
execute_command(shlex.split(command))
</DeepExtract>
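# The two subprocess blocks inlined above appear to correspond to a single helper; the
# function even calls it as `execute_command` at the end. A minimal sketch of such a
# helper follows. The error message format is taken from the inlined code; the rest of
# the body is an assumption, not the project's actual implementation.
import shlex
import subprocess

def execute_command(command_list):
    """Run a command, raising with the captured output on a non-zero exit code."""
    proc = subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    out, err = out.decode(), err.decode()
    if proc.returncode != 0:
        command_str = ' '.join(command_list)
        raise RuntimeError(
            'Command exited with non-zero exit code.\n'
            'Exit code: {}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}\n'.format(
                proc.returncode, command_str, out, err
            )
        )
    return out

# e.g. json_output = execute_command(shlex.split('conda info --json'))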
|
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'power': [Interval(Real, None, 0, closed='right'), Interval(Real, 1, None, closed='left')]})
def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
"""D^2 regression score function, fraction of Tweedie deviance explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical mean of `y_true` as
constant prediction, disregarding the input features, gets a D^2 score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
power : float, default=0
Tweedie power parameter. Either power <= 0 or power >= 1.
The higher `p` the less weight is given to extreme
deviations between true and predicted targets.
- power < 0: Extreme stable distribution. Requires: y_pred > 0.
- power = 0 : Normal distribution, output corresponds to r2_score.
y_true and y_pred can be any real numbers.
- power = 1 : Poisson distribution. Requires: y_true >= 0 and
y_pred > 0.
- 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
and y_pred > 0.
- power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
- power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
and y_pred > 0.
- otherwise : Positive stable distribution. Requires: y_true > 0
and y_pred > 0.
Returns
-------
z : float or ndarray of floats
The D^2 score.
Notes
-----
This is not a symmetric function.
Like R^2, D^2 score may be negative (it need not actually be the square of
a quantity D).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/
Examples
--------
>>> from sklearn.metrics import d2_tweedie_score
>>> y_true = [0.5, 1, 2.5, 7]
>>> y_pred = [1, 1, 5, 3.5]
>>> d2_tweedie_score(y_true, y_pred)
0.285...
>>> d2_tweedie_score(y_true, y_pred, power=1)
0.487...
>>> d2_tweedie_score(y_true, y_pred, power=2)
0.630...
>>> d2_tweedie_score(y_true, y_true, power=2)
1.0
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False, dtype=[np.float64, np.float32])
y_pred = check_array(y_pred, ensure_2d=False, dtype=[np.float64, np.float32])
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted')
if isinstance(None, str):
if None not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, None))
elif None is not None:
None = check_array(None, ensure_2d=False)
if n_outputs == 1:
raise ValueError('Custom weights are useful only in multi-output cases.')
elif n_outputs != len(None):
raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(None), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
(y_type, y_true, y_pred, _) = (y_type, y_true, y_pred, None)
if y_type == 'continuous-multioutput':
raise ValueError('Multioutput not supported in d2_tweedie_score')
if _num_samples(y_pred) < 2:
msg = 'D^2 score is not well-defined with less than two samples.'
warnings.warn(msg, UndefinedMetricWarning)
return float('nan')
(y_true, y_pred) = (np.squeeze(y_true), np.squeeze(y_pred))
(y_type, y_true, y_pred, _) = _check_reg_targets(y_true, y_pred, None, dtype=[np.float64, np.float32])
if y_type == 'continuous-multioutput':
raise ValueError('Multioutput not supported in mean_tweedie_deviance')
check_consistent_length(y_true, y_pred, sample_weight)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = sample_weight[:, np.newaxis]
message = f'Mean Tweedie deviance error with power={power} can only be used on '
if power < 0:
if (y_pred <= 0).any():
raise ValueError(message + 'strictly positive y_pred.')
elif power == 0:
pass
elif 1 <= power < 2:
if (y_true < 0).any() or (y_pred <= 0).any():
raise ValueError(message + 'non-negative y and strictly positive y_pred.')
elif power >= 2:
if (y_true <= 0).any() or (y_pred <= 0).any():
raise ValueError(message + 'strictly positive y and y_pred.')
else:
raise ValueError
numerator = _mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=power)
y_avg = np.average(y_true, weights=sample_weight)
p = power
if p < 0:
dev = 2 * (np.power(np.maximum(y_true, 0), 2 - p) / ((1 - p) * (2 - p)) - y_true * np.power(y_avg, 1 - p) / (1 - p) + np.power(y_avg, 2 - p) / (2 - p))
elif p == 0:
dev = (y_true - y_avg) ** 2
elif p == 1:
dev = 2 * (xlogy(y_true, y_true / y_avg) - y_true + y_avg)
elif p == 2:
dev = 2 * (np.log(y_avg / y_true) + y_true / y_avg - 1)
else:
dev = 2 * (np.power(y_true, 2 - p) / ((1 - p) * (2 - p)) - y_true * np.power(y_avg, 1 - p) / (1 - p) + np.power(y_avg, 2 - p) / (2 - p))
denominator = np.average(dev, weights=sample_weight)
return 1 - numerator / denominator
|
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'power': [Interval(Real, None, 0, closed='right'), Interval(Real, 1, None, closed='left')]})
def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
"""D^2 regression score function, fraction of Tweedie deviance explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical mean of `y_true` as
constant prediction, disregarding the input features, gets a D^2 score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
power : float, default=0
Tweedie power parameter. Either power <= 0 or power >= 1.
The higher `p` the less weight is given to extreme
deviations between true and predicted targets.
- power < 0: Extreme stable distribution. Requires: y_pred > 0.
- power = 0 : Normal distribution, output corresponds to r2_score.
y_true and y_pred can be any real numbers.
- power = 1 : Poisson distribution. Requires: y_true >= 0 and
y_pred > 0.
- 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
and y_pred > 0.
- power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
- power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
and y_pred > 0.
- otherwise : Positive stable distribution. Requires: y_true > 0
and y_pred > 0.
Returns
-------
z : float or ndarray of floats
The D^2 score.
Notes
-----
This is not a symmetric function.
Like R^2, D^2 score may be negative (it need not actually be the square of
a quantity D).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/
Examples
--------
>>> from sklearn.metrics import d2_tweedie_score
>>> y_true = [0.5, 1, 2.5, 7]
>>> y_pred = [1, 1, 5, 3.5]
>>> d2_tweedie_score(y_true, y_pred)
0.285...
>>> d2_tweedie_score(y_true, y_pred, power=1)
0.487...
>>> d2_tweedie_score(y_true, y_pred, power=2)
0.630...
>>> d2_tweedie_score(y_true, y_true, power=2)
1.0
"""
<DeepExtract>
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False, dtype=[np.float64, np.float32])
y_pred = check_array(y_pred, ensure_2d=False, dtype=[np.float64, np.float32])
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted')
if isinstance(None, str):
if None not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, None))
elif None is not None:
None = check_array(None, ensure_2d=False)
if n_outputs == 1:
raise ValueError('Custom weights are useful only in multi-output cases.')
elif n_outputs != len(None):
raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(None), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
(y_type, y_true, y_pred, _) = (y_type, y_true, y_pred, None)
</DeepExtract>
if y_type == 'continuous-multioutput':
raise ValueError('Multioutput not supported in d2_tweedie_score')
if _num_samples(y_pred) < 2:
msg = 'D^2 score is not well-defined with less than two samples.'
warnings.warn(msg, UndefinedMetricWarning)
return float('nan')
(y_true, y_pred) = (np.squeeze(y_true), np.squeeze(y_pred))
<DeepExtract>
(y_type, y_true, y_pred, _) = _check_reg_targets(y_true, y_pred, None, dtype=[np.float64, np.float32])
if y_type == 'continuous-multioutput':
raise ValueError('Multioutput not supported in mean_tweedie_deviance')
check_consistent_length(y_true, y_pred, sample_weight)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = sample_weight[:, np.newaxis]
message = f'Mean Tweedie deviance error with power={power} can only be used on '
if power < 0:
if (y_pred <= 0).any():
raise ValueError(message + 'strictly positive y_pred.')
elif power == 0:
pass
elif 1 <= power < 2:
if (y_true < 0).any() or (y_pred <= 0).any():
raise ValueError(message + 'non-negative y and strictly positive y_pred.')
elif power >= 2:
if (y_true <= 0).any() or (y_pred <= 0).any():
raise ValueError(message + 'strictly positive y and y_pred.')
else:
raise ValueError
numerator = _mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=power)
</DeepExtract>
y_avg = np.average(y_true, weights=sample_weight)
<DeepExtract>
p = power
if p < 0:
dev = 2 * (np.power(np.maximum(y_true, 0), 2 - p) / ((1 - p) * (2 - p)) - y_true * np.power(y_avg, 1 - p) / (1 - p) + np.power(y_avg, 2 - p) / (2 - p))
elif p == 0:
dev = (y_true - y_avg) ** 2
elif p == 1:
dev = 2 * (xlogy(y_true, y_true / y_avg) - y_true + y_avg)
elif p == 2:
dev = 2 * (np.log(y_avg / y_true) + y_true / y_avg - 1)
else:
dev = 2 * (np.power(y_true, 2 - p) / ((1 - p) * (2 - p)) - y_true * np.power(y_avg, 1 - p) / (1 - p) + np.power(y_avg, 2 - p) / (2 - p))
denominator = np.average(dev, weights=sample_weight)
</DeepExtract>
return 1 - numerator / denominator
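# A small sanity check of the claim in the docstring above that power=0 makes the D^2
# score coincide with r2_score (squared-error deviance against the mean baseline).
# This sketch only relies on the public scikit-learn API.
from sklearn.metrics import d2_tweedie_score, r2_score

y_true = [0.5, 1, 2.5, 7]
y_pred = [1, 1, 5, 3.5]
assert abs(d2_tweedie_score(y_true, y_pred, power=0) - r2_score(y_true, y_pred)) < 1e-12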
|
@validate_params({'image': [np.ndarray], 'patch_size': [tuple, list], 'max_patches': [Interval(RealNotInt, 0, 1, closed='neither'), Interval(Integral, 1, None, closed='left'), None], 'random_state': ['random_state']})
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches.
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or (image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None it corresponds to the total number
of patches that can be extracted.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
"""
(i_h, i_w) = image.shape[:2]
(p_h, p_w) = patch_size
if p_h > i_h:
raise ValueError('Height of the patch should be less than the height of the image.')
if p_w > i_w:
raise ValueError('Width of the patch should be less than the width of the image.')
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
arr_ndim = image.ndim
if isinstance((p_h, p_w, n_colors), Number):
(p_h, p_w, n_colors) = tuple([(p_h, p_w, n_colors)] * arr_ndim)
if isinstance(1, Number):
1 = tuple([1] * arr_ndim)
patch_strides = image.strides
slices = tuple((slice(None, None, st) for st in 1))
indexing_strides = image[slices].strides
patch_indices_shape = (np.array(image.shape) - np.array((p_h, p_w, n_colors))) // np.array(1) + 1
shape = tuple(list(patch_indices_shape) + list((p_h, p_w, n_colors)))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(image, shape=shape, strides=strides)
extracted_patches = patches
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, Integral) and max_patches < all_patches:
n_patches = max_patches
elif isinstance(max_patches, Integral) and max_patches >= all_patches:
n_patches = all_patches
elif isinstance(max_patches, Real) and 0 < max_patches < 1:
n_patches = int(max_patches * all_patches)
else:
raise ValueError('Invalid value for max_patches: %r' % max_patches)
else:
n_patches = all_patches
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
|
@validate_params({'image': [np.ndarray], 'patch_size': [tuple, list], 'max_patches': [Interval(RealNotInt, 0, 1, closed='neither'), Interval(Integral, 1, None, closed='left'), None], 'random_state': ['random_state']})
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches.
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or (image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None it corresponds to the total number
of patches that can be extracted.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
"""
(i_h, i_w) = image.shape[:2]
(p_h, p_w) = patch_size
if p_h > i_h:
raise ValueError('Height of the patch should be less than the height of the image.')
if p_w > i_w:
raise ValueError('Width of the patch should be less than the width of the image.')
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
<DeepExtract>
arr_ndim = image.ndim
if isinstance((p_h, p_w, n_colors), Number):
(p_h, p_w, n_colors) = tuple([(p_h, p_w, n_colors)] * arr_ndim)
if isinstance(1, Number):
1 = tuple([1] * arr_ndim)
patch_strides = image.strides
slices = tuple((slice(None, None, st) for st in 1))
indexing_strides = image[slices].strides
patch_indices_shape = (np.array(image.shape) - np.array((p_h, p_w, n_colors))) // np.array(1) + 1
shape = tuple(list(patch_indices_shape) + list((p_h, p_w, n_colors)))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(image, shape=shape, strides=strides)
extracted_patches = patches
</DeepExtract>
<DeepExtract>
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, Integral) and max_patches < all_patches:
n_patches = max_patches
elif isinstance(max_patches, Integral) and max_patches >= all_patches:
n_patches = all_patches
elif isinstance(max_patches, Real) and 0 < max_patches < 1:
n_patches = int(max_patches * all_patches)
else:
raise ValueError('Invalid value for max_patches: %r' % max_patches)
else:
n_patches = all_patches
</DeepExtract>
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
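# The as_strided block above builds a sliding-window view over the image. On recent
# NumPy (>= 1.20) an equivalent view can be obtained with sliding_window_view; the
# sketch below compares patch counts and the first patch for a grayscale image. It is
# an illustration under those assumptions, not the library's implementation.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from sklearn.feature_extraction.image import extract_patches_2d

rng = np.random.RandomState(0)
img = rng.rand(8, 10)                            # (i_h, i_w) grayscale image
p_h, p_w = 3, 4
windows = sliding_window_view(img, (p_h, p_w))   # shape (i_h - p_h + 1, i_w - p_w + 1, p_h, p_w)
patches = extract_patches_2d(img, (p_h, p_w))    # shape (n_patches, p_h, p_w)
assert patches.shape[0] == windows.shape[0] * windows.shape[1]
assert np.allclose(patches[0], windows[0, 0])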
|
def _generate_get_feature_names_out(estimator, n_features_out, input_features=None):
"""Generate feature names out for estimator using the estimator name as the prefix.
The input_feature names are validated but not used. This function is useful
for estimators that generate their own names based on `n_features_out`, i.e. PCA.
Parameters
----------
estimator : estimator instance
Estimator producing output feature names.
    n_features_out : int
Number of feature names out.
input_features : array-like of str or None, default=None
Only used to validate feature names with `estimator.feature_names_in_`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
"""
feature_names_in_ = getattr(estimator, 'feature_names_in_', None)
n_features_in_ = getattr(estimator, 'n_features_in_', None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and (not np.array_equal(feature_names_in_, input_features)):
raise ValueError('input_features is not equal to feature_names_in_')
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(f'input_features should have length equal to number of features ({n_features_in_}), got {len(input_features)}')
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not False:
return
if n_features_in_ is None:
raise ValueError('Unable to generate feature names without n_features_in_')
return np.asarray([f'x{i}' for i in range(n_features_in_)], dtype=object)
estimator_name = estimator.__class__.__name__.lower()
return np.asarray([f'{estimator_name}{i}' for i in range(n_features_out)], dtype=object)
|
def _generate_get_feature_names_out(estimator, n_features_out, input_features=None):
"""Generate feature names out for estimator using the estimator name as the prefix.
The input_feature names are validated but not used. This function is useful
for estimators that generate their own names based on `n_features_out`, i.e. PCA.
Parameters
----------
estimator : estimator instance
Estimator producing output feature names.
    n_features_out : int
Number of feature names out.
input_features : array-like of str or None, default=None
Only used to validate feature names with `estimator.feature_names_in_`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
"""
<DeepExtract>
feature_names_in_ = getattr(estimator, 'feature_names_in_', None)
n_features_in_ = getattr(estimator, 'n_features_in_', None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and (not np.array_equal(feature_names_in_, input_features)):
raise ValueError('input_features is not equal to feature_names_in_')
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(f'input_features should have length equal to number of features ({n_features_in_}), got {len(input_features)}')
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not False:
return
if n_features_in_ is None:
raise ValueError('Unable to generate feature names without n_features_in_')
return np.asarray([f'x{i}' for i in range(n_features_in_)], dtype=object)
</DeepExtract>
estimator_name = estimator.__class__.__name__.lower()
return np.asarray([f'{estimator_name}{i}' for i in range(n_features_out)], dtype=object)
|
def _fit(X, y=None, force_transform=False):
X = self._validate_data(X, ensure_2d=True, dtype=FLOAT_DTYPES, copy=self.copy, force_all_finite='allow-nan', reset=True)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN (slice|axis) encountered')
if True and self.method == 'box-cox' and (np.nanmin(X) <= 0):
raise ValueError('The Box-Cox transformation can only be applied to strictly positive data')
if check_shape and (not X.shape[1] == len(self.lambdas_)):
raise ValueError('Input data has a different number of features than fitting data. Should have {n}, data has {m}'.format(n=len(self.lambdas_), m=X.shape[1]))
X = X
if not self.copy and (not force_transform):
X = X.copy()
optim_function = {'box-cox': self._box_cox_optimize, 'yeo-johnson': self._yeo_johnson_optimize}[self.method]
with np.errstate(invalid='ignore'):
self.lambdas_ = np.array([optim_function(col) for col in X.T])
if self.standardize or force_transform:
transform_function = {'box-cox': boxcox, 'yeo-johnson': self._yeo_johnson_transform}[self.method]
for (i, lmbda) in enumerate(self.lambdas_):
with np.errstate(invalid='ignore'):
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
self._scaler = StandardScaler(copy=False)
if force_transform:
X = self._scaler.fit_transform(X)
else:
self._scaler.fit(X)
return X
|
def _fit(X, y=None, force_transform=False):
<DeepExtract>
X = self._validate_data(X, ensure_2d=True, dtype=FLOAT_DTYPES, copy=self.copy, force_all_finite='allow-nan', reset=True)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN (slice|axis) encountered')
if True and self.method == 'box-cox' and (np.nanmin(X) <= 0):
raise ValueError('The Box-Cox transformation can only be applied to strictly positive data')
if check_shape and (not X.shape[1] == len(self.lambdas_)):
raise ValueError('Input data has a different number of features than fitting data. Should have {n}, data has {m}'.format(n=len(self.lambdas_), m=X.shape[1]))
X = X
</DeepExtract>
if not self.copy and (not force_transform):
X = X.copy()
optim_function = {'box-cox': self._box_cox_optimize, 'yeo-johnson': self._yeo_johnson_optimize}[self.method]
with np.errstate(invalid='ignore'):
self.lambdas_ = np.array([optim_function(col) for col in X.T])
if self.standardize or force_transform:
transform_function = {'box-cox': boxcox, 'yeo-johnson': self._yeo_johnson_transform}[self.method]
for (i, lmbda) in enumerate(self.lambdas_):
with np.errstate(invalid='ignore'):
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
self._scaler = StandardScaler(copy=False)
if force_transform:
X = self._scaler.fit_transform(X)
else:
self._scaler.fit(X)
return X
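# A short usage sketch of the public estimator behind the private `_fit` above
# (PowerTransformer with the Yeo-Johnson method). It relies only on documented
# behaviour: one fitted lambda per feature and, with standardize=True, roughly
# zero-mean / unit-variance output on the training data.
import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(200, 2))                # skewed, strictly positive data
pt = PowerTransformer(method='yeo-johnson', standardize=True)
Xt = pt.fit_transform(X)
print(pt.lambdas_)                               # one estimated lambda per column
print(Xt.mean(axis=0), Xt.std(axis=0))           # approximately 0 and 1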
|
@available_if(_check_proba)
def predict_proba(self, X):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
https://dl.acm.org/doi/pdf/10.1145/775047.775151
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
check_is_fitted(self)
if self.loss == 'log_loss':
return self._predict_proba_lr(X)
elif self.loss == 'modified_huber':
binary = len(self.classes_) == 2
check_is_fitted(self, 'coef_')
X = self._validate_data(X, accept_sparse='csr', reset=False)
decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_
scores = decisions.ravel()
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.0
prob /= 2.0
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
prob_sum = prob.sum(axis=1)
all_zero = prob_sum == 0
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when loss='log_loss' or loss='modified_huber' (%r given)" % self.loss)
|
@available_if(_check_proba)
def predict_proba(self, X):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
https://dl.acm.org/doi/pdf/10.1145/775047.775151
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
check_is_fitted(self)
if self.loss == 'log_loss':
return self._predict_proba_lr(X)
elif self.loss == 'modified_huber':
binary = len(self.classes_) == 2
<DeepExtract>
check_is_fitted(self, 'coef_')
X = self._validate_data(X, accept_sparse='csr', reset=False)
decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_
scores = decisions.ravel()
</DeepExtract>
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.0
prob /= 2.0
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
prob_sum = prob.sum(axis=1)
all_zero = prob_sum == 0
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when loss='log_loss' or loss='modified_huber' (%r given)" % self.loss)
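# A sketch verifying, on synthetic data, the binary modified_huber formula quoted in
# the docstring above: the positive-class probability equals
# (clip(decision_function(X), -1, 1) + 1) / 2.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = SGDClassifier(loss='modified_huber', random_state=0).fit(X, y)
expected = (np.clip(clf.decision_function(X), -1, 1) + 1) / 2
assert np.allclose(clf.predict_proba(X)[:, 1], expected)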
|
def test_lda_fit_perplexity():
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method='batch', random_state=0, evaluate_every=1)
lda.fit(X)
perplexity1 = lda.bound_
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
|
def test_lda_fit_perplexity():
<DeepExtract>
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
</DeepExtract>
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1, learning_method='batch', random_state=0, evaluate_every=1)
lda.fit(X)
perplexity1 = lda.bound_
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
|
@pytest.mark.parametrize('n_neighbors, radius', [(24, None), (None, np.inf)])
@pytest.mark.parametrize('eigen_solver', eigen_solvers)
@pytest.mark.parametrize('path_method', path_methods)
def test_isomap_simple_grid(global_dtype, n_neighbors, radius, eigen_solver, path_method):
n_pts = 25
n_per_side = int(math.sqrt(n_pts))
X = np.array(list(product(range(n_per_side), repeat=2))).astype(global_dtype, copy=False)
if False:
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(n_pts, 1).astype(global_dtype, copy=False)
X = np.concatenate((X, noise), 1)
X = X
if n_neighbors is not None:
G = neighbors.kneighbors_graph(X, n_neighbors, mode='distance')
else:
G = neighbors.radius_neighbors_graph(X, radius, mode='distance')
clf = manifold.Isomap(n_neighbors=n_neighbors, radius=radius, n_components=2, eigen_solver=eigen_solver, path_method=path_method)
clf.fit(X)
if n_neighbors is not None:
G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode='distance')
else:
G_iso = neighbors.radius_neighbors_graph(clf.embedding_, radius, mode='distance')
atol = 1e-05 if global_dtype == np.float32 else 0
assert_allclose_dense_sparse(G, G_iso, atol=atol)
|
@pytest.mark.parametrize('n_neighbors, radius', [(24, None), (None, np.inf)])
@pytest.mark.parametrize('eigen_solver', eigen_solvers)
@pytest.mark.parametrize('path_method', path_methods)
def test_isomap_simple_grid(global_dtype, n_neighbors, radius, eigen_solver, path_method):
n_pts = 25
<DeepExtract>
n_per_side = int(math.sqrt(n_pts))
X = np.array(list(product(range(n_per_side), repeat=2))).astype(global_dtype, copy=False)
if False:
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(n_pts, 1).astype(global_dtype, copy=False)
X = np.concatenate((X, noise), 1)
X = X
</DeepExtract>
if n_neighbors is not None:
G = neighbors.kneighbors_graph(X, n_neighbors, mode='distance')
else:
G = neighbors.radius_neighbors_graph(X, radius, mode='distance')
clf = manifold.Isomap(n_neighbors=n_neighbors, radius=radius, n_components=2, eigen_solver=eigen_solver, path_method=path_method)
clf.fit(X)
if n_neighbors is not None:
G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode='distance')
else:
G_iso = neighbors.radius_neighbors_graph(clf.embedding_, radius, mode='distance')
atol = 1e-05 if global_dtype == np.float32 else 0
assert_allclose_dense_sparse(G, G_iso, atol=atol)
|
@validate_params({'X': ['array-like', 'sparse matrix'], 'y': ['array-like', 'sparse matrix'], 'f': [str, HasMethods(['write'])], 'zero_based': ['boolean'], 'comment': [str, bytes, None], 'query_id': ['array-like', None], 'multilabel': ['boolean']})
def dump_svmlight_file(X, y, f, *, zero_based=True, comment=None, query_id=None, multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : str or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : bool, default=True
Whether column indices should be written zero-based (True) or one-based
(False).
comment : str or bytes, default=None
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like of shape (n_samples,), default=None
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
.. versionadded:: 0.17
parameter `multilabel` to support multilabel datasets.
"""
if comment is not None:
if isinstance(comment, bytes):
comment.decode('ascii')
else:
comment = comment.encode('utf-8')
if b'\x00' in comment:
raise ValueError('comment string contains NUL byte')
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and (not multilabel):
raise ValueError('expected y of shape (n_samples, 1), got %r' % (yval.shape,))
elif yval.ndim != 1 and (not multilabel):
raise ValueError('expected y of shape (n_samples,), got %r' % (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError('X.shape[0] and y.shape[0] should be the same, got %r and %r instead.' % (Xval.shape[0], yval.shape[0]))
if yval is y and hasattr(yval, 'sorted_indices'):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, 'sort_indices'):
y.sort_indices()
if Xval is X and hasattr(Xval, 'sorted_indices'):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, 'sort_indices'):
X.sort_indices()
if query_id is None:
query_id = np.array([], dtype=np.int32)
else:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError('expected query_id of shape (n_samples,), got %r' % (query_id.shape,))
one_based = not zero_based
if hasattr(f, 'write'):
if comment:
f.write(('# Generated by dump_svmlight_file from scikit-learn %s\n' % __version__).encode())
f.write(('# Column indices are %s-based\n' % ['zero', 'one'][one_based]).encode())
f.write(b'#\n')
f.writelines((b'# %s\n' % line for line in comment.splitlines()))
X_is_sp = sp.issparse(X)
y_is_sp = sp.issparse(y)
if not multilabel and (not y_is_sp):
y = y[:, np.newaxis]
_dump_svmlight_file(X, y, f, multilabel, one_based, query_id, X_is_sp, y_is_sp)
else:
with open(f, 'wb') as f:
if comment:
f.write(('# Generated by dump_svmlight_file from scikit-learn %s\n' % __version__).encode())
f.write(('# Column indices are %s-based\n' % ['zero', 'one'][one_based]).encode())
f.write(b'#\n')
f.writelines((b'# %s\n' % line for line in comment.splitlines()))
X_is_sp = sp.issparse(X)
y_is_sp = sp.issparse(y)
if not multilabel and (not y_is_sp):
y = y[:, np.newaxis]
_dump_svmlight_file(X, y, f, multilabel, one_based, query_id, X_is_sp, y_is_sp)
|
@validate_params({'X': ['array-like', 'sparse matrix'], 'y': ['array-like', 'sparse matrix'], 'f': [str, HasMethods(['write'])], 'zero_based': ['boolean'], 'comment': [str, bytes, None], 'query_id': ['array-like', None], 'multilabel': ['boolean']})
def dump_svmlight_file(X, y, f, *, zero_based=True, comment=None, query_id=None, multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : str or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : bool, default=True
Whether column indices should be written zero-based (True) or one-based
(False).
comment : str or bytes, default=None
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like of shape (n_samples,), default=None
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
.. versionadded:: 0.17
parameter `multilabel` to support multilabel datasets.
"""
if comment is not None:
if isinstance(comment, bytes):
comment.decode('ascii')
else:
comment = comment.encode('utf-8')
if b'\x00' in comment:
raise ValueError('comment string contains NUL byte')
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and (not multilabel):
raise ValueError('expected y of shape (n_samples, 1), got %r' % (yval.shape,))
elif yval.ndim != 1 and (not multilabel):
raise ValueError('expected y of shape (n_samples,), got %r' % (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError('X.shape[0] and y.shape[0] should be the same, got %r and %r instead.' % (Xval.shape[0], yval.shape[0]))
if yval is y and hasattr(yval, 'sorted_indices'):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, 'sort_indices'):
y.sort_indices()
if Xval is X and hasattr(Xval, 'sorted_indices'):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, 'sort_indices'):
X.sort_indices()
if query_id is None:
query_id = np.array([], dtype=np.int32)
else:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError('expected query_id of shape (n_samples,), got %r' % (query_id.shape,))
one_based = not zero_based
if hasattr(f, 'write'):
<DeepExtract>
if comment:
f.write(('# Generated by dump_svmlight_file from scikit-learn %s\n' % __version__).encode())
f.write(('# Column indices are %s-based\n' % ['zero', 'one'][one_based]).encode())
f.write(b'#\n')
f.writelines((b'# %s\n' % line for line in comment.splitlines()))
X_is_sp = sp.issparse(X)
y_is_sp = sp.issparse(y)
if not multilabel and (not y_is_sp):
y = y[:, np.newaxis]
_dump_svmlight_file(X, y, f, multilabel, one_based, query_id, X_is_sp, y_is_sp)
</DeepExtract>
else:
with open(f, 'wb') as f:
<DeepExtract>
if comment:
f.write(('# Generated by dump_svmlight_file from scikit-learn %s\n' % __version__).encode())
f.write(('# Column indices are %s-based\n' % ['zero', 'one'][one_based]).encode())
f.write(b'#\n')
f.writelines((b'# %s\n' % line for line in comment.splitlines()))
X_is_sp = sp.issparse(X)
y_is_sp = sp.issparse(y)
if not multilabel and (not y_is_sp):
y = y[:, np.newaxis]
_dump_svmlight_file(X, y, f, multilabel, one_based, query_id, X_is_sp, y_is_sp)
</DeepExtract>
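# A round-trip sketch for the function above: dump a small dataset to an in-memory
# binary buffer and read it back with load_svmlight_file. It uses only the documented
# API; passing a buffer instead of a path is one of the accepted forms, since `f` may
# be any binary file-like with a `write` method.
from io import BytesIO
import numpy as np
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = np.array([[0.0, 2.0, 0.0], [1.0, 0.0, 3.0]])
y = np.array([0, 1])
buf = BytesIO()
dump_svmlight_file(X, y, buf, zero_based=True)
buf.seek(0)
X_loaded, y_loaded = load_svmlight_file(buf, n_features=3, zero_based=True)
assert np.allclose(X_loaded.toarray(), X)
assert np.array_equal(y_loaded, y)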
|
def checkSubIndent(s, l, t):
s = s
curCol = 1 if 0 < l < len(s) and s[l - 1] == '\n' else l - s.rfind('\n', 0, l)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, 'not a subentry')
|
def checkSubIndent(s, l, t):
<DeepExtract>
s = s
curCol = 1 if 0 < l < len(s) and s[l - 1] == '\n' else l - s.rfind('\n', 0, l)
</DeepExtract>
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, 'not a subentry')
|
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
(HIT, MISS) = (0, 1)
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
debugging = self.debug
if debugging or self.failAction:
if self.debugActions[0]:
self.debugActions[0](instring, loc, self)
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
try:
try:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
except ParseBaseException as err:
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), modal=self.modalResults)
except ParseBaseException as err:
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), modal=self.modalResults)
if debugging:
if self.debugActions[1]:
self.debugActions[1](instring, tokensStart, loc, self, retTokens)
value = (loc, retTokens)
except ParseBaseException as pe:
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
|
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
(HIT, MISS) = (0, 1)
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
<DeepExtract>
debugging = self.debug
if debugging or self.failAction:
if self.debugActions[0]:
self.debugActions[0](instring, loc, self)
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
try:
try:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
except ParseBaseException as err:
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
(loc, tokens) = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), modal=self.modalResults)
except ParseBaseException as err:
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), modal=self.modalResults)
if debugging:
if self.debugActions[1]:
self.debugActions[1](instring, tokensStart, loc, self, retTokens)
value = (loc, retTokens)
</DeepExtract>
except ParseBaseException as pe:
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
|
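_parseCache above is pyparsing's packrat memoisation: successes and failures are cached per (expression, string, location, flags) key, hits return a copy of the stored ParseResults, and cached exceptions are re-raised. The toy sketch below illustrates the same idea in isolation; it is not pyparsing's implementation, and every name in it is made up. In pyparsing itself this behaviour is switched on with ParserElement.enablePackrat().

# Toy packrat sketch: memoise results (including failures) per (rule, position)
# so backtracking re-parses cost a dictionary lookup instead of repeated work.
class ParseFailure(Exception):
    pass

_cache = {}

def cached_parse(rule, text, loc):
    key = (id(rule), loc)
    if key in _cache:
        value = _cache[key]
        if isinstance(value, Exception):   # cached failure: re-raise it
            raise value
        return value                       # cached success: (new_loc, tokens)
    try:
        value = rule(text, loc)            # rule returns (new_loc, tokens) or raises
    except ParseFailure as err:
        _cache[key] = err
        raise
    _cache[key] = value
    return value

# Example rule: match a run of 'a' characters starting at loc.
def many_a(text, loc):
    end = loc
    while end < len(text) and text[end] == 'a':
        end += 1
    if end == loc:
        raise ParseFailure("expected 'a' at %d" % loc)
    return end, text[loc:end]

print(cached_parse(many_a, "aaab", 0))   # computed: (3, 'aaa')
print(cached_parse(many_a, "aaab", 0))   # served from the cache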
def graphical_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd', tol=0.0001, enet_tol=0.0001, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : array-like of shape (n_alphas,)
The list of regularization parameters, decreasing order.
cov_init : array of shape (n_features, n_features), default=None
The initial guess for the covariance.
X_test : array of shape (n_test_samples, n_features), default=None
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}, default='cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : float, default=1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped. The tolerance must be a positive
number.
enet_tol : float, default=1e-4
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'. The tolerance must be a positive number.
max_iter : int, default=100
The maximum number of iterations. This parameter should be a strictly
positive integer.
verbose : int or bool, default=False
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : list of shape (n_alphas,) of ndarray of shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : list of shape (n_alphas,) of ndarray of shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : list of shape (n_alphas,), dtype=float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
(_, n_features) = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = -2.0 * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
(covariance_, precision_) = (emp_cov, precision_, (cost, d_gap), 0)
else:
(covariance_, precision_) = (emp_cov, precision_, (cost, d_gap))
elif return_n_iter:
(covariance_, precision_) = (emp_cov, linalg.inv(emp_cov), 0)
else:
(covariance_, precision_) = (emp_cov, linalg.inv(emp_cov))
if covariance_ is None:
covariance_ = emp_cov.copy()
else:
covariance_ = covariance_.copy()
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = linalg.pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
d_gap = np.inf
sub_covariance = np.copy(covariance_[1:, 1:], order='C')
for i in range(max_iter):
for idx in range(n_features):
if idx > 0:
di = idx - 1
sub_covariance[di] = covariance_[di][indices != idx]
sub_covariance[:, di] = covariance_[:, di][indices != idx]
else:
sub_covariance[:] = covariance_[1:, 1:]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
coefs = -(precision_[indices != idx, idx] / (precision_[idx, idx] + 1000 * eps))
(coefs, _, _, _) = cd_fast.enet_coordinate_descent_gram(coefs, alpha, 0, sub_covariance, row, row, max_iter, enet_tol, check_random_state(None), False)
else:
(_, _, coefs) = lars_path_gram(Xy=row, Gram=sub_covariance, n_samples=row.size, alpha_min=alpha / (n_features - 1), copy_Gram=True, eps=eps, method='lars', return_path=False)
precision_[idx, idx] = 1.0 / (covariance_[idx, idx] - np.dot(covariance_[indices != idx, idx], coefs))
precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
if not np.isfinite(precision_.sum()):
raise FloatingPointError('The system is too ill-conditioned for this solver')
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if inner_verbose:
print('[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e' % (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is too ill-conditioned for this solver')
else:
warnings.warn('graphical_lasso: did not converge after %i iteration: dual gap: %.3e' % (max_iter, d_gap), ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0] + '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
(covariance_, precision_) = (covariance_, precision_, costs, i + 1)
else:
(covariance_, precision_) = (covariance_, precision_, costs)
elif return_n_iter:
(covariance_, precision_) = (covariance_, precision_, i + 1)
else:
(covariance_, precision_) = (covariance_, precision_)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graphical_lasso_path] alpha: %.2e, score: %.2e' % (alpha, this_score))
else:
print('[graphical_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return (covariances_, precisions_, scores_)
return (covariances_, precisions_)
|
def graphical_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd', tol=0.0001, enet_tol=0.0001, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : array-like of shape (n_alphas,)
The list of regularization parameters, decreasing order.
cov_init : array of shape (n_features, n_features), default=None
The initial guess for the covariance.
X_test : array of shape (n_test_samples, n_features), default=None
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}, default='cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : float, default=1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped. The tolerance must be a positive
number.
enet_tol : float, default=1e-4
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'. The tolerance must be a positive number.
max_iter : int, default=100
The maximum number of iterations. This parameter should be a strictly
positive integer.
verbose : int or bool, default=False
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : list of shape (n_alphas,) of ndarray of shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : list of shape (n_alphas,) of ndarray of shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : list of shape (n_alphas,), dtype=float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
<DeepExtract>
(_, n_features) = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = -2.0 * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
(covariance_, precision_) = (emp_cov, precision_, (cost, d_gap), 0)
else:
(covariance_, precision_) = (emp_cov, precision_, (cost, d_gap))
elif return_n_iter:
(covariance_, precision_) = (emp_cov, linalg.inv(emp_cov), 0)
else:
(covariance_, precision_) = (emp_cov, linalg.inv(emp_cov))
if covariance_ is None:
covariance_ = emp_cov.copy()
else:
covariance_ = covariance_.copy()
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = linalg.pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
d_gap = np.inf
sub_covariance = np.copy(covariance_[1:, 1:], order='C')
for i in range(max_iter):
for idx in range(n_features):
if idx > 0:
di = idx - 1
sub_covariance[di] = covariance_[di][indices != idx]
sub_covariance[:, di] = covariance_[:, di][indices != idx]
else:
sub_covariance[:] = covariance_[1:, 1:]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
coefs = -(precision_[indices != idx, idx] / (precision_[idx, idx] + 1000 * eps))
(coefs, _, _, _) = cd_fast.enet_coordinate_descent_gram(coefs, alpha, 0, sub_covariance, row, row, max_iter, enet_tol, check_random_state(None), False)
else:
(_, _, coefs) = lars_path_gram(Xy=row, Gram=sub_covariance, n_samples=row.size, alpha_min=alpha / (n_features - 1), copy_Gram=True, eps=eps, method='lars', return_path=False)
precision_[idx, idx] = 1.0 / (covariance_[idx, idx] - np.dot(covariance_[indices != idx, idx], coefs))
precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
if not np.isfinite(precision_.sum()):
raise FloatingPointError('The system is too ill-conditioned for this solver')
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if inner_verbose:
print('[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e' % (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is too ill-conditioned for this solver')
else:
warnings.warn('graphical_lasso: did not converge after %i iteration: dual gap: %.3e' % (max_iter, d_gap), ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0] + '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
(covariance_, precision_) = (covariance_, precision_, costs, i + 1)
else:
(covariance_, precision_) = (covariance_, precision_, costs)
elif return_n_iter:
(covariance_, precision_) = (covariance_, precision_, i + 1)
else:
(covariance_, precision_) = (covariance_, precision_)
</DeepExtract>
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graphical_lasso_path] alpha: %.2e, score: %.2e' % (alpha, this_score))
else:
print('[graphical_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return (covariances_, precisions_, scores_)
return (covariances_, precisions_)
|
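For each alpha, the path helper above runs one graphical-lasso solve on the empirical covariance and optionally scores the result on held-out data. A hedged equivalent written against the public API (sklearn.covariance.graphical_lasso, assuming a recent scikit-learn) looks like this; the data and alpha grid are illustrative.

# Hedged sketch of what the path helper does per alpha, using the public API.
import numpy as np
from sklearn.covariance import empirical_covariance, graphical_lasso

rng = np.random.RandomState(0)
X = rng.randn(60, 5)                      # 60 samples, 5 features (toy data)
emp_cov = empirical_covariance(X)

covariances, precisions = [], []
for alpha in [0.5, 0.2, 0.1, 0.05]:       # decreasing alphas, as the docstring requires
    cov, prec = graphical_lasso(emp_cov, alpha=alpha, mode="cd", max_iter=100)
    covariances.append(cov)
    precisions.append(prec)
    print("alpha=%.2f, non-zeros in precision: %d" % (alpha, np.count_nonzero(prec)))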
def __init__(self, encoding='latin-1'):
HTMLParser.__init__(self)
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ''
self.body = ''
self.topics = []
self.topic_d = ''
self.encoding = encoding
|
def __init__(self, encoding='latin-1'):
HTMLParser.__init__(self)
<DeepExtract>
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ''
self.body = ''
self.topics = []
self.topic_d = ''
</DeepExtract>
self.encoding = encoding
|
def _solve_W(self, X, H, max_iter):
"""Minimize the objective function w.r.t W.
Update W with H being fixed, until convergence. This is the heart
of `transform` but it's also used during `fit` when doing fresh restarts.
"""
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
W_buffer = W.copy()
(n_samples, n_features) = X.shape
alpha_W = self.alpha_W
alpha_H = self.alpha_W if self.alpha_H == 'same' else self.alpha_H
l1_reg_W = n_features * alpha_W * self.l1_ratio
l1_reg_H = n_samples * alpha_H * self.l1_ratio
l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)
l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)
(l1_reg_W, _, l2_reg_W, _) = (l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H)
for _ in range(max_iter):
if self._beta_loss == 2:
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
numerator = XHt
else:
numerator = XHt.copy()
if HHt is None:
HHt = np.dot(H, H.T)
denominator = np.dot(W, HHt)
else:
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
WH = WH_safe_X.copy()
if self._beta_loss - 1.0 < 0:
WH[WH < EPSILON] = EPSILON
if self._beta_loss - 2.0 < 0:
WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
if self._beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
elif self._beta_loss == 0:
WH_safe_X_data **= -1
WH_safe_X_data **= 2
WH_safe_X_data *= X_data
else:
WH_safe_X_data **= self._beta_loss - 2
WH_safe_X_data *= X_data
numerator = safe_sparse_dot(WH_safe_X, H.T)
if self._beta_loss == 1:
if H_sum is None:
H_sum = np.sum(H, axis=1)
denominator = H_sum[np.newaxis, :]
else:
if sp.issparse(X):
WHHt = np.empty(W.shape)
for i in range(X.shape[0]):
WHi = np.dot(W[i, :], H)
if self._beta_loss - 1 < 0:
WHi[WHi < EPSILON] = EPSILON
WHi **= self._beta_loss - 1
WHHt[i, :] = np.dot(WHi, H.T)
else:
WH **= self._beta_loss - 1
WHHt = np.dot(WH, H.T)
denominator = WHHt
if l1_reg_W > 0:
denominator += l1_reg_W
if l2_reg_W > 0:
denominator = denominator + l2_reg_W * W
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
if self._gamma != 1:
delta_W **= self._gamma
W *= delta_W
(W, *_) = (W, H_sum, HHt, XHt)
W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
if self.tol > 0 and W_diff <= self.tol:
break
W_buffer[:] = W
return W
|
def _solve_W(self, X, H, max_iter):
"""Minimize the objective function w.r.t W.
Update W with H being fixed, until convergence. This is the heart
of `transform` but it's also used during `fit` when doing fresh restarts.
"""
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
W_buffer = W.copy()
<DeepExtract>
(n_samples, n_features) = X.shape
alpha_W = self.alpha_W
alpha_H = self.alpha_W if self.alpha_H == 'same' else self.alpha_H
l1_reg_W = n_features * alpha_W * self.l1_ratio
l1_reg_H = n_samples * alpha_H * self.l1_ratio
l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)
l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)
(l1_reg_W, _, l2_reg_W, _) = (l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H)
</DeepExtract>
for _ in range(max_iter):
<DeepExtract>
if self._beta_loss == 2:
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
numerator = XHt
else:
numerator = XHt.copy()
if HHt is None:
HHt = np.dot(H, H.T)
denominator = np.dot(W, HHt)
else:
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
WH = WH_safe_X.copy()
if self._beta_loss - 1.0 < 0:
WH[WH < EPSILON] = EPSILON
if self._beta_loss - 2.0 < 0:
WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON
if self._beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
elif self._beta_loss == 0:
WH_safe_X_data **= -1
WH_safe_X_data **= 2
WH_safe_X_data *= X_data
else:
WH_safe_X_data **= self._beta_loss - 2
WH_safe_X_data *= X_data
numerator = safe_sparse_dot(WH_safe_X, H.T)
if self._beta_loss == 1:
if H_sum is None:
H_sum = np.sum(H, axis=1)
denominator = H_sum[np.newaxis, :]
else:
if sp.issparse(X):
WHHt = np.empty(W.shape)
for i in range(X.shape[0]):
WHi = np.dot(W[i, :], H)
if self._beta_loss - 1 < 0:
WHi[WHi < EPSILON] = EPSILON
WHi **= self._beta_loss - 1
WHHt[i, :] = np.dot(WHi, H.T)
else:
WH **= self._beta_loss - 1
WHHt = np.dot(WH, H.T)
denominator = WHHt
if l1_reg_W > 0:
denominator += l1_reg_W
if l2_reg_W > 0:
denominator = denominator + l2_reg_W * W
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
if self._gamma != 1:
delta_W **= self._gamma
W *= delta_W
(W, *_) = (W, H_sum, HHt, XHt)
</DeepExtract>
W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
if self.tol > 0 and W_diff <= self.tol:
break
W_buffer[:] = W
return W
|
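_solve_W above is the W half of scikit-learn's multiplicative-update NMF solver, with regularisation terms and the general beta-loss folded in. The sketch below strips it down to the plain Frobenius case (beta_loss=2, no regularisation) to show the core update and the relative-change stopping rule; it is an illustration in plain NumPy, not the library code.

# Multiplicative W update for the Frobenius loss: W is scaled by
# (X H^T) / (W H H^T) until the relative change in W drops below tol.
import numpy as np

def solve_w_frobenius(X, H, max_iter=200, tol=1e-4, eps=1e-10):
    n_samples, n_components = X.shape[0], H.shape[0]
    avg = np.sqrt(X.mean() / n_components)
    W = np.full((n_samples, n_components), avg)      # same flat initialisation as above
    W_prev = W.copy()
    HHt = H @ H.T                                    # fixed while H is fixed
    XHt = X @ H.T
    for _ in range(max_iter):
        W *= XHt / np.maximum(W @ HHt, eps)          # multiplicative update
        diff = np.linalg.norm(W - W_prev) / np.linalg.norm(W)
        if diff <= tol:
            break
        W_prev[:] = W
    return W

rng = np.random.RandomState(0)
X = np.abs(rng.randn(20, 8))
H = np.abs(rng.randn(3, 8))
W = solve_w_frobenius(X, H)
print(W.shape, np.linalg.norm(X - W @ H))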
def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
- explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamlined = True
self.strRepr = None
return self
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
(loc, tokens) = self._parse(instring, 0)
if parseAll:
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
loc = loc
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
else:
return tokens
|
def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
- explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
<DeepExtract>
self.streamlined = True
self.strRepr = None
return self
</DeepExtract>
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
(loc, tokens) = self._parse(instring, 0)
if parseAll:
<DeepExtract>
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
loc = loc
</DeepExtract>
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
else:
return tokens
|
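The docstring above warns that parseString expands tabs, so the loc passed to a parse action indexes a tab-expanded copy of the input unless parseWithTabs() is used (or tabs are expanded up front). A small hedged illustration, using the classic camelCase pyparsing API seen in the snippet and Python's default tab size of 8:

from pyparsing import Word, alphas

text = "\tkey = 1"                          # a leading tab before the token

def show_loc(s, loc, toks):
    print("token %r starts at loc %d" % (toks[0], loc))

plain = Word(alphas).setParseAction(show_loc)
plain.parseString(text)                      # loc indexes the tab-expanded copy (loc 8 here)

with_tabs = Word(alphas).setParseAction(show_loc).parseWithTabs()
with_tabs.parseString(text)                  # loc indexes the original string (loc 1 here)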
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
"""
check_is_fitted(self)
dtype = [np.float64, np.float32] if False else self.components_.dtype
X = self._validate_data(X, reset=False, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.score')
X = X
(doc_topic_distr, _) = self._e_step(X, cal_sstats=False, random_init=False)
doc_topic_distr = doc_topic_distr
def _loglikelihood(prior, distr, dirichlet_distr, size):
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
score = score
is_sparse_x = sp.issparse(X)
(n_samples, n_components) = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in range(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
score += _loglikelihood(doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components)
if False:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
score += _loglikelihood(topic_word_prior, self.components_, dirichlet_component_, n_features)
score = score
return score
|
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
"""
check_is_fitted(self)
<DeepExtract>
dtype = [np.float64, np.float32] if False else self.components_.dtype
X = self._validate_data(X, reset=False, accept_sparse='csr', dtype=dtype)
check_non_negative(X, 'LatentDirichletAllocation.score')
X = X
</DeepExtract>
<DeepExtract>
(doc_topic_distr, _) = self._e_step(X, cal_sstats=False, random_init=False)
doc_topic_distr = doc_topic_distr
</DeepExtract>
<DeepExtract>
def _loglikelihood(prior, distr, dirichlet_distr, size):
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
score = score
is_sparse_x = sp.issparse(X)
(n_samples, n_components) = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in range(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
score += _loglikelihood(doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components)
if False:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
score += _loglikelihood(topic_word_prior, self.components_, dirichlet_component_, n_features)
score = score
</DeepExtract>
return score
|
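score above computes the variational lower bound on the log-likelihood used by scikit-learn's LatentDirichletAllocation. A brief hedged usage sketch with an illustrative toy corpus:

# Fit LatentDirichletAllocation on a small bag-of-words matrix and read off the
# approximate bound; corpus and parameters are illustrative.
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

docs = [
    "the cat sat on the mat",
    "dogs and cats are pets",
    "the stock market fell today",
    "investors sold shares in the market",
]
X = CountVectorizer().fit_transform(docs)           # sparse document-word matrix

lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)
print("approximate bound:", lda.score(X))           # higher is better
print("perplexity:", lda.perplexity(X))             # derived from the same bound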