| before (string, lengths 87 to 36.6k) | after (string, lengths 116 to 37.2k) |
|---|---|
def test_threshold(global_random_seed, global_dtype):
    (X, y) = make_blobs(n_samples=80, centers=4, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)
    brc = Birch(threshold=0.5, n_clusters=None)
    brc.fit(X)
    current_leaf = brc.dummy_leaf_.next_leaf_
    while current_leaf:
        subclusters = current_leaf.subclusters_
        for sc in subclusters:
            assert 0.5 >= sc.radius
        current_leaf = current_leaf.next_leaf_
    brc = Birch(threshold=5.0, n_clusters=None)
    brc.fit(X)
    current_leaf = brc.dummy_leaf_.next_leaf_
    while current_leaf:
        subclusters = current_leaf.subclusters_
        for sc in subclusters:
            assert 5.0 >= sc.radius
        current_leaf = current_leaf.next_leaf_
|
def test_threshold(global_random_seed, global_dtype):
    (X, y) = make_blobs(n_samples=80, centers=4, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)
    brc = Birch(threshold=0.5, n_clusters=None)
    brc.fit(X)
    <DeepExtract>
    current_leaf = brc.dummy_leaf_.next_leaf_
    while current_leaf:
        subclusters = current_leaf.subclusters_
        for sc in subclusters:
            assert 0.5 >= sc.radius
        current_leaf = current_leaf.next_leaf_
    </DeepExtract>
    brc = Birch(threshold=5.0, n_clusters=None)
    brc.fit(X)
    <DeepExtract>
    current_leaf = brc.dummy_leaf_.next_leaf_
    while current_leaf:
        subclusters = current_leaf.subclusters_
        for sc in subclusters:
            assert 5.0 >= sc.radius
        current_leaf = current_leaf.next_leaf_
    </DeepExtract>
|
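The pair above asserts that, after fitting, every leaf subcluster of a `Birch` tree has a radius no larger than the requested `threshold`. A minimal standalone sketch of the same property check (the blob parameters and the 0.5 threshold are illustrative choices, not part of the dataset row):

```python
# Walk the linked list of Birch leaves and confirm each subcluster's radius
# stays within the threshold used at construction time.
from sklearn.cluster import Birch
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=80, centers=4, random_state=0)
brc = Birch(threshold=0.5, n_clusters=None).fit(X)

leaf = brc.dummy_leaf_.next_leaf_
while leaf is not None:
    for sc in leaf.subclusters_:
        assert sc.radius <= 0.5
    leaf = leaf.next_leaf_
```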
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
"""Check the consistency of the LIAC-ARFF and pandas parsers."""
pd = pytest.importorskip('pandas')
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
bunch_liac = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='liac-arff')
bunch_pandas = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='pandas')
(data_liac, data_pandas) = (bunch_liac.data, bunch_pandas.data)
def convert_numerical_dtypes(series):
pandas_series = data_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
else:
return series
data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
(frame_liac, frame_pandas) = (bunch_liac.frame, bunch_pandas.frame)
pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
def convert_numerical_and_categorical_dtypes(series):
pandas_series = frame_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
elif pd.api.types.is_categorical_dtype(pandas_series):
return series.cat.rename_categories(pandas_series.cat.categories)
else:
return series
frame_liac_with_fixed_dtypes = frame_liac.apply(convert_numerical_and_categorical_dtypes)
pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
|
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
"""Check the consistency of the LIAC-ARFF and pandas parsers."""
pd = pytest.importorskip('pandas')
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
bunch_liac = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='liac-arff')
bunch_pandas = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='pandas')
(data_liac, data_pandas) = (bunch_liac.data, bunch_pandas.data)
def convert_numerical_dtypes(series):
pandas_series = data_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
else:
return series
data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
(frame_liac, frame_pandas) = (bunch_liac.frame, bunch_pandas.frame)
pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
def convert_numerical_and_categorical_dtypes(series):
pandas_series = frame_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
elif pd.api.types.is_categorical_dtype(pandas_series):
return series.cat.rename_categories(pandas_series.cat.categories)
else:
return series
frame_liac_with_fixed_dtypes = frame_liac.apply(convert_numerical_and_categorical_dtypes)
pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
|
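The rows above hinge on harmonising numeric dtypes before comparing the LIAC-ARFF and pandas parser outputs. A small self-contained sketch of just that harmonisation step, using two hypothetical frames in place of the `fetch_openml` results:

```python
# Cast numeric columns of one frame to the dtypes of the other before an
# exact frame comparison; non-numeric columns are left untouched.
import pandas as pd

df_liac = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})          # int64 column
df_pandas = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": ["x", "y", "z"]})  # float64 column

def convert_numerical_dtypes(series):
    other = df_pandas[series.name]
    if pd.api.types.is_numeric_dtype(other):
        return series.astype(other.dtype)
    return series

pd.testing.assert_frame_equal(df_liac.apply(convert_numerical_dtypes), df_pandas)
```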
def check_fit_check_is_fitted(name, estimator_orig):
    rng = np.random.RandomState(42)
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    if 'warm_start' in estimator.get_params():
        estimator.set_params(warm_start=False)
    n_samples = 100
    X = rng.normal(loc=100, size=(n_samples, 2))
    if '1darray' in _safe_tags(estimator, key='X_types'):
        X = X[:, 0]
    if _safe_tags(estimator, key='requires_positive_X'):
        X = X - X.min()
    if 'categorical' in _safe_tags(estimator, key='X_types'):
        X = (X - X.min()).astype(np.int32)
    if estimator.__class__.__name__ == 'SkewedChi2Sampler':
        X = X - X.min()
    if _is_pairwise_metric(estimator):
        X = pairwise_distances(X, metric='euclidean')
    elif _safe_tags(estimator, key='pairwise'):
        X = kernel(X, X)
    X = X
    if is_regressor(estimator_orig):
        y = rng.normal(size=n_samples)
    else:
        y = rng.randint(low=0, high=2, size=n_samples)
    if _safe_tags(estimator, key='requires_positive_y'):
        y += 1 + abs(y.min())
    if _safe_tags(estimator, key='binary_only') and y.size > 0:
        y = np.where(y == y.flat[0], y, y.flat[0] + 1)
    if _safe_tags(estimator, key='multioutput_only'):
        y = np.reshape(y, (-1, 1))
    y = y
    if not _safe_tags(estimator).get('stateless', False):
        try:
            check_is_fitted(estimator)
            raise AssertionError(f'{estimator.__class__.__name__} passes check_is_fitted before being fit!')
        except NotFittedError:
            pass
    estimator.fit(X, y)
    try:
        check_is_fitted(estimator)
    except NotFittedError as e:
        raise NotFittedError('Estimator fails to pass `check_is_fitted` even though it has been fit.') from e
|
def check_fit_check_is_fitted(name, estimator_orig):
    rng = np.random.RandomState(42)
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    if 'warm_start' in estimator.get_params():
        estimator.set_params(warm_start=False)
    n_samples = 100
    X = rng.normal(loc=100, size=(n_samples, 2))
    <DeepExtract>
    if '1darray' in _safe_tags(estimator, key='X_types'):
        X = X[:, 0]
    if _safe_tags(estimator, key='requires_positive_X'):
        X = X - X.min()
    if 'categorical' in _safe_tags(estimator, key='X_types'):
        X = (X - X.min()).astype(np.int32)
    if estimator.__class__.__name__ == 'SkewedChi2Sampler':
        X = X - X.min()
    if _is_pairwise_metric(estimator):
        X = pairwise_distances(X, metric='euclidean')
    elif _safe_tags(estimator, key='pairwise'):
        X = kernel(X, X)
    X = X
    </DeepExtract>
    if is_regressor(estimator_orig):
        y = rng.normal(size=n_samples)
    else:
        y = rng.randint(low=0, high=2, size=n_samples)
    <DeepExtract>
    if _safe_tags(estimator, key='requires_positive_y'):
        y += 1 + abs(y.min())
    if _safe_tags(estimator, key='binary_only') and y.size > 0:
        y = np.where(y == y.flat[0], y, y.flat[0] + 1)
    if _safe_tags(estimator, key='multioutput_only'):
        y = np.reshape(y, (-1, 1))
    y = y
    </DeepExtract>
    if not _safe_tags(estimator).get('stateless', False):
        try:
            check_is_fitted(estimator)
            raise AssertionError(f'{estimator.__class__.__name__} passes check_is_fitted before being fit!')
        except NotFittedError:
            pass
    estimator.fit(X, y)
    try:
        check_is_fitted(estimator)
    except NotFittedError as e:
        raise NotFittedError('Estimator fails to pass `check_is_fitted` even though it has been fit.') from e
|
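The estimator check above relies on `check_is_fitted` raising `NotFittedError` before `fit` and passing silently afterwards. A minimal sketch of that contract with a stock estimator (`LogisticRegression` is only an illustrative choice):

```python
import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_is_fitted

rng = np.random.RandomState(42)
X = rng.normal(size=(100, 2))
y = rng.randint(0, 2, size=100)

est = LogisticRegression()
try:
    check_is_fitted(est)   # must raise: nothing has been fitted yet
except NotFittedError:
    pass

est.fit(X, y)
check_is_fitted(est)       # must now pass without raising
```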
def fit_transform(X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32])
with config_context(assume_finite=True):
check_non_negative(X, 'NMF (input X)')
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError('When beta_loss <= 0 and X contains zeros, the solver may diverge. Please add small values to X, or use a positive beta_loss.')
(W, H) = self._check_w_h(X, W, H, update_H)
(l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H) = self._compute_regularization(X)
if self.solver == 'cd':
(W, H, n_iter) = _fit_coordinate_descent(X, W, H, self.tol, self.max_iter, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H=update_H, verbose=self.verbose, shuffle=self.shuffle, random_state=self.random_state)
elif self.solver == 'mu':
(W, H, n_iter, *_) = _fit_multiplicative_update(X, W, H, self._beta_loss, self.max_iter, self.tol, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H, self.verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % self.solver)
if n_iter == self.max_iter and self.tol > 0:
warnings.warn('Maximum number of iterations %d reached. Increase it to improve convergence.' % self.max_iter, ConvergenceWarning)
(W, H, n_iter) = (W, H, n_iter)
self._beta_loss = _beta_loss_to_float(self._beta_loss)
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
if self._beta_loss == 2:
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
cross_prod = trace_dot(X * H.T, W)
res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
else:
res = squared_norm(X - np.dot(W, H)) / 2.0
if True:
self.reconstruction_err_ = np.sqrt(res * 2)
else:
self.reconstruction_err_ = res
if sp.issparse(X):
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
WH_data[WH_data < EPSILON] = EPSILON
if self._beta_loss == 1:
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
res += sum_WH - X_data.sum()
elif self._beta_loss == 0:
div = X_data / WH_data
res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
else:
if sp.issparse(X):
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** self._beta_loss)
else:
sum_WH_beta = np.sum(WH ** self._beta_loss)
sum_X_WH = np.dot(X_data, WH_data ** (self._beta_loss - 1))
res = (X_data ** self._beta_loss).sum() - self._beta_loss * sum_X_WH
res += sum_WH_beta * (self._beta_loss - 1)
res /= self._beta_loss * (self._beta_loss - 1)
if True:
res = max(res, 0)
self.reconstruction_err_ = np.sqrt(2 * res)
else:
self.reconstruction_err_ = res
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
return W
|
def fit_transform(X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32])
with config_context(assume_finite=True):
<DeepExtract>
check_non_negative(X, 'NMF (input X)')
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError('When beta_loss <= 0 and X contains zeros, the solver may diverge. Please add small values to X, or use a positive beta_loss.')
(W, H) = self._check_w_h(X, W, H, update_H)
(l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H) = self._compute_regularization(X)
if self.solver == 'cd':
(W, H, n_iter) = _fit_coordinate_descent(X, W, H, self.tol, self.max_iter, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H=update_H, verbose=self.verbose, shuffle=self.shuffle, random_state=self.random_state)
elif self.solver == 'mu':
(W, H, n_iter, *_) = _fit_multiplicative_update(X, W, H, self._beta_loss, self.max_iter, self.tol, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H, self.verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % self.solver)
if n_iter == self.max_iter and self.tol > 0:
warnings.warn('Maximum number of iterations %d reached. Increase it to improve convergence.' % self.max_iter, ConvergenceWarning)
(W, H, n_iter) = (W, H, n_iter)
</DeepExtract>
<DeepExtract>
self._beta_loss = _beta_loss_to_float(self._beta_loss)
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
if self._beta_loss == 2:
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
cross_prod = trace_dot(X * H.T, W)
res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
else:
res = squared_norm(X - np.dot(W, H)) / 2.0
if True:
self.reconstruction_err_ = np.sqrt(res * 2)
else:
self.reconstruction_err_ = res
if sp.issparse(X):
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
WH_data[WH_data < EPSILON] = EPSILON
if self._beta_loss == 1:
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
res += sum_WH - X_data.sum()
elif self._beta_loss == 0:
div = X_data / WH_data
res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
else:
if sp.issparse(X):
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** self._beta_loss)
else:
sum_WH_beta = np.sum(WH ** self._beta_loss)
sum_X_WH = np.dot(X_data, WH_data ** (self._beta_loss - 1))
res = (X_data ** self._beta_loss).sum() - self._beta_loss * sum_X_WH
res += sum_WH_beta * (self._beta_loss - 1)
res /= self._beta_loss * (self._beta_loss - 1)
if True:
res = max(res, 0)
self.reconstruction_err_ = np.sqrt(2 * res)
else:
self.reconstruction_err_ = res
</DeepExtract>
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
return W
|
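`fit_transform` above returns `W` and stores `components_` (the `H` factor) together with `n_components_`, `n_iter_` and `reconstruction_err_`. A short usage sketch on a small non-negative matrix (the shapes, `init` and `max_iter` values are illustrative):

```python
import numpy as np
from sklearn.decomposition import NMF

rng = np.random.RandomState(0)
X = np.abs(rng.normal(size=(20, 6)))   # NMF requires non-negative input

nmf = NMF(n_components=3, init="nndsvda", max_iter=500, random_state=0)
W = nmf.fit_transform(X)               # (20, 3) transformed data
H = nmf.components_                    # (3, 6) dictionary

print(W.shape, H.shape, nmf.n_iter_, nmf.reconstruction_err_)
```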
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
self.length_scale = np.squeeze(self.length_scale).astype(float)
if np.ndim(self.length_scale) > 1:
raise ValueError('length_scale cannot be of dimension greater than 1')
if np.ndim(self.length_scale) == 1 and X.shape[1] != self.length_scale.shape[0]:
raise ValueError('Anisotropic kernel must have the same number of dimensions as data (%d!=%d)' % (self.length_scale.shape[0], X.shape[1]))
self.length_scale = self.length_scale
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
return (K, np.empty((X.shape[0], X.shape[0], 0)))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return (K, K_gradient)
elif self.anisotropic:
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2
K_gradient *= K[..., np.newaxis]
return (K, K_gradient)
else:
return K
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
<DeepExtract>
self.length_scale = np.squeeze(self.length_scale).astype(float)
if np.ndim(self.length_scale) > 1:
raise ValueError('length_scale cannot be of dimension greater than 1')
if np.ndim(self.length_scale) == 1 and X.shape[1] != self.length_scale.shape[0]:
raise ValueError('Anisotropic kernel must have the same number of dimensions as data (%d!=%d)' % (self.length_scale.shape[0], X.shape[1]))
self.length_scale = self.length_scale
</DeepExtract>
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
return (K, np.empty((X.shape[0], X.shape[0], 0)))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return (K, K_gradient)
elif self.anisotropic:
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2
K_gradient *= K[..., np.newaxis]
return (K, K_gradient)
else:
return K
|
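The docstring above notes that the gradient with respect to the log length scale is only available when `Y is None`. A brief sketch of both call modes for an isotropic RBF kernel (data and length scale chosen arbitrarily):

```python
import numpy as np
from sklearn.gaussian_process.kernels import RBF

X = np.random.RandomState(0).normal(size=(5, 2))
Y = np.random.RandomState(1).normal(size=(3, 2))
kernel = RBF(length_scale=1.0)

K_xy = kernel(X, Y)                       # (5, 3); eval_gradient is not allowed here
K, K_grad = kernel(X, eval_gradient=True)
print(K_xy.shape, K.shape, K_grad.shape)  # (5, 3) (5, 5) (5, 5, 1)
```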
def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
X = self._validate_data(X, reset=True, accept_sparse=False, ensure_min_samples=2, ensure_2d=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(_, n_features) = X.shape
if isinstance(self.knots, str):
if self.knots == 'quantile':
percentiles = 100 * np.linspace(start=0, stop=1, num=self.n_knots, dtype=np.float64)
if sample_weight is None:
self.knots = np.percentile(X, percentiles, axis=0)
else:
self.knots = np.array([_weighted_percentile(X, sample_weight, percentile) for percentile in percentiles])
else:
mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
x_min = np.amin(X[mask], axis=0)
x_max = np.amax(X[mask], axis=0)
self.knots = np.linspace(start=x_min, stop=x_max, num=self.n_knots, endpoint=True, dtype=np.float64)
base_knots = self.knots
else:
base_knots = check_array(self.knots, dtype=np.float64)
if base_knots.shape[0] < 2:
raise ValueError('Number of knots, knots.shape[0], must be >= 2.')
elif base_knots.shape[1] != n_features:
raise ValueError('knots.shape[1] == n_features is violated.')
elif not np.all(np.diff(base_knots, axis=0) > 0):
raise ValueError('knots must be sorted without duplicates.')
n_knots = base_knots.shape[0]
if self.extrapolation == 'periodic' and n_knots <= self.degree:
raise ValueError(f'Periodic splines require degree < n_knots. Got n_knots={n_knots} and degree={self.degree}.')
if self.extrapolation != 'periodic':
n_splines = n_knots + self.degree - 1
else:
n_splines = n_knots - 1
degree = self.degree
n_out = n_features * n_splines
if self.extrapolation == 'periodic':
period = base_knots[-1] - base_knots[0]
knots = np.r_[base_knots[-(degree + 1):-1] - period, base_knots, base_knots[1:degree + 1] + period]
else:
dist_min = base_knots[1] - base_knots[0]
dist_max = base_knots[-1] - base_knots[-2]
knots = np.r_[np.linspace(base_knots[0] - degree * dist_min, base_knots[0] - dist_min, num=degree), base_knots, np.linspace(base_knots[-1] + dist_max, base_knots[-1] + degree * dist_max, num=degree)]
coef = np.eye(n_splines, dtype=np.float64)
if self.extrapolation == 'periodic':
coef = np.concatenate((coef, coef[:degree, :]))
extrapolate = self.extrapolation in ['periodic', 'continue']
bsplines = [BSpline.construct_fast(knots[:, i], coef, self.degree, extrapolate=extrapolate) for i in range(n_features)]
self.bsplines_ = bsplines
self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
return self
|
def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
X = self._validate_data(X, reset=True, accept_sparse=False, ensure_min_samples=2, ensure_2d=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(_, n_features) = X.shape
if isinstance(self.knots, str):
<DeepExtract>
if self.knots == 'quantile':
percentiles = 100 * np.linspace(start=0, stop=1, num=self.n_knots, dtype=np.float64)
if sample_weight is None:
self.knots = np.percentile(X, percentiles, axis=0)
else:
self.knots = np.array([_weighted_percentile(X, sample_weight, percentile) for percentile in percentiles])
else:
mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
x_min = np.amin(X[mask], axis=0)
x_max = np.amax(X[mask], axis=0)
self.knots = np.linspace(start=x_min, stop=x_max, num=self.n_knots, endpoint=True, dtype=np.float64)
base_knots = self.knots
</DeepExtract>
else:
base_knots = check_array(self.knots, dtype=np.float64)
if base_knots.shape[0] < 2:
raise ValueError('Number of knots, knots.shape[0], must be >= 2.')
elif base_knots.shape[1] != n_features:
raise ValueError('knots.shape[1] == n_features is violated.')
elif not np.all(np.diff(base_knots, axis=0) > 0):
raise ValueError('knots must be sorted without duplicates.')
n_knots = base_knots.shape[0]
if self.extrapolation == 'periodic' and n_knots <= self.degree:
raise ValueError(f'Periodic splines require degree < n_knots. Got n_knots={n_knots} and degree={self.degree}.')
if self.extrapolation != 'periodic':
n_splines = n_knots + self.degree - 1
else:
n_splines = n_knots - 1
degree = self.degree
n_out = n_features * n_splines
if self.extrapolation == 'periodic':
period = base_knots[-1] - base_knots[0]
knots = np.r_[base_knots[-(degree + 1):-1] - period, base_knots, base_knots[1:degree + 1] + period]
else:
dist_min = base_knots[1] - base_knots[0]
dist_max = base_knots[-1] - base_knots[-2]
knots = np.r_[np.linspace(base_knots[0] - degree * dist_min, base_knots[0] - dist_min, num=degree), base_knots, np.linspace(base_knots[-1] + dist_max, base_knots[-1] + degree * dist_max, num=degree)]
coef = np.eye(n_splines, dtype=np.float64)
if self.extrapolation == 'periodic':
coef = np.concatenate((coef, coef[:degree, :]))
extrapolate = self.extrapolation in ['periodic', 'continue']
bsplines = [BSpline.construct_fast(knots[:, i], coef, self.degree, extrapolate=extrapolate) for i in range(n_features)]
self.bsplines_ = bsplines
self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
return self
|
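The `fit` method above places knots either uniformly over the (possibly weight-filtered) data range or at weighted quantiles. A usage sketch comparing the two strategies on a skewed feature (the data and the `n_knots`/`degree` values are arbitrary):

```python
import numpy as np
from sklearn.preprocessing import SplineTransformer

rng = np.random.RandomState(0)
X = rng.exponential(size=(100, 1))   # skewed single feature

uniform = SplineTransformer(n_knots=5, degree=3, knots="uniform").fit(X)
quantile = SplineTransformer(n_knots=5, degree=3, knots="quantile").fit(X)

# Each fitted transformer exposes one BSpline per feature and the number of
# generated basis columns.
print(len(uniform.bsplines_), uniform.n_features_out_)
print(uniform.transform(X).shape, quantile.transform(X).shape)
```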
def _fit_binary(X, y, alpha, C, sample_weight, learning_rate, max_iter):
"""Fit a binary classifier on X and y."""
(y_i, coef, intercept, average_coef, average_intercept) = _prepare_fit_binary(self, y, 1, input_dtye=X.dtype)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
self.random_state = check_random_state(self.random_state)
(dataset, intercept_decay) = make_dataset(X, y_i, sample_weight, random_state=self.random_state)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = self._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = self._make_validation_score_cb(validation_mask, X, y_i, sample_weight, classes=classes)
seed = self.random_state.randint(MAX_INT)
tol = self.tol if self.tol is not None else -np.inf
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
(coef, intercept, average_coef, average_intercept, n_iter_) = _plain_sgd(coef, intercept, average_coef, average_intercept, self.loss_function_, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, self._expanded_class_weight[1], self._expanded_class_weight[0], learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average)
if self.average:
if len(self.classes_) == 2:
self._average_intercept[0] = average_intercept
else:
self._average_intercept[1] = average_intercept
(coef, intercept, n_iter_) = (coef, intercept, n_iter_)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
self.intercept_ = np.atleast_1d(intercept)
|
def _fit_binary(X, y, alpha, C, sample_weight, learning_rate, max_iter):
"""Fit a binary classifier on X and y."""
<DeepExtract>
(y_i, coef, intercept, average_coef, average_intercept) = _prepare_fit_binary(self, y, 1, input_dtye=X.dtype)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
self.random_state = check_random_state(self.random_state)
(dataset, intercept_decay) = make_dataset(X, y_i, sample_weight, random_state=self.random_state)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = self._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = self._make_validation_score_cb(validation_mask, X, y_i, sample_weight, classes=classes)
seed = self.random_state.randint(MAX_INT)
tol = self.tol if self.tol is not None else -np.inf
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
(coef, intercept, average_coef, average_intercept, n_iter_) = _plain_sgd(coef, intercept, average_coef, average_intercept, self.loss_function_, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, self._expanded_class_weight[1], self._expanded_class_weight[0], learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average)
if self.average:
if len(self.classes_) == 2:
self._average_intercept[0] = average_intercept
else:
self._average_intercept[1] = average_intercept
(coef, intercept, n_iter_) = (coef, intercept, n_iter_)
</DeepExtract>
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
self.intercept_ = np.atleast_1d(intercept)
|
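The private helper above feeds one binary problem to the SGD routine and, when `average > 0`, swaps in the averaged weights. Seen from the public API this roughly corresponds to fitting an averaged `SGDClassifier` (a sketch with arbitrary data and hyperparameters):

```python
import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 5))
y = (X[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)

clf = SGDClassifier(loss="hinge", average=True, max_iter=100, tol=1e-3, random_state=0)
clf.fit(X, y)
print(clf.coef_.shape, clf.intercept_, clf.n_iter_)
```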
@fails_if_pypy
@pytest.mark.filterwarnings('ignore:Version 1 of dataset Australian is inactive')
@pytest.mark.parametrize('data_id, data_type', [(61, 'dataframe'), (292, 'sparse')])
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
"""Check the auto mode of `fetch_openml`."""
pd = pytest.importorskip('pandas')
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
data = fetch_openml(data_id=data_id, as_frame='auto', parser='auto', cache=False)
klass = pd.DataFrame if data_type == 'dataframe' else scipy.sparse.csr_matrix
assert isinstance(data.data, klass)
|
@fails_if_pypy
@pytest.mark.filterwarnings('ignore:Version 1 of dataset Australian is inactive')
@pytest.mark.parametrize('data_id, data_type', [(61, 'dataframe'), (292, 'sparse')])
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
"""Check the auto mode of `fetch_openml`."""
pd = pytest.importorskip('pandas')
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
data = fetch_openml(data_id=data_id, as_frame='auto', parser='auto', cache=False)
klass = pd.DataFrame if data_type == 'dataframe' else scipy.sparse.csr_matrix
assert isinstance(data.data, klass)
|
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=True`."""
pd = pytest.importorskip('pandas')
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=False, parser=parser)
(X, y) = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=True, parser=parser)
pd.testing.assert_frame_equal(bunch.data, X)
if isinstance(y, pd.Series):
pd.testing.assert_series_equal(bunch.target, y)
else:
pd.testing.assert_frame_equal(bunch.target, y)
|
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=True`."""
pd = pytest.importorskip('pandas')
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=False, parser=parser)
(X, y) = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=True, parser=parser)
pd.testing.assert_frame_equal(bunch.data, X)
if isinstance(y, pd.Series):
pd.testing.assert_series_equal(bunch.target, y)
else:
pd.testing.assert_frame_equal(bunch.target, y)
|
def predict_log_proba(self, X):
    """Predict class log-probabilities of the input samples X.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.
    Returns
    -------
    proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
        The class log-probabilities of the input samples. The order of the
        classes corresponds to that in the attribute :term:`classes_`.
    """
    check_is_fitted(self)
    X = self._validate_X_predict(X, check_input)
    proba = self.tree_.predict(X)
    if self.n_outputs_ == 1:
        proba = proba[:, :self.n_classes_]
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer
        proba = proba
    else:
        all_proba = []
        for k in range(self.n_outputs_):
            proba_k = proba[:, k, :self.n_classes_[k]]
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer
            all_proba.append(proba_k)
        proba = all_proba
    if self.n_outputs_ == 1:
        return np.log(proba)
    else:
        for k in range(self.n_outputs_):
            proba[k] = np.log(proba[k])
        return proba
|
def predict_log_proba(self, X):
    """Predict class log-probabilities of the input samples X.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.
    Returns
    -------
    proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
        The class log-probabilities of the input samples. The order of the
        classes corresponds to that in the attribute :term:`classes_`.
    """
    <DeepExtract>
    check_is_fitted(self)
    X = self._validate_X_predict(X, check_input)
    proba = self.tree_.predict(X)
    if self.n_outputs_ == 1:
        proba = proba[:, :self.n_classes_]
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer
        proba = proba
    else:
        all_proba = []
        for k in range(self.n_outputs_):
            proba_k = proba[:, k, :self.n_classes_[k]]
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer
            all_proba.append(proba_k)
        proba = all_proba
    </DeepExtract>
    if self.n_outputs_ == 1:
        return np.log(proba)
    else:
        for k in range(self.n_outputs_):
            proba[k] = np.log(proba[k])
        return proba
|
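As documented above, `predict_log_proba` is the elementwise logarithm of `predict_proba` (per output for multi-output trees). A quick single-output sketch (dataset and tree depth are arbitrary):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, n_features=5, flip_y=0.2, random_state=0)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

log_proba = clf.predict_log_proba(X[:5])
proba = clf.predict_proba(X[:5])
assert np.allclose(np.exp(log_proba), proba)   # exp(log p) recovers the probabilities
```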
def test_lda_score_perplexity():
    n_components = 3
    block = np.full((3, 3), n_components, dtype=int)
    blocks = [block] * n_components
    X = block_diag(*blocks)
    X = csr_matrix(X)
    (n_components, X) = (n_components, X)
    lda = LatentDirichletAllocation(n_components=n_components, max_iter=10, random_state=0)
    lda.fit(X)
    perplexity_1 = lda.perplexity(X, sub_sampling=False)
    score = lda.score(X)
    perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
    assert_almost_equal(perplexity_1, perplexity_2)
|
def test_lda_score_perplexity():
    <DeepExtract>
    n_components = 3
    block = np.full((3, 3), n_components, dtype=int)
    blocks = [block] * n_components
    X = block_diag(*blocks)
    X = csr_matrix(X)
    (n_components, X) = (n_components, X)
    </DeepExtract>
    lda = LatentDirichletAllocation(n_components=n_components, max_iter=10, random_state=0)
    lda.fit(X)
    perplexity_1 = lda.perplexity(X, sub_sampling=False)
    score = lda.score(X)
    perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
    assert_almost_equal(perplexity_1, perplexity_2)
|
def _init_centroids(self, X, x_squared_norms, init, random_state, init_size=None, n_centroids=None, sample_weight=None):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hands already to avoid it being recomputed here.
init : {'k-means++', 'random'}, callable or ndarray of shape (n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
n_centroids : int, default=None
Number of centroids to initialize.
If left to 'None' the number of centroids will be equal to
number of clusters to form (self.n_clusters)
sample_weight : ndarray of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
"""
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
(n_samples, n_features) = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
if n_local_trials is None:
n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
closest_dist_sq = _euclidean_distances(centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq @ sample_weight
for c in range(1, n_clusters):
rand_vals = random_state.uniform(size=n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(sample_weight * closest_dist_sq), rand_vals)
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
distance_to_candidates = _euclidean_distances(X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
(centers, _) = (centers, indices)
elif isinstance(init, str) and init == 'random':
seeds = random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
if centers.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of clusters {self.n_clusters}.')
if centers.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of features of the data {X.shape[1]}.')
if sp.issparse(centers):
centers = centers.toarray()
return centers
|
def _init_centroids(self, X, x_squared_norms, init, random_state, init_size=None, n_centroids=None, sample_weight=None):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hands already to avoid it being recomputed here.
init : {'k-means++', 'random'}, callable or ndarray of shape (n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
n_centroids : int, default=None
Number of centroids to initialize.
If left to 'None' the number of centroids will be equal to
number of clusters to form (self.n_clusters)
sample_weight : ndarray of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
"""
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
<DeepExtract>
(n_samples, n_features) = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
if n_local_trials is None:
n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
closest_dist_sq = _euclidean_distances(centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq @ sample_weight
for c in range(1, n_clusters):
rand_vals = random_state.uniform(size=n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(sample_weight * closest_dist_sq), rand_vals)
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
distance_to_candidates = _euclidean_distances(X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
(centers, _) = (centers, indices)
</DeepExtract>
elif isinstance(init, str) and init == 'random':
seeds = random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
<DeepExtract>
if centers.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of clusters {self.n_clusters}.')
if centers.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of features of the data {X.shape[1]}.')
</DeepExtract>
if sp.issparse(centers):
centers = centers.toarray()
return centers
|
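The `_init_centroids` pair above inlines the k-means++ seeding loop: the first center is drawn at random, each later one with probability proportional to its squared distance from the closest already-chosen center. A usage sketch of the public helper `sklearn.cluster.kmeans_plusplus`, which exposes the same seeding; data and parameters below are illustrative.

import numpy as np
from sklearn.cluster import kmeans_plusplus
from sklearn.datasets import make_blobs

# Four well-separated blobs in 2-D.
X, _ = make_blobs(n_samples=200, centers=4, random_state=0)

# Returns the seed coordinates and the row indices they were taken from.
centers, indices = kmeans_plusplus(X, n_clusters=4, random_state=0)
print(centers.shape)  # (4, 2)
print(indices)        # row indices of the chosen seed points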
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamlined = True
self.strRepr = None
return self
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
(nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield (tokens, preloc, nextLoc)
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
|
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
<DeepExtract>
self.streamlined = True
self.strRepr = None
return self
</DeepExtract>
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
(nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield (tokens, preloc, nextLoc)
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
|
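The `scanString` pair above documents `maxMatches` and `overlap`, but its docstring example only shows a default scan. A short usage sketch, assuming the standalone `pyparsing` package (the code above is a vendored copy; recent releases keep the camelCase name as an alias).

from pyparsing import Word, nums

source = "ab12cd345ef6789"
integer = Word(nums)

# maxMatches=2 clips the scan after the first two integer matches.
for tokens, start, end in integer.scanString(source, maxMatches=2):
    print(tokens[0], start, end)
# prints:
# 12 2 4
# 345 6 9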
def test_huber_max_iter():
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
|
def test_huber_max_iter():
<DeepExtract>
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
|
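`test_huber_max_iter` above checks that `n_iter_` is capped at `max_iter`. A sketch of the same behaviour outside the test suite; the regression data are arbitrary, and a ConvergenceWarning is expected because L-BFGS is stopped after a single iteration.

import warnings
from sklearn.datasets import make_regression
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import HuberRegressor

X, y = make_regression(n_samples=50, n_features=20, noise=0.05, random_state=0)

with warnings.catch_warnings():
    warnings.simplefilter("ignore", ConvergenceWarning)
    huber = HuberRegressor(max_iter=1).fit(X, y)

print(huber.n_iter_, huber.max_iter)  # 1 1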
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ['predict', 'transform', 'decision_function', 'predict_proba']:
if hasattr(estimator, method):
assert_raise_message(ValueError, 'Reshape your data', getattr(estimator, method), X[0])
|
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ['predict', 'transform', 'decision_function', 'predict_proba']:
if hasattr(estimator, method):
assert_raise_message(ValueError, 'Reshape your data', getattr(estimator, method), X[0])
|
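`check_fit2d_predict1d` above enforces that estimators fitted on 2-D data reject a single 1-D sample with a "Reshape your data" error. A sketch of the user-facing behaviour; LogisticRegression stands in for any estimator.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = 3 * rng.uniform(size=(20, 3))
y = np.tile([0, 1], 10)

clf = LogisticRegression().fit(X, y)
try:
    clf.predict(X[0])                       # 1-D input of shape (3,)
except ValueError as exc:
    print("Reshape your data" in str(exc))  # True

print(clf.predict(X[0].reshape(1, -1)))     # shape (1, 3) is accepted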
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
    where :math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float64, np.float32], ensure_2d=True, allow_nd=False, reset=False)
raw_prediction = X @ self.coef_ + self.intercept_
y = check_array(y, dtype=raw_prediction.dtype, order='C', ensure_2d=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
base_loss = self._base_loss
if not base_loss.in_y_true_range(y):
raise ValueError(f'Some value(s) of y are out of the valid range of the loss {base_loss.__name__}.')
constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y))
if sample_weight is not None:
constant *= sample_weight.shape[0] / np.sum(sample_weight)
deviance = base_loss(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1)
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
deviance_null = base_loss(y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1)
return 1 - (deviance + constant) / (deviance_null + constant)
|
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
    where :math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
<DeepExtract>
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float64, np.float32], ensure_2d=True, allow_nd=False, reset=False)
raw_prediction = X @ self.coef_ + self.intercept_
</DeepExtract>
y = check_array(y, dtype=raw_prediction.dtype, order='C', ensure_2d=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
base_loss = self._base_loss
if not base_loss.in_y_true_range(y):
raise ValueError(f'Some value(s) of y are out of the valid range of the loss {base_loss.__name__}.')
constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y))
if sample_weight is not None:
constant *= sample_weight.shape[0] / np.sum(sample_weight)
deviance = base_loss(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1)
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
deviance_null = base_loss(y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1)
return 1 - (deviance + constant) / (deviance_null + constant)
|
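The `score` pair above defines D^2 as one minus the ratio of the model deviance to the null deviance. A numerical sketch of that definition for a Poisson GLM, assuming `PoissonRegressor` and `mean_poisson_deviance` are available; the simulated data are illustrative.

import numpy as np
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 3))
y = rng.poisson(lam=np.exp(X @ np.array([0.3, -0.2, 0.5])))

reg = PoissonRegressor().fit(X, y)
deviance = mean_poisson_deviance(y, reg.predict(X))
deviance_null = mean_poisson_deviance(y, np.full(y.shape, y.mean()))

# Matches the estimator's own D^2 score.
print(np.isclose(1 - deviance / deviance_null, reg.score(X, y)))  # True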
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,) default=None
Weights of training data.
.. versionadded:: 0.23
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
fit_start_time = time()
acc_find_split_time = 0.0
acc_apply_split_time = 0.0
acc_compute_hist_time = 0.0
acc_prediction_time = 0.0
(X, y) = self._validate_data(X, y, dtype=[X_DTYPE], force_all_finite=False)
pass
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
self._fitted_with_sw = True
sample_weight = sample_weight
rng = check_random_state(self.random_state)
if not (self.warm_start and self._is_fitted()):
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype='u8')
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError('monotonic constraints are not supported for multiclass classification.')
monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
(n_samples, self._n_features) = X.shape
if self.categorical_features is None:
(self.is_categorical_, known_categories) = (None, None)
categorical_features = np.asarray(self.categorical_features)
if categorical_features.size == 0:
(self.is_categorical_, known_categories) = (None, None)
if categorical_features.dtype.kind not in ('i', 'b', 'U', 'O'):
raise ValueError(f'categorical_features must be an array-like of bool, int or str, got: {categorical_features.dtype.name}.')
if categorical_features.dtype.kind == 'O':
types = set((type(f) for f in categorical_features))
if types != {str}:
raise ValueError(f"categorical_features must be an array-like of bool, int or str, got: {', '.join(sorted((t.__name__ for t in types)))}.")
n_features = X.shape[1]
if categorical_features.dtype.kind in ('U', 'O'):
if not hasattr(self, 'feature_names_in_'):
raise ValueError('categorical_features should be passed as an array of integers or as a boolean mask when the model is fitted on data without feature names.')
is_categorical = np.zeros(n_features, dtype=bool)
feature_names = self.feature_names_in_.tolist()
for feature_name in categorical_features:
try:
is_categorical[feature_names.index(feature_name)] = True
except ValueError as e:
raise ValueError(f"categorical_features has a item value '{feature_name}' which is not a valid feature name of the training data. Observed feature names: {feature_names}") from e
elif categorical_features.dtype.kind == 'i':
if np.max(categorical_features) >= n_features or np.min(categorical_features) < 0:
raise ValueError('categorical_features set as integer indices must be in [0, n_features - 1]')
is_categorical = np.zeros(n_features, dtype=bool)
is_categorical[categorical_features] = True
else:
if categorical_features.shape[0] != n_features:
raise ValueError(f'categorical_features set as a boolean mask must have shape (n_features,), got: {categorical_features.shape}')
is_categorical = categorical_features
if not np.any(is_categorical):
(self.is_categorical_, known_categories) = (None, None)
known_categories = []
for f_idx in range(n_features):
if is_categorical[f_idx]:
categories = np.unique(X[:, f_idx])
missing = np.isnan(categories)
if missing.any():
categories = categories[~missing]
if hasattr(self, 'feature_names_in_'):
feature_name = f"'{self.feature_names_in_[f_idx]}'"
else:
feature_name = f'at index {f_idx}'
if categories.size > self.max_bins:
raise ValueError(f'Categorical feature {feature_name} is expected to have a cardinality <= {self.max_bins} but actually has a cardinality of {categories.size}.')
if (categories >= self.max_bins).any():
raise ValueError(f'Categorical feature {feature_name} is expected to be encoded with values < {self.max_bins} but the largest value for the encoded categories is {categories.max()}.')
else:
categories = None
known_categories.append(categories)
(self.is_categorical_, known_categories) = (is_categorical, known_categories)
if self.interaction_cst is None:
interaction_cst = None
if self.interaction_cst == 'no_interactions':
interaction_cst = [[i] for i in range(self._n_features)]
elif self.interaction_cst == 'pairwise':
interaction_cst = itertools.combinations(range(self._n_features), 2)
else:
interaction_cst = self.interaction_cst
try:
constraints = [set(group) for group in interaction_cst]
except TypeError:
raise ValueError(f'Interaction constraints must be a sequence of tuples or lists, got: {self.interaction_cst!r}.')
for group in constraints:
for x in group:
if not (isinstance(x, Integral) and 0 <= x < self._n_features):
raise ValueError(f'Interaction constraints must consist of integer indices in [0, n_features - 1] = [0, {self._n_features - 1}], specifying the position of features, got invalid indices: {group!r}')
rest = set(range(self._n_features)) - set().union(*constraints)
if len(rest) > 0:
constraints.append(rest)
interaction_cst = constraints
self._in_fit = True
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
pass
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == 'auto':
self.do_early_stopping_ = n_samples > 10000
else:
self.do_early_stopping_ = self.early_stopping
self._use_validation_data = self.validation_fraction is not None
if self.do_early_stopping_ and self._use_validation_data:
stratify = y if hasattr(self._loss, 'predict_proba') else None
if sample_weight is None:
(X_train, X_val, y_train, y_val) = train_test_split(X, y, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
sample_weight_train = sample_weight_val = None
else:
(X_train, X_val, y_train, y_val, sample_weight_train, sample_weight_val) = train_test_split(X, y, sample_weight, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
else:
(X_train, y_train, sample_weight_train) = (X, y, sample_weight)
X_val = y_val = sample_weight_val = None
n_bins = self.max_bins + 1
self._bin_mapper = _BinMapper(n_bins=n_bins, is_categorical=self.is_categorical_, known_categories=known_categories, random_state=self._random_seed, n_threads=n_threads)
description = 'training' if True else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_train.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if True:
X_binned = self._bin_mapper.fit_transform(X_train)
else:
X_binned = self._bin_mapper.transform(X_train)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_train = X_binned
if X_val is not None:
description = 'training' if False else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_val.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if False:
X_binned = self._bin_mapper.fit_transform(X_val)
else:
X_binned = self._bin_mapper.transform(X_val)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_val = X_binned
else:
X_binned_val = None
has_missing_values = (X_binned_train == self._bin_mapper.missing_values_bin_idx_).any(axis=0).astype(np.uint8)
if self.verbose:
print('Fitting gradient boosted rounds:')
n_samples = X_binned_train.shape[0]
if not (self._is_fitted() and self.warm_start):
for var in ('train_score_', 'validation_score_'):
if hasattr(self, var):
delattr(self, var)
self._baseline_prediction = self._loss.fit_intercept_only(y_true=y_train, sample_weight=sample_weight_train).reshape((1, -1))
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
self._predictors = predictors = []
self._scorer = None
raw_predictions_val = None
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
raw_predictions_val = np.zeros(shape=(X_binned_val.shape[0], self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions_val += self._baseline_prediction
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
else:
self._scorer = check_scoring(self, self.scoring)
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
begin_at_stage = 0
else:
if self.max_iter < self.n_iter_:
raise ValueError('max_iter=%d must be larger than or equal to n_iter_=%d when warm_start==True' % (self.max_iter, self.n_iter_))
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_train = self._validate_data(X_binned_train, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_train.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_train.shape[1], self._n_features))
n_samples = X_binned_train.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_train, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions = raw_predictions
if self.do_early_stopping_ and self._use_validation_data:
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_val = self._validate_data(X_binned_val, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_val.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_val.shape[1], self._n_features))
n_samples = X_binned_val.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_val, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions_val = raw_predictions
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != 'loss':
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
predictors = self._predictors
begin_at_stage = self.n_iter_
(gradient, hessian) = self._loss.init_gradient_and_hessian(n_samples=n_samples, dtype=G_H_DTYPE, order='F')
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose:
iteration_start_time = time()
print('[{}/{}] '.format(iteration + 1, self.max_iter), end='', flush=True)
if self._loss.constant_hessian:
self._loss.gradient(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, n_threads=n_threads)
else:
self._loss.gradient_hessian(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, hessian_out=hessian, n_threads=n_threads)
predictors.append([])
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(X_binned=X_binned_train, gradients=g_view[:, k], hessians=h_view[:, k], n_bins=n_bins, n_bins_non_missing=self._bin_mapper.n_bins_non_missing_, has_missing_values=has_missing_values, is_categorical=self.is_categorical_, monotonic_cst=monotonic_cst, interaction_cst=interaction_cst, max_leaf_nodes=self.max_leaf_nodes, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, l2_regularization=self.l2_regularization, shrinkage=self.learning_rate, n_threads=n_threads)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if self._loss.need_update_leaves_values:
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight_train is None:
sw = None
else:
sw = sample_weight_train[indices]
update = self._loss.fit_intercept_only(y_true=y_train[indices] - raw_predictions[:, k][indices], sample_weight=sw)
leaf.value = grower.shrinkage * update
predictor = grower.make_predictor(binning_thresholds=self._bin_mapper.bin_thresholds_)
predictors[-1].append(predictor)
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
for (k, pred) in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(X_binned_val, self._bin_mapper.missing_values_bin_idx_, n_threads)
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
else:
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
if self.verbose:
log_msg = ''
predictors_of_ith_iteration = [predictors_list for predictors_list in self._predictors[-1] if predictors_list]
n_trees = len(predictors_of_ith_iteration)
max_depth = max((predictor.get_max_depth() for predictor in predictors_of_ith_iteration))
n_leaves = sum((predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration))
if n_trees == 1:
log_msg += '{} tree, {} leaves, '.format(n_trees, n_leaves)
else:
log_msg += '{} trees, {} leaves '.format(n_trees, n_leaves)
log_msg += '({} on avg), '.format(int(n_leaves / n_trees))
log_msg += 'max depth = {}, '.format(max_depth)
if self.do_early_stopping_:
if self.scoring == 'loss':
factor = -1
name = 'loss'
else:
factor = 1
name = 'score'
log_msg += 'train {}: {:.5f}, '.format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += 'val {}: {:.5f}, '.format(name, factor * self.validation_score_[-1])
iteration_time = time() - iteration_start_time
log_msg += 'in {:0.3f}s'.format(iteration_time)
print(log_msg)
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum((predictor.get_n_leaf_nodes() for predictors_at_ith_iteration in self._predictors for predictor in predictors_at_ith_iteration))
n_predictors = sum((len(predictors_at_ith_iteration) for predictors_at_ith_iteration in self._predictors))
print('Fit {} trees in {:.3f} s, ({} total leaves)'.format(n_predictors, duration, n_total_leaves))
print('{:<32} {:.3f}s'.format('Time spent computing histograms:', acc_compute_hist_time))
print('{:<32} {:.3f}s'.format('Time spent finding best splits:', acc_find_split_time))
print('{:<32} {:.3f}s'.format('Time spent applying splits:', acc_apply_split_time))
print('{:<32} {:.3f}s'.format('Time spent predicting:', acc_prediction_time))
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,) default=None
Weights of training data.
.. versionadded:: 0.23
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
fit_start_time = time()
acc_find_split_time = 0.0
acc_apply_split_time = 0.0
acc_compute_hist_time = 0.0
acc_prediction_time = 0.0
(X, y) = self._validate_data(X, y, dtype=[X_DTYPE], force_all_finite=False)
<DeepExtract>
pass
</DeepExtract>
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
self._fitted_with_sw = True
<DeepExtract>
sample_weight = sample_weight
</DeepExtract>
rng = check_random_state(self.random_state)
if not (self.warm_start and self._is_fitted()):
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype='u8')
<DeepExtract>
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError('monotonic constraints are not supported for multiclass classification.')
</DeepExtract>
monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
(n_samples, self._n_features) = X.shape
<DeepExtract>
if self.categorical_features is None:
(self.is_categorical_, known_categories) = (None, None)
categorical_features = np.asarray(self.categorical_features)
if categorical_features.size == 0:
(self.is_categorical_, known_categories) = (None, None)
if categorical_features.dtype.kind not in ('i', 'b', 'U', 'O'):
raise ValueError(f'categorical_features must be an array-like of bool, int or str, got: {categorical_features.dtype.name}.')
if categorical_features.dtype.kind == 'O':
types = set((type(f) for f in categorical_features))
if types != {str}:
raise ValueError(f"categorical_features must be an array-like of bool, int or str, got: {', '.join(sorted((t.__name__ for t in types)))}.")
n_features = X.shape[1]
if categorical_features.dtype.kind in ('U', 'O'):
if not hasattr(self, 'feature_names_in_'):
raise ValueError('categorical_features should be passed as an array of integers or as a boolean mask when the model is fitted on data without feature names.')
is_categorical = np.zeros(n_features, dtype=bool)
feature_names = self.feature_names_in_.tolist()
for feature_name in categorical_features:
try:
is_categorical[feature_names.index(feature_name)] = True
except ValueError as e:
raise ValueError(f"categorical_features has a item value '{feature_name}' which is not a valid feature name of the training data. Observed feature names: {feature_names}") from e
elif categorical_features.dtype.kind == 'i':
if np.max(categorical_features) >= n_features or np.min(categorical_features) < 0:
raise ValueError('categorical_features set as integer indices must be in [0, n_features - 1]')
is_categorical = np.zeros(n_features, dtype=bool)
is_categorical[categorical_features] = True
else:
if categorical_features.shape[0] != n_features:
raise ValueError(f'categorical_features set as a boolean mask must have shape (n_features,), got: {categorical_features.shape}')
is_categorical = categorical_features
if not np.any(is_categorical):
(self.is_categorical_, known_categories) = (None, None)
known_categories = []
for f_idx in range(n_features):
if is_categorical[f_idx]:
categories = np.unique(X[:, f_idx])
missing = np.isnan(categories)
if missing.any():
categories = categories[~missing]
if hasattr(self, 'feature_names_in_'):
feature_name = f"'{self.feature_names_in_[f_idx]}'"
else:
feature_name = f'at index {f_idx}'
if categories.size > self.max_bins:
raise ValueError(f'Categorical feature {feature_name} is expected to have a cardinality <= {self.max_bins} but actually has a cardinality of {categories.size}.')
if (categories >= self.max_bins).any():
raise ValueError(f'Categorical feature {feature_name} is expected to be encoded with values < {self.max_bins} but the largest value for the encoded categories is {categories.max()}.')
else:
categories = None
known_categories.append(categories)
(self.is_categorical_, known_categories) = (is_categorical, known_categories)
</DeepExtract>
<DeepExtract>
if self.interaction_cst is None:
interaction_cst = None
if self.interaction_cst == 'no_interactions':
interaction_cst = [[i] for i in range(self._n_features)]
elif self.interaction_cst == 'pairwise':
interaction_cst = itertools.combinations(range(self._n_features), 2)
else:
interaction_cst = self.interaction_cst
try:
constraints = [set(group) for group in interaction_cst]
except TypeError:
raise ValueError(f'Interaction constraints must be a sequence of tuples or lists, got: {self.interaction_cst!r}.')
for group in constraints:
for x in group:
if not (isinstance(x, Integral) and 0 <= x < self._n_features):
raise ValueError(f'Interaction constraints must consist of integer indices in [0, n_features - 1] = [0, {self._n_features - 1}], specifying the position of features, got invalid indices: {group!r}')
rest = set(range(self._n_features)) - set().union(*constraints)
if len(rest) > 0:
constraints.append(rest)
interaction_cst = constraints
</DeepExtract>
self._in_fit = True
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
<DeepExtract>
pass
</DeepExtract>
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == 'auto':
self.do_early_stopping_ = n_samples > 10000
else:
self.do_early_stopping_ = self.early_stopping
self._use_validation_data = self.validation_fraction is not None
if self.do_early_stopping_ and self._use_validation_data:
stratify = y if hasattr(self._loss, 'predict_proba') else None
if sample_weight is None:
(X_train, X_val, y_train, y_val) = train_test_split(X, y, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
sample_weight_train = sample_weight_val = None
else:
(X_train, X_val, y_train, y_val, sample_weight_train, sample_weight_val) = train_test_split(X, y, sample_weight, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
else:
(X_train, y_train, sample_weight_train) = (X, y, sample_weight)
X_val = y_val = sample_weight_val = None
n_bins = self.max_bins + 1
self._bin_mapper = _BinMapper(n_bins=n_bins, is_categorical=self.is_categorical_, known_categories=known_categories, random_state=self._random_seed, n_threads=n_threads)
<DeepExtract>
description = 'training' if True else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_train.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if True:
X_binned = self._bin_mapper.fit_transform(X_train)
else:
X_binned = self._bin_mapper.transform(X_train)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_train = X_binned
</DeepExtract>
if X_val is not None:
<DeepExtract>
description = 'training' if False else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_val.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if False:
X_binned = self._bin_mapper.fit_transform(X_val)
else:
X_binned = self._bin_mapper.transform(X_val)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_val = X_binned
</DeepExtract>
else:
X_binned_val = None
has_missing_values = (X_binned_train == self._bin_mapper.missing_values_bin_idx_).any(axis=0).astype(np.uint8)
if self.verbose:
print('Fitting gradient boosted rounds:')
n_samples = X_binned_train.shape[0]
if not (self._is_fitted() and self.warm_start):
<DeepExtract>
for var in ('train_score_', 'validation_score_'):
if hasattr(self, var):
delattr(self, var)
</DeepExtract>
self._baseline_prediction = self._loss.fit_intercept_only(y_true=y_train, sample_weight=sample_weight_train).reshape((1, -1))
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
self._predictors = predictors = []
self._scorer = None
raw_predictions_val = None
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
raw_predictions_val = np.zeros(shape=(X_binned_val.shape[0], self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions_val += self._baseline_prediction
<DeepExtract>
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
</DeepExtract>
else:
self._scorer = check_scoring(self, self.scoring)
<DeepExtract>
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
</DeepExtract>
<DeepExtract>
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
</DeepExtract>
begin_at_stage = 0
else:
if self.max_iter < self.n_iter_:
raise ValueError('max_iter=%d must be larger than or equal to n_iter_=%d when warm_start==True' % (self.max_iter, self.n_iter_))
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
<DeepExtract>
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_train = self._validate_data(X_binned_train, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_train.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_train.shape[1], self._n_features))
n_samples = X_binned_train.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_train, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions = raw_predictions
</DeepExtract>
if self.do_early_stopping_ and self._use_validation_data:
<DeepExtract>
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_val = self._validate_data(X_binned_val, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_val.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_val.shape[1], self._n_features))
n_samples = X_binned_val.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_val, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions_val = raw_predictions
</DeepExtract>
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != 'loss':
<DeepExtract>
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
</DeepExtract>
predictors = self._predictors
begin_at_stage = self.n_iter_
(gradient, hessian) = self._loss.init_gradient_and_hessian(n_samples=n_samples, dtype=G_H_DTYPE, order='F')
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose:
iteration_start_time = time()
print('[{}/{}] '.format(iteration + 1, self.max_iter), end='', flush=True)
if self._loss.constant_hessian:
self._loss.gradient(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, n_threads=n_threads)
else:
self._loss.gradient_hessian(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, hessian_out=hessian, n_threads=n_threads)
predictors.append([])
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(X_binned=X_binned_train, gradients=g_view[:, k], hessians=h_view[:, k], n_bins=n_bins, n_bins_non_missing=self._bin_mapper.n_bins_non_missing_, has_missing_values=has_missing_values, is_categorical=self.is_categorical_, monotonic_cst=monotonic_cst, interaction_cst=interaction_cst, max_leaf_nodes=self.max_leaf_nodes, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, l2_regularization=self.l2_regularization, shrinkage=self.learning_rate, n_threads=n_threads)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if self._loss.need_update_leaves_values:
<DeepExtract>
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight_train is None:
sw = None
else:
sw = sample_weight_train[indices]
update = self._loss.fit_intercept_only(y_true=y_train[indices] - raw_predictions[:, k][indices], sample_weight=sw)
leaf.value = grower.shrinkage * update
</DeepExtract>
predictor = grower.make_predictor(binning_thresholds=self._bin_mapper.bin_thresholds_)
predictors[-1].append(predictor)
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
for (k, pred) in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(X_binned_val, self._bin_mapper.missing_values_bin_idx_, n_threads)
<DeepExtract>
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
</DeepExtract>
else:
<DeepExtract>
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
</DeepExtract>
if self.verbose:
<DeepExtract>
log_msg = ''
predictors_of_ith_iteration = [predictors_list for predictors_list in self._predictors[-1] if predictors_list]
n_trees = len(predictors_of_ith_iteration)
max_depth = max((predictor.get_max_depth() for predictor in predictors_of_ith_iteration))
n_leaves = sum((predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration))
if n_trees == 1:
log_msg += '{} tree, {} leaves, '.format(n_trees, n_leaves)
else:
log_msg += '{} trees, {} leaves '.format(n_trees, n_leaves)
log_msg += '({} on avg), '.format(int(n_leaves / n_trees))
log_msg += 'max depth = {}, '.format(max_depth)
if self.do_early_stopping_:
if self.scoring == 'loss':
factor = -1
name = 'loss'
else:
factor = 1
name = 'score'
log_msg += 'train {}: {:.5f}, '.format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += 'val {}: {:.5f}, '.format(name, factor * self.validation_score_[-1])
iteration_time = time() - iteration_start_time
log_msg += 'in {:0.3f}s'.format(iteration_time)
print(log_msg)
</DeepExtract>
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum((predictor.get_n_leaf_nodes() for predictors_at_ith_iteration in self._predictors for predictor in predictors_at_ith_iteration))
n_predictors = sum((len(predictors_at_ith_iteration) for predictors_at_ith_iteration in self._predictors))
print('Fit {} trees in {:.3f} s, ({} total leaves)'.format(n_predictors, duration, n_total_leaves))
print('{:<32} {:.3f}s'.format('Time spent computing histograms:', acc_compute_hist_time))
print('{:<32} {:.3f}s'.format('Time spent finding best splits:', acc_find_split_time))
print('{:<32} {:.3f}s'.format('Time spent applying splits:', acc_apply_split_time))
print('{:<32} {:.3f}s'.format('Time spent predicting:', acc_prediction_time))
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit
return self
|
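The `fit` pair above wires together binning, optional early stopping and per-iteration scoring for histogram gradient boosting. A usage sketch of the resulting public behaviour, assuming scikit-learn >= 1.0; parameter values are illustrative.

from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor

X, y = make_regression(n_samples=500, noise=10.0, random_state=0)

est = HistGradientBoostingRegressor(
    max_iter=50,
    early_stopping=True,
    validation_fraction=0.2,
    scoring="loss",
    random_state=0,
)
est.fit(X, y)

# train_score_/validation_score_ hold the baseline plus one entry per iteration.
print(est.n_iter_, len(est.train_score_), len(est.validation_score_))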
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
y_proba = y_proba
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
|
def predict(self, X):
<DeepExtract>
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
y_proba = y_proba
</DeepExtract>
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
|
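The `predict` pair above is a most-frequent-class baseline: the probability matrix puts all mass on the majority class, so the argmax always returns it. `DummyClassifier(strategy="most_frequent")` reproduces the same behaviour.

import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 2))
y = np.array(["a", "b", "b", "b", "a", "b"])

clf = DummyClassifier(strategy="most_frequent").fit(X, y)
print(clf.predict(np.zeros((3, 2))))        # ['b' 'b' 'b']
print(clf.predict_proba(np.zeros((1, 2))))  # [[0. 1.]]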
def fit(self, X, y=None):
"""Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if self.verbose != 'deprecated':
warnings.warn("The 'verbose' parameter was deprecated in version 1.1 and will be removed in 1.3. A warning will always be raised upon the removal of empty columns in the future version.", FutureWarning)
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not True and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=True, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if True:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
if self.fill_value is None:
if X.dtype.kind in ('i', 'u', 'f'):
fill_value = 0
else:
fill_value = 'missing_value'
else:
fill_value = self.fill_value
if self.strategy == 'constant' and X.dtype.kind in ('i', 'u', 'f') and (not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a numerical value when imputing numerical data".format(fill_value))
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError('Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.')
else:
missing_mask = _get_mask(X, self.missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if self.strategy == 'constant':
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if len(column) == 0 and self.keep_empty_features:
statistics[i] = 0
elif self.strategy == 'mean':
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif self.strategy == 'median':
statistics[i] = _get_median(column, n_zeros)
elif self.strategy == 'most_frequent':
statistics[i] = _most_frequent(column, 0, n_zeros)
super()._fit_indicator(missing_mask)
self.statistics_ = statistics
else:
missing_mask = _get_mask(X, self.missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
if self.strategy == 'mean':
mean_masked = np.ma.mean(masked_X, axis=0)
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = mean
elif self.strategy == 'median':
median_masked = np.ma.median(masked_X, axis=0)
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = median
elif self.strategy == 'most_frequent':
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == 'O':
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
if len(row) == 0 and self.keep_empty_features:
most_frequent[i] = 0
else:
most_frequent[i] = _most_frequent(row, np.nan, 0)
self.statistics_ = most_frequent
elif self.strategy == 'constant':
self.statistics_ = np.full(X.shape[1], fill_value, dtype=X.dtype)
return self
|
def fit(self, X, y=None):
"""Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if self.verbose != 'deprecated':
warnings.warn("The 'verbose' parameter was deprecated in version 1.1 and will be removed in 1.3. A warning will always be raised upon the removal of empty columns in the future version.", FutureWarning)
<DeepExtract>
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not True and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=True, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if True:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
</DeepExtract>
if self.fill_value is None:
if X.dtype.kind in ('i', 'u', 'f'):
fill_value = 0
else:
fill_value = 'missing_value'
else:
fill_value = self.fill_value
if self.strategy == 'constant' and X.dtype.kind in ('i', 'u', 'f') and (not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a numerical value when imputing numerical data".format(fill_value))
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError('Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.')
else:
<DeepExtract>
missing_mask = _get_mask(X, self.missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if self.strategy == 'constant':
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if len(column) == 0 and self.keep_empty_features:
statistics[i] = 0
elif self.strategy == 'mean':
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif self.strategy == 'median':
statistics[i] = _get_median(column, n_zeros)
elif self.strategy == 'most_frequent':
statistics[i] = _most_frequent(column, 0, n_zeros)
super()._fit_indicator(missing_mask)
self.statistics_ = statistics
</DeepExtract>
else:
<DeepExtract>
missing_mask = _get_mask(X, self.missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
if self.strategy == 'mean':
mean_masked = np.ma.mean(masked_X, axis=0)
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = mean
elif self.strategy == 'median':
median_masked = np.ma.median(masked_X, axis=0)
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = median
elif self.strategy == 'most_frequent':
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == 'O':
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
if len(row) == 0 and self.keep_empty_features:
most_frequent[i] = 0
else:
most_frequent[i] = _most_frequent(row, np.nan, 0)
self.statistics_ = most_frequent
elif self.strategy == 'constant':
self.statistics_ = np.full(X.shape[1], fill_value, dtype=X.dtype)
</DeepExtract>
return self
|
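A short, hedged usage sketch of the public SimpleImputer API whose fit is shown above; the toy array and strategy choice are illustrative.

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan],
              [7.0, 8.0, 9.0]])

imp = SimpleImputer(strategy="mean")   # column means over observed values only
print(imp.fit_transform(X))
print(imp.statistics_)                 # per-column fill values computed in fit
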
def orthogonal_mp_gram(Gram, Xy, *, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP).
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : ndarray of shape (n_features, n_features)
Gram matrix of the input data: X.T * X.
Xy : ndarray of shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y.
n_nonzero_coefs : int, default=None
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, default=None
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like of shape (n_targets,), default=None
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, default=True
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, default=True
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, default=False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
coef : ndarray of shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See Also
--------
OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
lars_path : Compute Least Angle Regression or Lasso path using
LARS algorithm.
sklearn.decomposition.sparse_encode : Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if copy_Xy or not Xy.flags.writeable:
Xy = Xy.copy()
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError('Epsilon cannot be negative')
if tol is None and n_nonzero_coefs <= 0:
raise ValueError('The number of atoms must be positive')
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError('The number of atoms cannot be more than the number of features')
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
else:
coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
n_iters = []
for k in range(Xy.shape[1]):
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if False or not Xy[:, k].flags.writeable:
Xy[:, k] = Xy[:, k].copy()
min_float = np.finfo(Gram.dtype).eps
(nrm2, swap) = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
(potrs,) = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram))
alpha = Xy[:, k]
tol_curr = norms_squared[k] if tol is not None else None
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.0
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False)
v = nrm2(L[n_active, :n_active]) ** 2
Lkk = Gram[lam, lam] - v
if Lkk <= min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = sqrt(Lkk)
else:
L[0, 0] = sqrt(Gram[lam, lam])
(Gram[n_active], Gram[lam]) = swap(Gram[n_active], Gram[lam])
(Gram.T[n_active], Gram.T[lam]) = swap(Gram.T[n_active], Gram.T[lam])
(indices[n_active], indices[lam]) = (indices[lam], indices[n_active])
(Xy[:, k][n_active], Xy[:, k][lam]) = (Xy[:, k][lam], Xy[:, k][n_active])
n_active += 1
(gamma, _) = potrs(L[:n_active, :n_active], Xy[:, k][:n_active], lower=True, overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy[:, k] - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
out = (gamma, indices[:n_active], coefs[:, :n_active], n_active)
else:
out = (gamma, indices[:n_active], n_active)
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return (np.squeeze(coef), n_iters)
else:
return np.squeeze(coef)
|
def orthogonal_mp_gram(Gram, Xy, *, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP).
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : ndarray of shape (n_features, n_features)
Gram matrix of the input data: X.T * X.
Xy : ndarray of shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y.
n_nonzero_coefs : int, default=None
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, default=None
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like of shape (n_targets,), default=None
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, default=True
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, default=True
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, default=False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
coef : ndarray of shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See Also
--------
OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
lars_path : Compute Least Angle Regression or Lasso path using
LARS algorithm.
sklearn.decomposition.sparse_encode : Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if copy_Xy or not Xy.flags.writeable:
Xy = Xy.copy()
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError('Epsilon cannot be negative')
if tol is None and n_nonzero_coefs <= 0:
raise ValueError('The number of atoms must be positive')
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError('The number of atoms cannot be more than the number of features')
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
else:
coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
n_iters = []
for k in range(Xy.shape[1]):
<DeepExtract>
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if False or not Xy[:, k].flags.writeable:
Xy[:, k] = Xy[:, k].copy()
min_float = np.finfo(Gram.dtype).eps
(nrm2, swap) = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
(potrs,) = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram))
alpha = Xy[:, k]
tol_curr = norms_squared[k] if tol is not None else None
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.0
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False)
v = nrm2(L[n_active, :n_active]) ** 2
Lkk = Gram[lam, lam] - v
if Lkk <= min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = sqrt(Lkk)
else:
L[0, 0] = sqrt(Gram[lam, lam])
(Gram[n_active], Gram[lam]) = swap(Gram[n_active], Gram[lam])
(Gram.T[n_active], Gram.T[lam]) = swap(Gram.T[n_active], Gram.T[lam])
(indices[n_active], indices[lam]) = (indices[lam], indices[n_active])
(Xy[:, k][n_active], Xy[:, k][lam]) = (Xy[:, k][lam], Xy[:, k][n_active])
n_active += 1
(gamma, _) = potrs(L[:n_active, :n_active], Xy[:, k][:n_active], lower=True, overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy[:, k] - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
out = (gamma, indices[:n_active], coefs[:, :n_active], n_active)
else:
out = (gamma, indices[:n_active], n_active)
</DeepExtract>
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return (np.squeeze(coef), n_iters)
else:
return np.squeeze(coef)
|
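A hedged usage sketch of orthogonal_mp_gram as documented above, working from a precomputed Gram matrix; the synthetic design and the choice of three nonzero atoms are illustrative.

import numpy as np
from sklearn.linear_model import orthogonal_mp_gram

rng = np.random.RandomState(0)
X = rng.randn(100, 30)
w_true = np.zeros(30)
w_true[[2, 7, 11]] = [1.5, -2.0, 3.0]        # three informative atoms
y = X @ w_true                               # noiseless target

gram = X.T @ X                               # Gram matrix X^T X
Xy = X.T @ y                                 # covariance vector X^T y
coef = orthogonal_mp_gram(gram, Xy, n_nonzero_coefs=3)
print(np.flatnonzero(coef))                  # expected: [ 2  7 11]
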
def __lshift__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.skipWhitespace = True
self.whiteChars = self.expr.whiteChars
self.copyDefaultWhiteChars = False
return self
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
|
def __lshift__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
<DeepExtract>
self.skipWhitespace = True
self.whiteChars = self.expr.whiteChars
self.copyDefaultWhiteChars = False
return self
</DeepExtract>
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
|
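The __lshift__ above is pyparsing's Forward.__lshift__; a small sketch (assuming a standard pyparsing install) of how a forward-declared rule is filled in with << to build a recursive grammar.

from pyparsing import Forward, Suppress, Word, nums

expr = Forward()                                   # placeholder rule
atom = Word(nums) | (Suppress("(") + expr + Suppress(")"))
expr << atom                                       # bind the real expression later
print(expr.parseString("((42))"))                  # -> ['42']
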
def check_transformer_get_feature_names_out(name, transformer_orig):
tags = transformer_orig._get_tags()
if '2darray' not in tags['X_types'] or tags['no_validation']:
return
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
transformer = clone(transformer_orig)
if '1darray' in _safe_tags(transformer, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer, key='pairwise'):
X = kernel(X, X)
X = X
n_features = X.shape[1]
set_random_state(transformer)
y_ = y
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
X_transform = transformer.fit_transform(X, y=y_)
input_features = [f'feature{i}' for i in range(n_features)]
with raises(ValueError, match='input_features should have length equal'):
transformer.get_feature_names_out(input_features[::2])
feature_names_out = transformer.get_feature_names_out(input_features)
assert feature_names_out is not None
assert isinstance(feature_names_out, np.ndarray)
assert feature_names_out.dtype == object
assert all((isinstance(name, str) for name in feature_names_out))
if isinstance(X_transform, tuple):
n_features_out = X_transform[0].shape[1]
else:
n_features_out = X_transform.shape[1]
assert len(feature_names_out) == n_features_out, f'Expected {n_features_out} feature names, got {len(feature_names_out)}'
|
def check_transformer_get_feature_names_out(name, transformer_orig):
tags = transformer_orig._get_tags()
if '2darray' not in tags['X_types'] or tags['no_validation']:
return
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
transformer = clone(transformer_orig)
<DeepExtract>
if '1darray' in _safe_tags(transformer, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
n_features = X.shape[1]
set_random_state(transformer)
y_ = y
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
X_transform = transformer.fit_transform(X, y=y_)
input_features = [f'feature{i}' for i in range(n_features)]
with raises(ValueError, match='input_features should have length equal'):
transformer.get_feature_names_out(input_features[::2])
feature_names_out = transformer.get_feature_names_out(input_features)
assert feature_names_out is not None
assert isinstance(feature_names_out, np.ndarray)
assert feature_names_out.dtype == object
assert all((isinstance(name, str) for name in feature_names_out))
if isinstance(X_transform, tuple):
n_features_out = X_transform[0].shape[1]
else:
n_features_out = X_transform.shape[1]
assert len(feature_names_out) == n_features_out, f'Expected {n_features_out} feature names, got {len(feature_names_out)}'
|
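A quick illustration of the contract the check above enforces, using PolynomialFeatures as an arbitrary transformer: get_feature_names_out must return one name per transformed column.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(6, dtype=float).reshape(3, 2)
poly = PolynomialFeatures(degree=2, include_bias=False).fit(X)

names = poly.get_feature_names_out(["x0", "x1"])
print(names)                                      # ['x0' 'x1' 'x0^2' 'x0 x1' 'x1^2']
print(len(names) == poly.transform(X).shape[1])   # True, one name per output column
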
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
random_state = np.random.RandomState(seed)
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
(X, y) = (X, y)
(X_train, X_test) = (X[n_samples // 2:], X[:n_samples // 2])
(y_train, y_test) = (y[n_samples // 2:], y[:n_samples // 2])
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
assert np.sum(s_clf.coef_ != 0.0) == n_informative
|
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
<DeepExtract>
random_state = np.random.RandomState(seed)
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
(X, y) = (X, y)
</DeepExtract>
(X_train, X_test) = (X[n_samples // 2:], X[:n_samples // 2])
(y_train, y_test) = (y[n_samples // 2:], y[:n_samples // 2])
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
assert np.sum(s_clf.coef_ != 0.0) == n_informative
|
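A hedged, self-contained sketch of the behaviour the test above exercises: Lasso accepts a CSC sparse design and agrees closely with the dense fit. The synthetic data is illustrative.

import numpy as np
from scipy import sparse
from sklearn.linear_model import Lasso

rng = np.random.RandomState(42)
X = rng.randn(100, 20)
X[rng.uniform(size=X.shape) > 0.5] = 0.0          # mostly-zero design matrix
w = np.zeros(20)
w[:5] = rng.randn(5)                              # five informative features
y = X @ w

clf_sparse = Lasso(alpha=0.1, fit_intercept=False).fit(sparse.csc_matrix(X), y)
clf_dense = Lasso(alpha=0.1, fit_intercept=False).fit(X, y)
print(np.abs(clf_sparse.coef_ - clf_dense.coef_).max())   # tiny: both fits agree
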
def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
if random_state is not None:
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == 'random_state' or key.endswith('__random_state'):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
if append:
self.estimators_.append(estimator)
return estimator
|
def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
if random_state is not None:
<DeepExtract>
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == 'random_state' or key.endswith('__random_state'):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
</DeepExtract>
if append:
self.estimators_.append(estimator)
return estimator
|
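A hedged sketch of the random_state propagation done above: every parameter named "random_state" (including nested "__random_state" params) of a cloned estimator is seeded from one RandomState instance. The pipeline used here is illustrative.

import numpy as np
from sklearn.base import clone
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline
from sklearn.utils import check_random_state

template = make_pipeline(PCA(), SGDClassifier())
estimator = clone(template)
rs = check_random_state(0)

# collect every (possibly nested) random_state parameter and seed it
to_set = {
    key: rs.randint(np.iinfo(np.int32).max)
    for key in sorted(estimator.get_params(deep=True))
    if key == "random_state" or key.endswith("__random_state")
}
estimator.set_params(**to_set)
print(to_set)   # e.g. {'pca__random_state': ..., 'sgdclassifier__random_state': ...}
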
def check_transformer_preserve_dtypes(name, transformer_orig):
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
if '1darray' in _safe_tags(transformer_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer_orig, key='pairwise'):
X = kernel(X, X)
X = X
for dtype in _safe_tags(transformer_orig, key='preserves_dtype'):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans1 = transformer.fit_transform(X_cast, y)
X_trans2 = transformer.fit(X_cast, y).transform(X_cast)
for (Xt, method) in zip([X_trans1, X_trans2], ['fit_transform', 'transform']):
if isinstance(Xt, tuple):
Xt = Xt[0]
assert Xt.dtype == dtype, f'{name} (method={method}) does not preserve dtype. Original/Expected dtype={dtype.__name__}, got dtype={Xt.dtype}.'
|
def check_transformer_preserve_dtypes(name, transformer_orig):
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
<DeepExtract>
if '1darray' in _safe_tags(transformer_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
for dtype in _safe_tags(transformer_orig, key='preserves_dtype'):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans1 = transformer.fit_transform(X_cast, y)
X_trans2 = transformer.fit(X_cast, y).transform(X_cast)
for (Xt, method) in zip([X_trans1, X_trans2], ['fit_transform', 'transform']):
if isinstance(Xt, tuple):
Xt = Xt[0]
assert Xt.dtype == dtype, f'{name} (method={method}) does not preserve dtype. Original/Expected dtype={dtype.__name__}, got dtype={Xt.dtype}.'
|
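What the dtype-preservation check above asserts, shown with StandardScaler (which lists float32 among its preserved dtypes); a minimal sketch with illustrative data.

import numpy as np
from sklearn.preprocessing import StandardScaler

X32 = np.random.RandomState(0).rand(20, 3).astype(np.float32)
Xt = StandardScaler().fit_transform(X32)
print(Xt.dtype)   # float32: the transformer keeps the input dtype instead of upcasting
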
def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(X, y, X_offset_, y_offset_, X_scale_) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
(X, y, _) = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
(n_samples, n_features) = X.shape
eps = np.finfo(np.float64).eps
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1.0 / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.0
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
(U, S, Vh) = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
for iter_ in range(self.n_iter):
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(coef_, rmse_) = (coef_, rmse_)
if self.compute_score:
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
self.scores_.append(s)
gamma_ = np.sum(alpha_ * eigen_vals_ / (lambda_ + alpha_ * eigen_vals_))
lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)
alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print('Convergence after ', str(iter_), ' iterations')
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
self.alpha_ = alpha_
self.lambda_ = lambda_
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(self.coef_, rmse_) = (coef_, rmse_)
if self.compute_score:
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
scaled_sigma_ = np.dot(Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = 1.0 / alpha_ * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(X, y, X_offset_, y_offset_, X_scale_) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
(X, y, _) = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
(n_samples, n_features) = X.shape
eps = np.finfo(np.float64).eps
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1.0 / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.0
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
(U, S, Vh) = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
for iter_ in range(self.n_iter):
<DeepExtract>
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(coef_, rmse_) = (coef_, rmse_)
</DeepExtract>
if self.compute_score:
<DeepExtract>
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
</DeepExtract>
self.scores_.append(s)
gamma_ = np.sum(alpha_ * eigen_vals_ / (lambda_ + alpha_ * eigen_vals_))
lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)
alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print('Convergence after ', str(iter_), ' iterations')
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
self.alpha_ = alpha_
self.lambda_ = lambda_
<DeepExtract>
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(self.coef_, rmse_) = (coef_, rmse_)
</DeepExtract>
if self.compute_score:
<DeepExtract>
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
</DeepExtract>
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
scaled_sigma_ = np.dot(Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = 1.0 / alpha_ * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
|
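A short usage sketch of the estimator whose fit is shown above; with compute_score=True the per-iteration log marginal likelihood lands in scores_, while alpha_ and lambda_ hold the estimated noise and weight precisions. The toy data is illustrative.

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, 0.0, -2.0]) + 0.1 * rng.randn(50)

reg = BayesianRidge(compute_score=True).fit(X, y)
print(reg.coef_)                  # close to [1, 0, -2]
print(reg.alpha_, reg.lambda_)    # estimated noise and weight precisions
print(reg.scores_[-1])            # log marginal likelihood at the final iteration
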
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
    X_forward = self.transform(X[idx_selected])
    if self.validate:
        # validate the forward-transformed subsample before inverting it
        X_forward = check_array(X_forward, accept_sparse=self.accept_sparse)
    X_round_trip = self._transform(X_forward, func=self.inverse_func, kw_args=self.inv_kw_args)
if hasattr(X, 'dtype'):
dtypes = [X.dtype]
elif hasattr(X, 'dtypes'):
dtypes = X.dtypes
if not all((np.issubdtype(d, np.number) for d in dtypes)):
raise ValueError("'check_inverse' is only supported when all the elements in `X` is numerical.")
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn("The provided functions are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'.", UserWarning)
|
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
<DeepExtract>
    X_forward = self.transform(X[idx_selected])
    if self.validate:
        # validate the forward-transformed subsample before inverting it
        X_forward = check_array(X_forward, accept_sparse=self.accept_sparse)
    X_round_trip = self._transform(X_forward, func=self.inverse_func, kw_args=self.inv_kw_args)
</DeepExtract>
if hasattr(X, 'dtype'):
dtypes = [X.dtype]
elif hasattr(X, 'dtypes'):
dtypes = X.dtypes
if not all((np.issubdtype(d, np.number) for d in dtypes)):
raise ValueError("'check_inverse' is only supported when all the elements in `X` is numerical.")
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn("The provided functions are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'.", UserWarning)
|
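The round-trip check above is what FunctionTransformer(check_inverse=True) runs at fit time on a subsample; a minimal sketch of both the passing case and the warning case.

import numpy as np
from sklearn.preprocessing import FunctionTransformer

X = np.arange(1.0, 11.0).reshape(5, 2)

# log1p and expm1 are exact inverses, so fitting is silent.
FunctionTransformer(np.log1p, inverse_func=np.expm1, check_inverse=True).fit(X)

# sqrt is not the inverse of log1p, so fit emits the UserWarning raised above.
FunctionTransformer(np.log1p, inverse_func=np.sqrt, check_inverse=True).fit(X)
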
def buchheim(tree):
if len(DrawTree(tree).children) == 0:
if DrawTree(tree).lmost_sibling:
DrawTree(tree).x = DrawTree(tree).lbrother().x + distance
else:
DrawTree(tree).x = 0.0
else:
default_ancestor = DrawTree(tree).children[0]
for w in DrawTree(tree).children:
first_walk(w)
default_ancestor = apportion(w, default_ancestor, distance)
execute_shifts(DrawTree(tree))
midpoint = (DrawTree(tree).children[0].x + DrawTree(tree).children[-1].x) / 2
w = DrawTree(tree).lbrother()
if w:
DrawTree(tree).x = w.x + distance
DrawTree(tree).mod = DrawTree(tree).x - midpoint
else:
DrawTree(tree).x = midpoint
dt = DrawTree(tree)
dt.x += m
dt.y = depth
if min is None or dt.x < min:
min = dt.x
for w in dt.children:
min = second_walk(w, m + dt.mod, depth + 1, min)
min = min
if min < 0:
dt.x += -min
for c in dt.children:
third_walk(c, -min)
return dt
|
def buchheim(tree):
<DeepExtract>
if len(DrawTree(tree).children) == 0:
if DrawTree(tree).lmost_sibling:
DrawTree(tree).x = DrawTree(tree).lbrother().x + distance
else:
DrawTree(tree).x = 0.0
else:
default_ancestor = DrawTree(tree).children[0]
for w in DrawTree(tree).children:
first_walk(w)
default_ancestor = apportion(w, default_ancestor, distance)
execute_shifts(DrawTree(tree))
midpoint = (DrawTree(tree).children[0].x + DrawTree(tree).children[-1].x) / 2
w = DrawTree(tree).lbrother()
if w:
DrawTree(tree).x = w.x + distance
DrawTree(tree).mod = DrawTree(tree).x - midpoint
else:
DrawTree(tree).x = midpoint
dt = DrawTree(tree)
</DeepExtract>
<DeepExtract>
dt.x += m
dt.y = depth
if min is None or dt.x < min:
min = dt.x
for w in dt.children:
min = second_walk(w, m + dt.mod, depth + 1, min)
min = min
</DeepExtract>
if min < 0:
<DeepExtract>
dt.x += -min
for c in dt.children:
third_walk(c, -min)
</DeepExtract>
return dt
|
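The layout code above is a Buchheim-style tree drawing; its final step (third_walk) shifts every x by -min so the left-most node lands at zero. A tiny standalone sketch of just that step, with a hypothetical Node class standing in for DrawTree.

class Node:
    def __init__(self, x, children=()):
        self.x, self.children = x, list(children)

def shift(node, n):
    # add the same offset to a node and all of its descendants
    node.x += n
    for c in node.children:
        shift(c, n)

root = Node(1.0, [Node(-2.0), Node(3.5)])
leftmost = min([root.x] + [c.x for c in root.children])
if leftmost < 0:
    shift(root, -leftmost)
print(root.x, [c.x for c in root.children])   # 3.0 [0.0, 5.5]
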
def _partial_fit(self, X, y, alpha, C, loss, learning_rate, max_iter, sample_weight, coef_init, intercept_init):
first_call = getattr(self, 'coef_', None) is None
(X, y) = self._validate_data(X, y, accept_sparse='csr', copy=False, order='C', dtype=[np.float64, np.float32], accept_large_sparse=False, reset=first_call)
y = y.astype(X.dtype, copy=False)
(n_samples, n_features) = X.shape
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if first_call:
if 1 > 2:
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=X.dtype, order='C')
if coef_init.shape != (1, n_features):
raise ValueError('Provided ``coef_`` does not match dataset. ')
self.coef_ = coef_init
else:
self.coef_ = np.zeros((1, n_features), dtype=X.dtype, order='C')
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order='C', dtype=X.dtype)
if intercept_init.shape != (1,):
raise ValueError('Provided intercept_init does not match dataset.')
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(1, dtype=X.dtype, order='C')
else:
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=X.dtype, order='C')
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError('Provided coef_init does not match dataset.')
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features, dtype=X.dtype, order='C')
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=X.dtype)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError('Provided intercept_init does not match dataset.')
if one_class:
self.offset_ = intercept_init.reshape(1)
else:
self.intercept_ = intercept_init.reshape(1)
elif one_class:
self.offset_ = np.zeros(1, dtype=X.dtype, order='C')
else:
self.intercept_ = np.zeros(1, dtype=X.dtype, order='C')
if self.average > 0:
self._standard_coef = self.coef_
self._average_coef = np.zeros(self.coef_.shape, dtype=X.dtype, order='C')
if one_class:
self._standard_intercept = 1 - self.offset_
else:
self._standard_intercept = self.intercept_
self._average_intercept = np.zeros(self._standard_intercept.shape, dtype=X.dtype, order='C')
if self.average > 0 and getattr(self, '_average_coef', None) is None:
self._average_coef = np.zeros(n_features, dtype=X.dtype, order='C')
self._average_intercept = np.zeros(1, dtype=X.dtype, order='C')
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, 't_'):
self.t_ = 1.0
validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
validation_score_cb = self._make_validation_score_cb(validation_mask, X, y, sample_weight)
random_state = check_random_state(self.random_state)
seed = random_state.randint(0, MAX_INT)
(dataset, intercept_decay) = make_dataset(X, y, sample_weight, random_state=random_state)
tol = self.tol if self.tol is not None else -np.inf
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = self.intercept_
average_coef = None
average_intercept = [0]
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
(coef, intercept, average_coef, average_intercept, self.n_iter_) = _plain_sgd(coef, intercept[0], average_coef, average_intercept[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average)
self.t_ += self.n_iter_ * X.shape[0]
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
self.coef_ = average_coef
self.intercept_ = np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.intercept_ = np.atleast_1d(intercept)
else:
self.intercept_ = np.atleast_1d(intercept)
return self
|
def _partial_fit(self, X, y, alpha, C, loss, learning_rate, max_iter, sample_weight, coef_init, intercept_init):
first_call = getattr(self, 'coef_', None) is None
(X, y) = self._validate_data(X, y, accept_sparse='csr', copy=False, order='C', dtype=[np.float64, np.float32], accept_large_sparse=False, reset=first_call)
y = y.astype(X.dtype, copy=False)
(n_samples, n_features) = X.shape
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if first_call:
<DeepExtract>
if 1 > 2:
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=X.dtype, order='C')
if coef_init.shape != (1, n_features):
raise ValueError('Provided ``coef_`` does not match dataset. ')
self.coef_ = coef_init
else:
self.coef_ = np.zeros((1, n_features), dtype=X.dtype, order='C')
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order='C', dtype=X.dtype)
if intercept_init.shape != (1,):
raise ValueError('Provided intercept_init does not match dataset.')
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(1, dtype=X.dtype, order='C')
else:
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=X.dtype, order='C')
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError('Provided coef_init does not match dataset.')
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features, dtype=X.dtype, order='C')
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=X.dtype)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError('Provided intercept_init does not match dataset.')
if one_class:
self.offset_ = intercept_init.reshape(1)
else:
self.intercept_ = intercept_init.reshape(1)
elif one_class:
self.offset_ = np.zeros(1, dtype=X.dtype, order='C')
else:
self.intercept_ = np.zeros(1, dtype=X.dtype, order='C')
if self.average > 0:
self._standard_coef = self.coef_
self._average_coef = np.zeros(self.coef_.shape, dtype=X.dtype, order='C')
if one_class:
self._standard_intercept = 1 - self.offset_
else:
self._standard_intercept = self.intercept_
self._average_intercept = np.zeros(self._standard_intercept.shape, dtype=X.dtype, order='C')
</DeepExtract>
if self.average > 0 and getattr(self, '_average_coef', None) is None:
self._average_coef = np.zeros(n_features, dtype=X.dtype, order='C')
self._average_intercept = np.zeros(1, dtype=X.dtype, order='C')
<DeepExtract>
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, 't_'):
self.t_ = 1.0
validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
validation_score_cb = self._make_validation_score_cb(validation_mask, X, y, sample_weight)
random_state = check_random_state(self.random_state)
seed = random_state.randint(0, MAX_INT)
(dataset, intercept_decay) = make_dataset(X, y, sample_weight, random_state=random_state)
tol = self.tol if self.tol is not None else -np.inf
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = self.intercept_
average_coef = None
average_intercept = [0]
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
(coef, intercept, average_coef, average_intercept, self.n_iter_) = _plain_sgd(coef, intercept[0], average_coef, average_intercept[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average)
self.t_ += self.n_iter_ * X.shape[0]
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
self.coef_ = average_coef
self.intercept_ = np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.intercept_ = np.atleast_1d(intercept)
else:
self.intercept_ = np.atleast_1d(intercept)
</DeepExtract>
return self
|
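A hedged sketch of the public behaviour behind _partial_fit above: on the first partial_fit call the coefficient buffers are allocated (the first_call branch), so the full class list must be supplied then. The toy data and batch split are illustrative.

import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
X = rng.randn(200, 4)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

clf = SGDClassifier(random_state=0)
for X_batch, y_batch in zip(np.array_split(X, 4), np.array_split(y, 4)):
    clf.partial_fit(X_batch, y_batch, classes=np.array([0, 1]))
print(clf.coef_.shape)   # (1, 4): one weight row allocated on the first call
print(clf.t_)            # grows with the number of samples seen
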
def fit(self, X, y=None, sample_weight=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
return self.partial_fit(X, y, sample_weight)
|
def fit(self, X, y=None, sample_weight=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
<DeepExtract>
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
</DeepExtract>
return self.partial_fit(X, y, sample_weight)
|
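The attributes deleted in the reset above (data_min_, data_max_, data_range_) belong to MinMaxScaler, whose fit is exactly "reset, then delegate to partial_fit"; a sketch showing that one full fit and a streamed sequence of partial fits agree. The batch split is illustrative.

import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.random.RandomState(0).rand(100, 3)

full = MinMaxScaler().fit(X)                 # reset + one partial_fit on all data

streamed = MinMaxScaler()
for batch in np.array_split(X, 5):           # same data, fed incrementally
    streamed.partial_fit(batch)

print(np.allclose(full.data_min_, streamed.data_min_))   # True
print(np.allclose(full.data_max_, streamed.data_max_))   # True
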
def _fit(self, X, handle_unknown='error', force_all_finite=True, return_counts=False, return_and_ignore_missing_for_infrequent=False):
max_categories = getattr(self, 'max_categories', None)
min_frequency = getattr(self, 'min_frequency', None)
self._infrequent_enabled = max_categories is not None and max_categories >= 1 or min_frequency is not None
self._check_n_features(X, reset=True)
self._check_feature_names(X, reset=True)
if not (hasattr(X, 'iloc') and getattr(X, 'ndim', 0) == 2):
X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite)
if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):
X = check_array(X, dtype=object, force_all_finite=force_all_finite)
else:
X = X_temp
needs_validation = False
else:
needs_validation = force_all_finite
(n_samples, n_features) = X.shape
X_columns = []
for i in range(n_features):
Xi = _safe_indexing(X, indices=i, axis=1)
Xi = check_array(Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation)
X_columns.append(Xi)
(X_list, n_samples, n_features) = (X_columns, n_samples, n_features)
self.n_features_in_ = n_features
if self.categories != 'auto':
if len(self.categories) != n_features:
raise ValueError('Shape mismatch: if categories is an array, it has to be of shape (n_features,).')
self.categories_ = []
category_counts = []
compute_counts = return_counts or self._infrequent_enabled
for i in range(n_features):
Xi = X_list[i]
if self.categories == 'auto':
result = _unique(Xi, return_counts=compute_counts)
if compute_counts:
(cats, counts) = result
category_counts.append(counts)
else:
cats = result
else:
if np.issubdtype(Xi.dtype, np.str_):
Xi_dtype = object
else:
Xi_dtype = Xi.dtype
cats = np.array(self.categories[i], dtype=Xi_dtype)
if cats.dtype == object and isinstance(cats[0], bytes) and (Xi.dtype.kind != 'S'):
msg = f"In column {i}, the predefined categories have type 'bytes' which is incompatible with values of type '{type(Xi[0]).__name__}'."
raise ValueError(msg)
if Xi.dtype.kind not in 'OUS':
sorted_cats = np.sort(cats)
error_msg = 'Unsorted categories are not supported for numerical categories'
stop_idx = -1 if np.isnan(sorted_cats[-1]) else None
if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]) or (np.isnan(sorted_cats[-1]) and (not np.isnan(sorted_cats[-1]))):
raise ValueError(error_msg)
if handle_unknown == 'error':
diff = _check_unknown(Xi, cats)
if diff:
msg = 'Found unknown categories {0} in column {1} during fit'.format(diff, i)
raise ValueError(msg)
if compute_counts:
category_counts.append(_get_counts(Xi, cats))
self.categories_.append(cats)
output = {'n_samples': n_samples}
if return_counts:
output['category_counts'] = category_counts
missing_indices = {}
if return_and_ignore_missing_for_infrequent:
for (feature_idx, categories_for_idx) in enumerate(self.categories_):
for (category_idx, category) in enumerate(categories_for_idx):
if is_scalar_nan(category):
missing_indices[feature_idx] = category_idx
break
output['missing_indices'] = missing_indices
if self._infrequent_enabled:
if missing_indices:
category_counts_ = []
for (feature_idx, count) in enumerate(category_counts):
if feature_idx in missing_indices:
category_counts_.append(np.delete(count, missing_indices[feature_idx]))
else:
category_counts_.append(count)
else:
category_counts_ = category_counts
self._infrequent_indices = [self._identify_infrequent(category_count, n_samples, col_idx) for (col_idx, category_count) in enumerate(category_counts_)]
self._default_to_infrequent_mappings = []
for (feature_idx, infreq_idx) in enumerate(self._infrequent_indices):
cats = self.categories_[feature_idx]
if infreq_idx is None:
self._default_to_infrequent_mappings.append(None)
continue
n_cats = len(cats)
if feature_idx in missing_indices:
n_cats -= 1
mapping = np.empty(n_cats, dtype=np.int64)
n_infrequent_cats = infreq_idx.size
n_frequent_cats = n_cats - n_infrequent_cats
mapping[infreq_idx] = n_frequent_cats
frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx)
mapping[frequent_indices] = np.arange(n_frequent_cats)
self._default_to_infrequent_mappings.append(mapping)
return output
|
def _fit(self, X, handle_unknown='error', force_all_finite=True, return_counts=False, return_and_ignore_missing_for_infrequent=False):
<DeepExtract>
max_categories = getattr(self, 'max_categories', None)
min_frequency = getattr(self, 'min_frequency', None)
self._infrequent_enabled = max_categories is not None and max_categories >= 1 or min_frequency is not None
</DeepExtract>
self._check_n_features(X, reset=True)
self._check_feature_names(X, reset=True)
<DeepExtract>
if not (hasattr(X, 'iloc') and getattr(X, 'ndim', 0) == 2):
X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite)
if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):
X = check_array(X, dtype=object, force_all_finite=force_all_finite)
else:
X = X_temp
needs_validation = False
else:
needs_validation = force_all_finite
(n_samples, n_features) = X.shape
X_columns = []
for i in range(n_features):
Xi = _safe_indexing(X, indices=i, axis=1)
Xi = check_array(Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation)
X_columns.append(Xi)
(X_list, n_samples, n_features) = (X_columns, n_samples, n_features)
</DeepExtract>
self.n_features_in_ = n_features
if self.categories != 'auto':
if len(self.categories) != n_features:
raise ValueError('Shape mismatch: if categories is an array, it has to be of shape (n_features,).')
self.categories_ = []
category_counts = []
compute_counts = return_counts or self._infrequent_enabled
for i in range(n_features):
Xi = X_list[i]
if self.categories == 'auto':
result = _unique(Xi, return_counts=compute_counts)
if compute_counts:
(cats, counts) = result
category_counts.append(counts)
else:
cats = result
else:
if np.issubdtype(Xi.dtype, np.str_):
Xi_dtype = object
else:
Xi_dtype = Xi.dtype
cats = np.array(self.categories[i], dtype=Xi_dtype)
if cats.dtype == object and isinstance(cats[0], bytes) and (Xi.dtype.kind != 'S'):
msg = f"In column {i}, the predefined categories have type 'bytes' which is incompatible with values of type '{type(Xi[0]).__name__}'."
raise ValueError(msg)
if Xi.dtype.kind not in 'OUS':
sorted_cats = np.sort(cats)
error_msg = 'Unsorted categories are not supported for numerical categories'
stop_idx = -1 if np.isnan(sorted_cats[-1]) else None
if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]) or (np.isnan(sorted_cats[-1]) and (not np.isnan(cats[-1]))):
raise ValueError(error_msg)
if handle_unknown == 'error':
diff = _check_unknown(Xi, cats)
if diff:
msg = 'Found unknown categories {0} in column {1} during fit'.format(diff, i)
raise ValueError(msg)
if compute_counts:
category_counts.append(_get_counts(Xi, cats))
self.categories_.append(cats)
output = {'n_samples': n_samples}
if return_counts:
output['category_counts'] = category_counts
missing_indices = {}
if return_and_ignore_missing_for_infrequent:
for (feature_idx, categories_for_idx) in enumerate(self.categories_):
for (category_idx, category) in enumerate(categories_for_idx):
if is_scalar_nan(category):
missing_indices[feature_idx] = category_idx
break
output['missing_indices'] = missing_indices
if self._infrequent_enabled:
<DeepExtract>
if missing_indices:
category_counts_ = []
for (feature_idx, count) in enumerate(category_counts):
if feature_idx in missing_indices:
category_counts_.append(np.delete(count, missing_indices[feature_idx]))
else:
category_counts_.append(count)
else:
category_counts_ = category_counts
self._infrequent_indices = [self._identify_infrequent(category_count, n_samples, col_idx) for (col_idx, category_count) in enumerate(category_counts_)]
self._default_to_infrequent_mappings = []
for (feature_idx, infreq_idx) in enumerate(self._infrequent_indices):
cats = self.categories_[feature_idx]
if infreq_idx is None:
self._default_to_infrequent_mappings.append(None)
continue
n_cats = len(cats)
if feature_idx in missing_indices:
n_cats -= 1
mapping = np.empty(n_cats, dtype=np.int64)
n_infrequent_cats = infreq_idx.size
n_frequent_cats = n_cats - n_infrequent_cats
mapping[infreq_idx] = n_frequent_cats
frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx)
mapping[frequent_indices] = np.arange(n_frequent_cats)
self._default_to_infrequent_mappings.append(mapping)
</DeepExtract>
return output
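# A small standalone illustration of the frequent/infrequent index mapping built above (illustrative only,
# not part of the encoder): with 5 categories and categories 1 and 3 flagged infrequent, frequent categories
# keep compact codes and all infrequent ones share the last code.
import numpy as np
infreq_idx = np.array([1, 3])
n_cats = 5
mapping = np.empty(n_cats, dtype=np.int64)
mapping[infreq_idx] = n_cats - infreq_idx.size
mapping[np.setdiff1d(np.arange(n_cats), infreq_idx)] = np.arange(n_cats - infreq_idx.size)
# mapping is now [0, 3, 1, 3, 2]: codes 0-2 for the frequent categories, code 3 shared by the infrequent ones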
|
def fit_transform(self, X, y=None):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
self._validate_params()
precomputed = False  # precomputed defaults to False in the inlined _fit call
if precomputed:
if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):
raise ValueError('precomputed is True but the input data is not a mask')
self._precomputed = True
else:
self._precomputed = False
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
else:
self._check_n_features(X, reset=True)
self._n_features = X.shape[1]
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
imputer_mask = missing_features_info[0]
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
|
def fit_transform(self, X, y=None):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
self._validate_params()
<DeepExtract>
precomputed = False  # precomputed defaults to False in the inlined _fit call
if precomputed:
if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):
raise ValueError('precomputed is True but the input data is not a mask')
self._precomputed = True
else:
self._precomputed = False
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
else:
self._check_n_features(X, reset=True)
self._n_features = X.shape[1]
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
imputer_mask = missing_features_info[0]
</DeepExtract>
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
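# A minimal usage sketch of the fit_transform shown above (illustrative only; it assumes the public
# scikit-learn MissingIndicator API with its default parameters):
import numpy as np
from sklearn.impute import MissingIndicator
X = np.array([[1.0, np.nan, 3.0], [4.0, 5.0, np.nan]])
mask = MissingIndicator().fit_transform(X)  # boolean mask with one column per feature that has missing values
# mask == [[True, False], [False, True]] for the two columns that contain missing values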
|
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
first_pass = not hasattr(self, 'n_samples_seen_')
X = self._validate_data(X, reset=first_pass, accept_sparse=('csr', 'csc'), dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
(mins, maxs) = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
if np.isscalar(max_abs):
if max_abs == 0.0:
max_abs = 1.0
self.scale_ = max_abs
elif isinstance(max_abs, np.ndarray):
constant_mask = max_abs < 10 * np.finfo(max_abs.dtype).eps
max_abs = max_abs.copy()
max_abs[constant_mask] = 1.0
self.scale_ = max_abs
return self
|
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
first_pass = not hasattr(self, 'n_samples_seen_')
X = self._validate_data(X, reset=first_pass, accept_sparse=('csr', 'csc'), dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
(mins, maxs) = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
<DeepExtract>
if np.isscalar(max_abs):
if max_abs == 0.0:
max_abs = 1.0
self.scale_ = max_abs
elif isinstance(max_abs, np.ndarray):
constant_mask = max_abs < 10 * np.finfo(max_abs.dtype).eps
max_abs = max_abs.copy()
max_abs[constant_mask] = 1.0
self.scale_ = max_abs
</DeepExtract>
return self
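# A minimal sketch of feeding the partial_fit above with mini-batches (illustrative only; it assumes the
# public scikit-learn MaxAbsScaler API):
import numpy as np
from sklearn.preprocessing import MaxAbsScaler
scaler = MaxAbsScaler()
batches = np.array_split(np.random.RandomState(0).randn(100, 3), 4)
for batch in batches: scaler.partial_fit(batch)  # max_abs_ and scale_ are updated incrementally
X_scaled = scaler.transform(np.random.RandomState(1).randn(5, 3))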
|
def test_huber_warm_start():
rng = np.random.RandomState(0)
(n_samples, n_features) = (50, 20)  # assumed defaults of the inlined make_regression_with_outliers helper
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=0.1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
assert huber_warm.n_iter_ == 0
|
def test_huber_warm_start():
<DeepExtract>
rng = np.random.RandomState(0)
(n_samples, n_features) = (50, 20)  # assumed defaults of the inlined make_regression_with_outliers helper
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=0.1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
assert huber_warm.n_iter_ == 0
|
def test_normalize():
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T)
with pytest.raises(ValueError):
normalize([[0]], axis=2)
with pytest.raises(ValueError):
normalize([[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones(10)
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert X_norm.dtype == dtype
if hasattr(X_norm, 'toarray'):
X_norm = X_norm.toarray()
X_norm = X_norm
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm ** 2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
(_, norms) = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
with pytest.raises(NotImplementedError):
normalize(X_sparse, norm=norm, return_norm=True)
(_, norms) = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
|
def test_normalize():
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T)
with pytest.raises(ValueError):
normalize([[0]], axis=2)
with pytest.raises(ValueError):
normalize([[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones(10)
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert X_norm.dtype == dtype
<DeepExtract>
if hasattr(X_norm, 'toarray'):
X_norm = X_norm.toarray()
X_norm = X_norm
</DeepExtract>
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm ** 2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
(_, norms) = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
with pytest.raises(NotImplementedError):
normalize(X_sparse, norm=norm, return_norm=True)
(_, norms) = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
|
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores, test_scores, durations) in clfs:
print('training %s' % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except Exception:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except Exception:
C = clf.C
try:
intercept = clf.intercept_
except Exception:
intercept = 0.0
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
(train_loss, train_score, test_score, duration) = (train_loss, train_score, test_score, duration)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print('classifier: %s' % name)
print('train_loss: %.8f' % train_loss)
print('train_score: %.8f' % train_score)
print('test_score: %.8f' % test_score)
print('time for fit: %.8f seconds' % duration)
print('')
print('')
return clfs
|
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores, test_scores, durations) in clfs:
print('training %s' % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
<DeepExtract>
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except Exception:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except Exception:
C = clf.C
try:
intercept = clf.intercept_
except Exception:
intercept = 0.0
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
(train_loss, train_score, test_score, duration) = (train_loss, train_score, test_score, duration)
</DeepExtract>
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print('classifier: %s' % name)
print('train_loss: %.8f' % train_loss)
print('train_score: %.8f' % train_score)
print('test_score: %.8f' % test_score)
print('time for fit: %.8f seconds' % duration)
print('')
print('')
return clfs
|
def _iter_test_masks(self, X, y=None, groups=None):
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ('binary', 'multiclass')
if type_of_target_y not in allowed_target_types:
raise ValueError('Supported target types are: {}. Got {!r} instead.'.format(allowed_target_types, type_of_target_y))
y = column_or_1d(y)
(_, y_idx, y_inv) = np.unique(y, return_index=True, return_inverse=True)
(_, class_perm) = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError('n_splits=%d cannot be greater than the number of members in each class.' % self.n_splits)
if self.n_splits > min_groups:
warnings.warn('The least populated class in y has only %d members, which is less than n_splits=%d.' % (min_groups, self.n_splits), UserWarning)
y_order = np.sort(y_encoded)
allocation = np.asarray([np.bincount(y_order[i::self.n_splits], minlength=n_classes) for i in range(self.n_splits)])
test_folds = np.empty(len(y), dtype='i')
for k in range(n_classes):
folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
rng.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
test_folds = test_folds
for i in range(self.n_splits):
yield (test_folds == i)
|
def _iter_test_masks(self, X, y=None, groups=None):
<DeepExtract>
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ('binary', 'multiclass')
if type_of_target_y not in allowed_target_types:
raise ValueError('Supported target types are: {}. Got {!r} instead.'.format(allowed_target_types, type_of_target_y))
y = column_or_1d(y)
(_, y_idx, y_inv) = np.unique(y, return_index=True, return_inverse=True)
(_, class_perm) = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError('n_splits=%d cannot be greater than the number of members in each class.' % self.n_splits)
if self.n_splits > min_groups:
warnings.warn('The least populated class in y has only %d members, which is less than n_splits=%d.' % (min_groups, self.n_splits), UserWarning)
y_order = np.sort(y_encoded)
allocation = np.asarray([np.bincount(y_order[i::self.n_splits], minlength=n_classes) for i in range(self.n_splits)])
test_folds = np.empty(len(y), dtype='i')
for k in range(n_classes):
folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
rng.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
test_folds = test_folds
</DeepExtract>
for i in range(self.n_splits):
yield (test_folds == i)
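# A minimal sketch of how the stratified test masks above surface through the public split API
# (illustrative only; it assumes scikit-learn's StratifiedKFold):
import numpy as np
from sklearn.model_selection import StratifiedKFold
X = np.zeros((12, 2))
y = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=0)
folds = list(skf.split(X, y))  # each test fold keeps the 50/50 class balance of y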
|
@pytest.mark.parametrize('add_noise', [True, False])
def test_non_square_fastica(add_noise):
rng = np.random.RandomState(0)
n_samples = 1000
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
s = np.rollaxis(s, -1)  # axis=-1 is the assumed default of the inlined center_and_norm helper
s -= s.mean(axis=0)
s /= s.std(axis=0)
(s1, s2) = s
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
m = np.rollaxis(m, -1)
m -= m.mean(axis=0)
m /= m.std(axis=0)
(k_, mixing_, s_) = fastica(m.T, n_components=2, whiten='unit-variance', random_state=rng)
s_ = s_.T
assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))
s_ = np.rollaxis(s_, -1)
s_ -= s_.mean(axis=0)
s_ /= s_.std(axis=0)
(s1_, s2_) = s_
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
(s2_, s1_) = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
if not add_noise:
assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=0.001)
assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=0.001)
|
@pytest.mark.parametrize('add_noise', [True, False])
def test_non_square_fastica(add_noise):
rng = np.random.RandomState(0)
n_samples = 1000
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
<DeepExtract>
s = np.rollaxis(s, -1)  # axis=-1 is the assumed default of the inlined center_and_norm helper
s -= s.mean(axis=0)
s /= s.std(axis=0)
</DeepExtract>
(s1, s2) = s
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
<DeepExtract>
m = np.rollaxis(m, -1)
m -= m.mean(axis=0)
m /= m.std(axis=0)
</DeepExtract>
(k_, mixing_, s_) = fastica(m.T, n_components=2, whiten='unit-variance', random_state=rng)
s_ = s_.T
assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))
<DeepExtract>
s_ = np.rollaxis(s_, -1)
s_ -= s_.mean(axis=0)
s_ /= s_.std(axis=0)
</DeepExtract>
(s1_, s2_) = s_
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
(s2_, s1_) = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
if not add_noise:
assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=0.001)
assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=0.001)
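# The repeated three-line blocks above are the inlined body of a small test helper; a standalone sketch of
# that helper (assumed name and signature, with axis defaulting to -1) could look like this:
import numpy as np
def center_and_norm(x, axis=-1): x = np.rollaxis(x, axis); x -= x.mean(axis=0); x /= x.std(axis=0)  # centers and scales x in place through the rolled view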
|
def test_huber_equals_lr_for_high_epsilon():
rng = np.random.RandomState(0)
(n_samples, n_features) = (50, 20)  # assumed defaults of the inlined make_regression_with_outliers helper
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
lr = LinearRegression()
lr.fit(X, y)
huber = HuberRegressor(epsilon=1000.0, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
|
def test_huber_equals_lr_for_high_epsilon():
<DeepExtract>
rng = np.random.RandomState(0)
(n_samples, n_features) = (50, 20)  # assumed defaults of the inlined make_regression_with_outliers helper
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
lr = LinearRegression()
lr.fit(X, y)
huber = HuberRegressor(epsilon=1000.0, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
|
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy, check_input=True, sample_weight=None):
"""Function used at beginning of fit in linear models with L1 or L0 penalty.
This function applies _preprocess_data and additionally computes the gram matrix
`precompute` as needed as well as `Xy`.
"""
(n_samples, n_features) = X.shape
if sparse.isspmatrix(X):
precompute = False
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=False, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES)
elif False:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order='K')
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
(X_offset, X_var) = mean_variance_axis(X, axis=0, weights=sample_weight)
else:
if normalize:
(X_offset, X_var, _) = _incremental_mean_and_var(X, last_mean=0.0, last_variance=0.0, last_sample_count=0.0, sample_weight=sample_weight)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X_offset = X_offset.astype(X.dtype, copy=False)
X -= X_offset
if normalize:
X_var = X_var.astype(X.dtype, copy=False)
constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
if sample_weight is None:
X_var *= X.shape[0]
else:
X_var *= sample_weight.sum()
X_scale = np.sqrt(X_var, out=X_var)
X_scale[constant_mask] = 1.0
if sp.issparse(X):
inplace_column_scale(X, 1.0 / X_scale)
else:
X /= X_scale
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
(X, y, X_offset, y_offset, X_scale) = (X, y, X_offset, y_offset, X_scale)
else:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES)
elif copy:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order='K')
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
(X_offset, X_var) = mean_variance_axis(X, axis=0, weights=sample_weight)
else:
if normalize:
(X_offset, X_var, _) = _incremental_mean_and_var(X, last_mean=0.0, last_variance=0.0, last_sample_count=0.0, sample_weight=sample_weight)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X_offset = X_offset.astype(X.dtype, copy=False)
X -= X_offset
if normalize:
X_var = X_var.astype(X.dtype, copy=False)
constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
if sample_weight is None:
X_var *= X.shape[0]
else:
X_var *= sample_weight.sum()
X_scale = np.sqrt(X_var, out=X_var)
X_scale[constant_mask] = 1.0
if sp.issparse(X):
inplace_column_scale(X, 1.0 / X_scale)
else:
X /= X_scale
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
(X, y, X_offset, y_offset, X_scale) = (X, y, X_offset, y_offset, X_scale)
if sample_weight is not None:
n_samples = X.shape[0]
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 0:
sample_weight = np.full(n_samples, sample_weight, dtype=sample_weight.dtype)
sample_weight_sqrt = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight_sqrt, 0), shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
(X, y, _) = (X, y, sample_weight_sqrt)
if hasattr(precompute, '__array__'):
if fit_intercept and (not np.allclose(X_offset, np.zeros(n_features))) or (normalize and (not np.allclose(X_scale, np.ones(n_features)))):
warnings.warn('Gram matrix was provided but X was centered to fit intercept, or X was normalized : recomputing Gram matrix.', UserWarning)
precompute = 'auto'
Xy = None
elif check_input:
n_features = X.shape[1]
f1 = n_features // 2
f2 = min(f1 + 1, n_features - 1)
v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
expected = np.dot(v1, v2)
actual = precompute[f1, f2]
dtypes = [precompute.dtype, expected.dtype]
(rtol, atol) = (None, 1e-05)  # assumed defaults of the inlined _check_precomputed_gram_matrix helper
if rtol is None:
rtols = [0.0001 if dtype == np.float32 else 1e-07 for dtype in dtypes]
rtol = max(rtols)
if not np.isclose(expected, actual, rtol=rtol, atol=atol):
raise ValueError(f"Gram matrix passed in via 'precompute' parameter did not pass validation when a single element was checked - please check that it was computed properly. For element ({f1},{f2}) we computed {expected} but the user-supplied value was {actual}.")
if isinstance(precompute, str) and precompute == 'auto':
precompute = n_samples > n_features
if precompute is True:
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.result_type(X.dtype, y.dtype)
if y.ndim == 1:
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order='F')
np.dot(y.T, X, out=Xy.T)
return (X, y, X_offset, y_offset, X_scale, precompute, Xy)
|
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy, check_input=True, sample_weight=None):
"""Function used at beginning of fit in linear models with L1 or L0 penalty.
This function applies _preprocess_data and additionally computes the gram matrix
`precompute` as needed as well as `Xy`.
"""
(n_samples, n_features) = X.shape
if sparse.isspmatrix(X):
precompute = False
<DeepExtract>
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=False, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES)
elif False:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order='K')
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
(X_offset, X_var) = mean_variance_axis(X, axis=0, weights=sample_weight)
else:
if normalize:
(X_offset, X_var, _) = _incremental_mean_and_var(X, last_mean=0.0, last_variance=0.0, last_sample_count=0.0, sample_weight=sample_weight)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X_offset = X_offset.astype(X.dtype, copy=False)
X -= X_offset
if normalize:
X_var = X_var.astype(X.dtype, copy=False)
constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
if sample_weight is None:
X_var *= X.shape[0]
else:
X_var *= sample_weight.sum()
X_scale = np.sqrt(X_var, out=X_var)
X_scale[constant_mask] = 1.0
if sp.issparse(X):
inplace_column_scale(X, 1.0 / X_scale)
else:
X /= X_scale
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
(X, y, X_offset, y_offset, X_scale) = (X, y, X_offset, y_offset, X_scale)
</DeepExtract>
else:
<DeepExtract>
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES)
elif copy:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order='K')
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
(X_offset, X_var) = mean_variance_axis(X, axis=0, weights=sample_weight)
else:
if normalize:
(X_offset, X_var, _) = _incremental_mean_and_var(X, last_mean=0.0, last_variance=0.0, last_sample_count=0.0, sample_weight=sample_weight)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X_offset = X_offset.astype(X.dtype, copy=False)
X -= X_offset
if normalize:
X_var = X_var.astype(X.dtype, copy=False)
constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
if sample_weight is None:
X_var *= X.shape[0]
else:
X_var *= sample_weight.sum()
X_scale = np.sqrt(X_var, out=X_var)
X_scale[constant_mask] = 1.0
if sp.issparse(X):
inplace_column_scale(X, 1.0 / X_scale)
else:
X /= X_scale
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
(X, y, X_offset, y_offset, X_scale) = (X, y, X_offset, y_offset, X_scale)
</DeepExtract>
if sample_weight is not None:
<DeepExtract>
n_samples = X.shape[0]
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 0:
sample_weight = np.full(n_samples, sample_weight, dtype=sample_weight.dtype)
sample_weight_sqrt = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight_sqrt, 0), shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
(X, y, _) = (X, y, sample_weight_sqrt)
</DeepExtract>
if hasattr(precompute, '__array__'):
if fit_intercept and (not np.allclose(X_offset, np.zeros(n_features))) or (normalize and (not np.allclose(X_scale, np.ones(n_features)))):
warnings.warn('Gram matrix was provided but X was centered to fit intercept, or X was normalized : recomputing Gram matrix.', UserWarning)
precompute = 'auto'
Xy = None
elif check_input:
<DeepExtract>
n_features = X.shape[1]
f1 = n_features // 2
f2 = min(f1 + 1, n_features - 1)
v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
expected = np.dot(v1, v2)
actual = precompute[f1, f2]
dtypes = [precompute.dtype, expected.dtype]
(rtol, atol) = (None, 1e-05)  # assumed defaults of the inlined _check_precomputed_gram_matrix helper
if rtol is None:
rtols = [0.0001 if dtype == np.float32 else 1e-07 for dtype in dtypes]
rtol = max(rtols)
if not np.isclose(expected, actual, rtol=rtol, atol=atol):
raise ValueError(f"Gram matrix passed in via 'precompute' parameter did not pass validation when a single element was checked - please check that it was computed properly. For element ({f1},{f2}) we computed {expected} but the user-supplied value was {actual}.")
</DeepExtract>
if isinstance(precompute, str) and precompute == 'auto':
precompute = n_samples > n_features
if precompute is True:
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.result_type(X.dtype, y.dtype)
if y.ndim == 1:
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order='F')
np.dot(y.T, X, out=Xy.T)
return (X, y, X_offset, y_offset, X_scale, precompute, Xy)
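# A tiny numeric illustration of the two optional precomputed quantities assembled above
# (illustrative only, dense unweighted case): the Gram matrix is X.T @ X and Xy is X.T @ y.
import numpy as np
X = np.random.RandomState(0).randn(6, 3)
y = np.random.RandomState(1).randn(6)
gram = X.T @ X  # what `precompute` holds once it is materialized
Xy = X.T @ y  # what `Xy` holds when a Gram matrix is available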
|
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
prediction = self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k], axis=1), axis=0)
prediction = predictions
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
|
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
<DeepExtract>
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
prediction = self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k], axis=1), axis=0)
prediction = predictions
</DeepExtract>
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
|
def fit_transform(self, X, y=None):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
self._validate_params()
self.random_state_ = getattr(self, 'random_state_', check_random_state(self.random_state))
if self.estimator is None:
from ..linear_model import BayesianRidge
self._estimator = BayesianRidge()
else:
self._estimator = clone(self.estimator)
self.imputation_sequence_ = []
self.initial_imputer_ = None
if is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
X = self._validate_data(X, dtype=FLOAT_DTYPES, order='F', reset=True, force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(missing_values=self.missing_values, strategy=self.initial_strategy, fill_value=self.fill_value, keep_empty_features=self.keep_empty_features)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
valid_mask = np.flatnonzero(np.logical_not(np.isnan(self.initial_imputer_.statistics_)))
if not self.keep_empty_features:
Xt = X[:, valid_mask]
mask_missing_values = mask_missing_values[:, valid_mask]
else:
mask_missing_values[:, valid_mask] = True
Xt = X
(X, Xt, mask_missing_values, complete_mask) = (Xt, X_filled, mask_missing_values, X_missing_mask)
super()._fit_indicator(complete_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.max_iter == 0 or np.all(mask_missing_values):
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
if Xt.shape[1] == 1:
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
limit_bound = np.inf if 'min' == 'max' else -np.inf
self.min_value = limit_bound if self.min_value is None else self.min_value
if np.isscalar(self.min_value):
self.min_value = np.full(X.shape[1], self.min_value)
self.min_value = check_array(self.min_value, force_all_finite=False, copy=False, ensure_2d=False)
if not self.min_value.shape[0] == X.shape[1]:
raise ValueError(f"'{'min'}_value' should be of shape ({X.shape[1]},) when an array-like is provided. Got {self.min_value.shape}, instead.")
self._min_value = self.min_value
limit_bound = np.inf if 'max' == 'max' else -np.inf
self.max_value = limit_bound if self.max_value is None else self.max_value
if np.isscalar(self.max_value):
self.max_value = np.full(X.shape[1], self.max_value)
self.max_value = check_array(self.max_value, force_all_finite=False, copy=False, ensure_2d=False)
if not self.max_value.shape[0] == X.shape[1]:
raise ValueError(f"'{'max'}_value' should be of shape ({X.shape[1]},) when an array-like is provided. Got {self.max_value.shape}, instead.")
self._max_value = self.max_value
if not np.all(np.greater(self._max_value, self._min_value)):
raise ValueError('One (or more) features have min_value >= max_value.')
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == 'roman':
ordered_idx = missing_values_idx
elif self.imputation_order == 'arabic':
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == 'ascending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:]
elif self.imputation_order == 'descending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:][::-1]
elif self.imputation_order == 'random':
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
ordered_idx = ordered_idx
self.n_features_with_missing_ = len(ordered_idx)
n_features = Xt.shape[1]
tolerance = 1e-06  # assumed default of the inlined _get_abs_corr_mat helper
if self.n_nearest_features is None or self.n_nearest_features >= n_features:
abs_corr_mat = None
else:
with np.errstate(invalid='ignore'):
abs_corr_mat = np.abs(np.corrcoef(Xt.T))
abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
np.fill_diagonal(abs_corr_mat, 0)
abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)
(n_samples, n_features) = Xt.shape
if self.verbose > 0:
print('[IterativeImputer] Completing matrix with shape %s' % (X.shape,))
start_t = time()
if not self.sample_posterior:
Xt_previous = Xt.copy()
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
for self.n_iter_ in range(1, self.max_iter + 1):
if self.imputation_order == 'random':
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == 'roman':
ordered_idx = missing_values_idx
elif self.imputation_order == 'arabic':
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == 'ascending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:]
elif self.imputation_order == 'descending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:][::-1]
elif self.imputation_order == 'random':
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
ordered_idx = ordered_idx
for feat_idx in ordered_idx:
if self.n_nearest_features is not None and self.n_nearest_features < n_features:
p = abs_corr_mat[:, feat_idx]
neighbor_feat_idx = self.random_state_.choice(np.arange(n_features), self.n_nearest_features, replace=False, p=p)
else:
inds_left = np.arange(feat_idx)
inds_right = np.arange(feat_idx + 1, n_features)
neighbor_feat_idx = np.concatenate((inds_left, inds_right))
neighbor_feat_idx = neighbor_feat_idx
estimator = None  # the inlined _impute_one_feature call used estimator=None and fit_mode=True
if estimator is None and True is False:
raise ValueError('If fit_mode is False, then an already-fitted estimator should be passed in.')
if estimator is None:
estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, feat_idx]
if True:
X_train = _safe_indexing(_safe_indexing(Xt, neighbor_feat_idx, axis=1), ~missing_row_mask, axis=0)
y_train = _safe_indexing(_safe_indexing(Xt, feat_idx, axis=1), ~missing_row_mask, axis=0)
estimator.fit(X_train, y_train)
if np.sum(missing_row_mask) == 0:
(Xt, estimator) = (Xt, estimator)
X_test = _safe_indexing(_safe_indexing(Xt, neighbor_feat_idx, axis=1), missing_row_mask, axis=0)
if self.sample_posterior:
(mus, sigmas) = estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=Xt.dtype)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value[feat_idx]
imputed_values[mus_too_low] = self._min_value[feat_idx]
mus_too_high = mus > self._max_value[feat_idx]
imputed_values[mus_too_high] = self._max_value[feat_idx]
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value[feat_idx] - mus) / sigmas
b = (self._max_value[feat_idx] - mus) / sigmas
truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(random_state=self.random_state_)
else:
imputed_values = estimator.predict(X_test)
imputed_values = np.clip(imputed_values, self._min_value[feat_idx], self._max_value[feat_idx])
_safe_assign(Xt, imputed_values, row_indexer=missing_row_mask, column_indexer=feat_idx)
(Xt, estimator) = (Xt, estimator)
estimator_triplet = _ImputerTriplet(feat_idx, neighbor_feat_idx, estimator)
self.imputation_sequence_.append(estimator_triplet)
if self.verbose > 1:
print('[IterativeImputer] Ending imputation round %d/%d, elapsed time %0.2f' % (self.n_iter_, self.max_iter, time() - start_t))
if not self.sample_posterior:
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
if self.verbose > 0:
print('[IterativeImputer] Change: {}, scaled tolerance: {} '.format(inf_norm, normalized_tol))
if inf_norm < normalized_tol:
if self.verbose > 0:
print('[IterativeImputer] Early stopping criterion reached.')
break
Xt_previous = Xt.copy()
else:
if not self.sample_posterior:
warnings.warn('[IterativeImputer] Early stopping criterion not reached.', ConvergenceWarning)
if hasattr(Xt, 'mask'):
Xt.mask(cond=~mask_missing_values, other=X, inplace=True)
else:
Xt[~mask_missing_values] = X[~mask_missing_values]
return super()._concatenate_indicator(Xt, X_indicator)
|
def fit_transform(self, X, y=None):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
self._validate_params()
self.random_state_ = getattr(self, 'random_state_', check_random_state(self.random_state))
if self.estimator is None:
from ..linear_model import BayesianRidge
self._estimator = BayesianRidge()
else:
self._estimator = clone(self.estimator)
self.imputation_sequence_ = []
self.initial_imputer_ = None
<DeepExtract>
if is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
X = self._validate_data(X, dtype=FLOAT_DTYPES, order='F', reset=True, force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(missing_values=self.missing_values, strategy=self.initial_strategy, fill_value=self.fill_value, keep_empty_features=self.keep_empty_features)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
valid_mask = np.flatnonzero(np.logical_not(np.isnan(self.initial_imputer_.statistics_)))
if not self.keep_empty_features:
Xt = X[:, valid_mask]
mask_missing_values = mask_missing_values[:, valid_mask]
else:
mask_missing_values[:, valid_mask] = True
Xt = X
(X, Xt, mask_missing_values, complete_mask) = (Xt, X_filled, mask_missing_values, X_missing_mask)
</DeepExtract>
super()._fit_indicator(complete_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.max_iter == 0 or np.all(mask_missing_values):
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
if Xt.shape[1] == 1:
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
<DeepExtract>
limit_bound = np.inf if 'min' == 'max' else -np.inf
self.min_value = limit_bound if self.min_value is None else self.min_value
if np.isscalar(self.min_value):
self.min_value = np.full(X.shape[1], self.min_value)
self.min_value = check_array(self.min_value, force_all_finite=False, copy=False, ensure_2d=False)
if not self.min_value.shape[0] == X.shape[1]:
raise ValueError(f"'{'min'}_value' should be of shape ({X.shape[1]},) when an array-like is provided. Got {self.min_value.shape}, instead.")
self._min_value = self.min_value
</DeepExtract>
<DeepExtract>
limit_bound = np.inf if 'max' == 'max' else -np.inf
self.max_value = limit_bound if self.max_value is None else self.max_value
if np.isscalar(self.max_value):
self.max_value = np.full(X.shape[1], self.max_value)
self.max_value = check_array(self.max_value, force_all_finite=False, copy=False, ensure_2d=False)
if not self.max_value.shape[0] == X.shape[1]:
raise ValueError(f"'{'max'}_value' should be of shape ({X.shape[1]},) when an array-like is provided. Got {self.max_value.shape}, instead.")
self._max_value = self.max_value
</DeepExtract>
if not np.all(np.greater(self._max_value, self._min_value)):
raise ValueError('One (or more) features have min_value >= max_value.')
<DeepExtract>
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == 'roman':
ordered_idx = missing_values_idx
elif self.imputation_order == 'arabic':
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == 'ascending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:]
elif self.imputation_order == 'descending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:][::-1]
elif self.imputation_order == 'random':
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
ordered_idx = ordered_idx
</DeepExtract>
self.n_features_with_missing_ = len(ordered_idx)
<DeepExtract>
n_features = Xt.shape[1]
tolerance = 1e-06  # assumed default of the inlined _get_abs_corr_mat helper
if self.n_nearest_features is None or self.n_nearest_features >= n_features:
abs_corr_mat = None
else:
with np.errstate(invalid='ignore'):
abs_corr_mat = np.abs(np.corrcoef(Xt.T))
abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
np.fill_diagonal(abs_corr_mat, 0)
abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)
</DeepExtract>
(n_samples, n_features) = Xt.shape
if self.verbose > 0:
print('[IterativeImputer] Completing matrix with shape %s' % (X.shape,))
start_t = time()
if not self.sample_posterior:
Xt_previous = Xt.copy()
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
for self.n_iter_ in range(1, self.max_iter + 1):
if self.imputation_order == 'random':
<DeepExtract>
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == 'roman':
ordered_idx = missing_values_idx
elif self.imputation_order == 'arabic':
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == 'ascending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:]
elif self.imputation_order == 'descending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:][::-1]
elif self.imputation_order == 'random':
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
ordered_idx = ordered_idx
</DeepExtract>
for feat_idx in ordered_idx:
<DeepExtract>
if self.n_nearest_features is not None and self.n_nearest_features < n_features:
p = abs_corr_mat[:, feat_idx]
neighbor_feat_idx = self.random_state_.choice(np.arange(n_features), self.n_nearest_features, replace=False, p=p)
else:
inds_left = np.arange(feat_idx)
inds_right = np.arange(feat_idx + 1, n_features)
neighbor_feat_idx = np.concatenate((inds_left, inds_right))
neighbor_feat_idx = neighbor_feat_idx
</DeepExtract>
<DeepExtract>
estimator = None  # the inlined _impute_one_feature call used estimator=None and fit_mode=True
if estimator is None and True is False:
raise ValueError('If fit_mode is False, then an already-fitted estimator should be passed in.')
if estimator is None:
estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, feat_idx]
if True:
X_train = _safe_indexing(_safe_indexing(Xt, neighbor_feat_idx, axis=1), ~missing_row_mask, axis=0)
y_train = _safe_indexing(_safe_indexing(Xt, feat_idx, axis=1), ~missing_row_mask, axis=0)
estimator.fit(X_train, y_train)
if np.sum(missing_row_mask) == 0:
(Xt, estimator) = (Xt, estimator)
X_test = _safe_indexing(_safe_indexing(Xt, neighbor_feat_idx, axis=1), missing_row_mask, axis=0)
if self.sample_posterior:
(mus, sigmas) = estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=Xt.dtype)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value[feat_idx]
imputed_values[mus_too_low] = self._min_value[feat_idx]
mus_too_high = mus > self._max_value[feat_idx]
imputed_values[mus_too_high] = self._max_value[feat_idx]
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value[feat_idx] - mus) / sigmas
b = (self._max_value[feat_idx] - mus) / sigmas
truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(random_state=self.random_state_)
else:
imputed_values = estimator.predict(X_test)
imputed_values = np.clip(imputed_values, self._min_value[feat_idx], self._max_value[feat_idx])
_safe_assign(Xt, imputed_values, row_indexer=missing_row_mask, column_indexer=feat_idx)
(Xt, estimator) = (Xt, estimator)
</DeepExtract>
estimator_triplet = _ImputerTriplet(feat_idx, neighbor_feat_idx, estimator)
self.imputation_sequence_.append(estimator_triplet)
if self.verbose > 1:
print('[IterativeImputer] Ending imputation round %d/%d, elapsed time %0.2f' % (self.n_iter_, self.max_iter, time() - start_t))
if not self.sample_posterior:
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
if self.verbose > 0:
print('[IterativeImputer] Change: {}, scaled tolerance: {} '.format(inf_norm, normalized_tol))
if inf_norm < normalized_tol:
if self.verbose > 0:
print('[IterativeImputer] Early stopping criterion reached.')
break
Xt_previous = Xt.copy()
else:
if not self.sample_posterior:
warnings.warn('[IterativeImputer] Early stopping criterion not reached.', ConvergenceWarning)
<DeepExtract>
if hasattr(Xt, 'mask'):
Xt.mask(cond=~mask_missing_values, other=X, inplace=True)
else:
Xt[~mask_missing_values] = X[~mask_missing_values]
</DeepExtract>
return super()._concatenate_indicator(Xt, X_indicator)
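# A minimal usage sketch of the round-robin imputation loop implemented above (illustrative only; it
# assumes scikit-learn's experimental IterativeImputer API):
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer
X = np.array([[1.0, 2.0], [3.0, np.nan], [np.nan, 6.0], [8.0, 8.0]])
X_imputed = IterativeImputer(max_iter=10, random_state=0).fit_transform(X)  # NaNs replaced by model-based estimates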
|
def make_constraint(constraint):
"""Convert the constraint into the appropriate Constraint object.
Parameters
----------
constraint : object
The constraint to convert.
Returns
-------
constraint : instance of _Constraint
The converted constraint.
"""
if isinstance(constraint, str) and constraint == 'array-like':
return _ArrayLikes()
if isinstance(constraint, str) and constraint == 'sparse matrix':
return _SparseMatrices()
if isinstance(constraint, str) and constraint == 'random_state':
return _RandomStates()
if constraint is callable:
return _Callables()
if constraint is None:
return _NoneConstraint()
if isinstance(constraint, type):
return _InstancesOf(constraint)
if isinstance(constraint, (Interval, StrOptions, Options, HasMethods)):
return constraint
if isinstance(constraint, str) and constraint == 'boolean':
return _Booleans()
if isinstance(constraint, str) and constraint == 'verbose':
return _VerboseHelper()
if isinstance(constraint, str) and constraint == 'missing_values':
return _MissingValues()
if isinstance(constraint, str) and constraint == 'cv_object':
return _CVObjects()
if isinstance(constraint, Hidden):
if isinstance(constraint.constraint, str) and constraint.constraint == 'array-like':
constraint.constraint = _ArrayLikes()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'sparse matrix':
constraint.constraint = _SparseMatrices()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'random_state':
constraint.constraint = _RandomStates()
elif constraint.constraint is callable:
constraint.constraint = _Callables()
elif constraint.constraint is None:
constraint.constraint = _NoneConstraint()
elif isinstance(constraint.constraint, type):
constraint.constraint = _InstancesOf(constraint.constraint)
elif isinstance(constraint.constraint, (Interval, StrOptions, Options, HasMethods)):
pass  # already a constraint instance; keep it as-is
elif isinstance(constraint.constraint, str) and constraint.constraint == 'boolean':
constraint.constraint = _Booleans()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'verbose':
constraint.constraint = _VerboseHelper()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'missing_values':
constraint.constraint = _MissingValues()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'cv_object':
constraint.constraint = _CVObjects()
elif isinstance(constraint.constraint, Hidden):
constraint.constraint = make_constraint(constraint.constraint.constraint)
constraint.constraint.hidden = True
else:
raise ValueError(f'Unknown constraint type: {constraint.constraint}')
constraint.hidden = True
return constraint
raise ValueError(f'Unknown constraint type: {constraint}')
|
def make_constraint(constraint):
"""Convert the constraint into the appropriate Constraint object.
Parameters
----------
constraint : object
The constraint to convert.
Returns
-------
constraint : instance of _Constraint
The converted constraint.
"""
if isinstance(constraint, str) and constraint == 'array-like':
return _ArrayLikes()
if isinstance(constraint, str) and constraint == 'sparse matrix':
return _SparseMatrices()
if isinstance(constraint, str) and constraint == 'random_state':
return _RandomStates()
if constraint is callable:
return _Callables()
if constraint is None:
return _NoneConstraint()
if isinstance(constraint, type):
return _InstancesOf(constraint)
if isinstance(constraint, (Interval, StrOptions, Options, HasMethods)):
return constraint
if isinstance(constraint, str) and constraint == 'boolean':
return _Booleans()
if isinstance(constraint, str) and constraint == 'verbose':
return _VerboseHelper()
if isinstance(constraint, str) and constraint == 'missing_values':
return _MissingValues()
if isinstance(constraint, str) and constraint == 'cv_object':
return _CVObjects()
if isinstance(constraint, Hidden):
<DeepExtract>
if isinstance(constraint.constraint, str) and constraint.constraint == 'array-like':
constraint.constraint = _ArrayLikes()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'sparse matrix':
constraint.constraint = _SparseMatrices()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'random_state':
constraint.constraint = _RandomStates()
elif constraint.constraint is callable:
constraint.constraint = _Callables()
elif constraint.constraint is None:
constraint.constraint = _NoneConstraint()
elif isinstance(constraint.constraint, type):
constraint.constraint = _InstancesOf(constraint.constraint)
elif isinstance(constraint.constraint, (Interval, StrOptions, Options, HasMethods)):
pass  # already a constraint instance; keep it as-is
elif isinstance(constraint.constraint, str) and constraint.constraint == 'boolean':
constraint.constraint = _Booleans()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'verbose':
constraint.constraint = _VerboseHelper()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'missing_values':
constraint.constraint = _MissingValues()
elif isinstance(constraint.constraint, str) and constraint.constraint == 'cv_object':
constraint.constraint = _CVObjects()
elif isinstance(constraint.constraint, Hidden):
constraint.constraint = make_constraint(constraint.constraint.constraint)
constraint.constraint.hidden = True
else:
raise ValueError(f'Unknown constraint type: {constraint.constraint}')
</DeepExtract>
constraint.hidden = True
return constraint
raise ValueError(f'Unknown constraint type: {constraint}')
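# A minimal sketch of what the dispatch above returns for a few inputs (illustrative only; it assumes the
# private sklearn.utils._param_validation module keeps these names, so treat it as an internal-API example):
from numbers import Integral
from sklearn.utils._param_validation import Hidden, Interval, make_constraint
c1 = make_constraint('boolean')  # a _Booleans instance
c2 = make_constraint(Interval(Integral, 1, None, closed='left'))  # returned unchanged
c3 = make_constraint(Hidden('boolean'))  # unwrapped to a _Booleans instance with .hidden set to True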
|
def _get_estimators_indices(self):
for seed in self._seeds:
seed = check_random_state(seed)
feature_indices = _generate_indices(seed, self.bootstrap_features, self.n_features_in_, self._max_features)
sample_indices = _generate_indices(seed, self.bootstrap, self._n_samples, self._max_samples)
(feature_indices, sample_indices) = (feature_indices, sample_indices)
yield (feature_indices, sample_indices)
|
def _get_estimators_indices(self):
for seed in self._seeds:
<DeepExtract>
seed = check_random_state(seed)
feature_indices = _generate_indices(seed, self.bootstrap_features, self.n_features_in_, self._max_features)
sample_indices = _generate_indices(seed, self.bootstrap, self._n_samples, self._max_samples)
(feature_indices, sample_indices) = (feature_indices, sample_indices)
</DeepExtract>
yield (feature_indices, sample_indices)
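# Standalone sketch of the idea behind _get_estimators_indices: only the
# per-estimator seeds are stored, so the exact bagging indices can be
# regenerated on demand instead of being kept in memory. draw_indices below is
# a simplified stand-in for sklearn's private _generate_indices helper, written
# here purely for illustration.
import numpy as np
from sklearn.utils import check_random_state

def draw_indices(seed, bootstrap, n_population, n_draws):
    rng = check_random_state(seed)
    if bootstrap:
        return rng.randint(0, n_population, n_draws)
    return rng.permutation(n_population)[:n_draws]

seeds = [0, 1, 2]  # what self._seeds would hold after fit
first = [draw_indices(s, True, 100, 100) for s in seeds]
second = [draw_indices(s, True, 100, 100) for s in seeds]
assert all(np.array_equal(a, b) for a, b in zip(first, second))  # same seed -> same indices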
|
def __str__(self):
if hasattr(self, 'name'):
return self.name
return self.__class__.__name__ + ': ...'
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
if isinstance(self.expr, unicode):
retString = self.expr
try:
retString = str(self.expr)
except UnicodeEncodeError:
ret = unicode(self.expr).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
retString = xmlcharref.transformString(ret)
else:
retString = 'None'
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ': ' + retString
|
def __str__(self):
if hasattr(self, 'name'):
return self.name
return self.__class__.__name__ + ': ...'
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
<DeepExtract>
if isinstance(self.expr, unicode):
retString = self.expr
try:
retString = str(self.expr)
except UnicodeEncodeError:
ret = unicode(self.expr).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
retString = xmlcharref.transformString(ret)
</DeepExtract>
else:
retString = 'None'
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ': ' + retString
|
def test_1d_multioutput_enet_and_multitask_enet_cv():
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(10, n_targets)
else:
w = random_state.randn(10)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, 10)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, 10)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=0.002, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=0.002, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
|
def test_1d_multioutput_enet_and_multitask_enet_cv():
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(10, n_targets)
else:
w = random_state.randn(10)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, 10)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, 10)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=0.002, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=0.002, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
|
@ignore_warnings(category=FutureWarning)
def check_estimators_overwrite_params(name, estimator_orig):
(X, y) = make_blobs(random_state=0, n_samples=21)
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = rbf_kernel(X, X)
X = X
estimator = clone(estimator_orig)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
set_random_state(estimator)
params = estimator.get_params()
original_params = deepcopy(params)
estimator.fit(X, y)
new_params = estimator.get_params()
for (param_name, original_value) in original_params.items():
new_value = new_params[param_name]
assert joblib.hash(new_value) == joblib.hash(original_value), 'Estimator %s should not change or mutate the parameter %s from %s to %s during fit.' % (name, param_name, original_value, new_value)
|
@ignore_warnings(category=FutureWarning)
def check_estimators_overwrite_params(name, estimator_orig):
(X, y) = make_blobs(random_state=0, n_samples=21)
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = rbf_kernel(X, X)
X = X
</DeepExtract>
estimator = clone(estimator_orig)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
set_random_state(estimator)
params = estimator.get_params()
original_params = deepcopy(params)
estimator.fit(X, y)
new_params = estimator.get_params()
for (param_name, original_value) in original_params.items():
new_value = new_params[param_name]
assert joblib.hash(new_value) == joblib.hash(original_value), 'Estimator %s should not change or mutate the parameter %s from %s to %s during fit.' % (name, param_name, original_value, new_value)
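# Minimal sketch of the invariant this check enforces: fit() must not mutate
# the values passed to __init__. joblib.hash gives a cheap structural
# comparison even for array-valued parameters; LogisticRegression is just a
# convenient example estimator here.
from copy import deepcopy
import joblib
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

X_demo, y_demo = make_blobs(random_state=0, n_samples=21)
est = LogisticRegression(C=0.5)
params_before = deepcopy(est.get_params())
est.fit(X_demo, y_demo)
for pname, original in params_before.items():
    assert joblib.hash(est.get_params()[pname]) == joblib.hash(original), pname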
|
@ignore_warnings(category=FutureWarning)
def check_supervised_y_no_nan(name, estimator_orig):
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.standard_normal(size=(10, 5))
for value in [np.nan, np.inf]:
y = np.full(10, value)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
module_name = estimator.__module__
if module_name.startswith('sklearn.') and (not ('test_' in module_name or module_name.endswith('_testing'))):
if np.isinf(value):
match = "Input (y|Y) contains infinity or a value too large for dtype\\('float64'\\)."
else:
match = 'Input (y|Y) contains NaN.'
else:
match = None
err_msg = f'Estimator {name} should have raised error on fitting array y with inf value.'
with raises(ValueError, match=match, err_msg=err_msg):
estimator.fit(X, y)
|
@ignore_warnings(category=FutureWarning)
def check_supervised_y_no_nan(name, estimator_orig):
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.standard_normal(size=(10, 5))
for value in [np.nan, np.inf]:
y = np.full(10, value)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
module_name = estimator.__module__
if module_name.startswith('sklearn.') and (not ('test_' in module_name or module_name.endswith('_testing'))):
if np.isinf(value):
match = "Input (y|Y) contains infinity or a value too large for dtype\\('float64'\\)."
else:
match = 'Input (y|Y) contains NaN.'
else:
match = None
err_msg = f'Estimator {name} should have raised error on fitting array y with inf value.'
with raises(ValueError, match=match, err_msg=err_msg):
estimator.fit(X, y)
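# Small demonstration of the behaviour asserted above: scikit-learn estimators
# reject targets containing NaN or infinity with a ValueError during fit.
# Ridge is used purely as an example, and pytest.raises stands in for the
# internal raises() helper.
import numpy as np
import pytest
from sklearn.linear_model import Ridge

rng_demo = np.random.RandomState(888)
X_demo = rng_demo.standard_normal(size=(10, 5))
for bad_value in (np.nan, np.inf):
    with pytest.raises(ValueError):
        Ridge().fit(X_demo, np.full(10, bad_value))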
|
def randomized_range_finder(A, *, size, n_iter, power_iteration_normalizer='auto', random_state=None):
"""Compute an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : 2D array
The input data matrix.
size : int
Size of the return array.
n_iter : int
Number of power iterations used to stabilize the result.
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
Q : ndarray
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
:arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
<0909.4061>`
Halko, et al. (2009)
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
Q = random_state.normal(size=(A.shape[1], size))
if hasattr(A, 'dtype') and A.dtype.kind == 'f':
Q = Q.astype(A.dtype, copy=False)
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
for i in range(n_iter):
if power_iteration_normalizer == 'none':
if A.ndim > 2 or Q.ndim > 2:
if sparse.issparse(A):
b_ = np.rollaxis(Q, -2)
b_2d = b_.reshape((Q.shape[-2], -1))
ret = A @ b_2d
ret = ret.reshape(A.shape[0], *b_.shape[1:])
elif sparse.issparse(Q):
a_2d = A.reshape(-1, A.shape[-1])
ret = a_2d @ Q
ret = ret.reshape(*A.shape[:-1], Q.shape[1])
else:
ret = np.dot(A, Q)
else:
ret = A @ Q
if sparse.issparse(A) and sparse.issparse(Q) and dense_output and hasattr(ret, 'toarray'):
Q = ret.toarray()
Q = ret
if A.T.ndim > 2 or Q.ndim > 2:
if sparse.issparse(A.T):
b_ = np.rollaxis(Q, -2)
b_2d = b_.reshape((Q.shape[-2], -1))
ret = A.T @ b_2d
ret = ret.reshape(A.T.shape[0], *b_.shape[1:])
elif sparse.issparse(Q):
a_2d = A.T.reshape(-1, A.T.shape[-1])
ret = a_2d @ Q
ret = ret.reshape(*A.T.shape[:-1], Q.shape[1])
else:
ret = np.dot(A.T, Q)
else:
ret = A.T @ Q
if sparse.issparse(A.T) and sparse.issparse(Q) and dense_output and hasattr(ret, 'toarray'):
Q = ret.toarray()
Q = ret
elif power_iteration_normalizer == 'LU':
(Q, _) = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
(Q, _) = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
(Q, _) = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
(Q, _) = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
(Q, _) = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
|
def randomized_range_finder(A, *, size, n_iter, power_iteration_normalizer='auto', random_state=None):
"""Compute an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : 2D array
The input data matrix.
size : int
Size of the return array.
n_iter : int
Number of power iterations used to stabilize the result.
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
Q : ndarray
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
:arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
<0909.4061>`
Halko, et al. (2009)
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
Q = random_state.normal(size=(A.shape[1], size))
if hasattr(A, 'dtype') and A.dtype.kind == 'f':
Q = Q.astype(A.dtype, copy=False)
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
for i in range(n_iter):
if power_iteration_normalizer == 'none':
<DeepExtract>
if A.ndim > 2 or Q.ndim > 2:
if sparse.issparse(A):
b_ = np.rollaxis(Q, -2)
b_2d = b_.reshape((Q.shape[-2], -1))
ret = A @ b_2d
ret = ret.reshape(A.shape[0], *b_.shape[1:])
elif sparse.issparse(Q):
a_2d = A.reshape(-1, A.shape[-1])
ret = a_2d @ Q
ret = ret.reshape(*A.shape[:-1], Q.shape[1])
else:
ret = np.dot(A, Q)
else:
ret = A @ Q
if sparse.issparse(A) and sparse.issparse(Q) and dense_output and hasattr(ret, 'toarray'):
Q = ret.toarray()
Q = ret
</DeepExtract>
<DeepExtract>
if A.T.ndim > 2 or Q.ndim > 2:
if sparse.issparse(A.T):
b_ = np.rollaxis(Q, -2)
b_2d = b_.reshape((Q.shape[-2], -1))
ret = A.T @ b_2d
ret = ret.reshape(A.T.shape[0], *b_.shape[1:])
elif sparse.issparse(Q):
a_2d = A.T.reshape(-1, A.T.shape[-1])
ret = a_2d @ Q
ret = ret.reshape(*A.T.shape[:-1], Q.shape[1])
else:
ret = np.dot(A.T, Q)
else:
ret = A.T @ Q
if sparse.issparse(A.T) and sparse.issparse(Q) and dense_output and hasattr(ret, 'toarray'):
Q = ret.toarray()
Q = ret
</DeepExtract>
elif power_iteration_normalizer == 'LU':
(Q, _) = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
(Q, _) = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
(Q, _) = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
(Q, _) = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
(Q, _) = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
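# Usage sketch for randomized_range_finder: the returned Q has orthonormal
# columns and its range approximates the dominant range of A. The matrix sizes
# and n_iter below are arbitrary illustrative choices.
import numpy as np
from sklearn.utils.extmath import randomized_range_finder

rng_demo = np.random.RandomState(0)
A_demo = rng_demo.randn(200, 80)
Q_demo = randomized_range_finder(A_demo, size=10, n_iter=4, random_state=0)
assert Q_demo.shape == (200, 10)
assert np.allclose(Q_demo.T @ Q_demo, np.eye(10), atol=1e-8)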
|
def test_enet_multitarget():
n_targets = 3
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(8, n_targets)
else:
w = random_state.randn(8)
w[10:] = 0.0
X = random_state.randn(10, 8)
y = np.dot(X, w)
X_test = random_state.randn(10, 8)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
estimator = ElasticNet(alpha=0.01)
estimator.fit(X, y)
(coef, intercept, dual_gap) = (estimator.coef_, estimator.intercept_, estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
|
def test_enet_multitarget():
n_targets = 3
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(8, n_targets)
else:
w = random_state.randn(8)
w[10:] = 0.0
X = random_state.randn(10, 8)
y = np.dot(X, w)
X_test = random_state.randn(10, 8)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
estimator = ElasticNet(alpha=0.01)
estimator.fit(X, y)
(coef, intercept, dual_gap) = (estimator.coef_, estimator.intercept_, estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
|
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100), decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100), decimal=1)
assert_almost_equal(scaled.sum(axis=0).mean(), scaled.sum(axis=1).mean(), decimal=1)
|
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
<DeepExtract>
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100), decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100), decimal=1)
</DeepExtract>
assert_almost_equal(scaled.sum(axis=0).mean(), scaled.sum(axis=1).mean(), decimal=1)
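# Tiny illustration of the property being checked: in a bistochastic matrix
# every row and every column sums to the same constant. The helper above
# hard-codes a 100x100 shape, so this toy matrix does too.
import numpy as np

scaled_demo = np.full((100, 100), 0.01)  # every row and column sums to 1.0
assert np.allclose(scaled_demo.sum(axis=1), 1.0)
assert np.allclose(scaled_demo.sum(axis=0), 1.0)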
|
def _check_sample_weight(sample_weight, X, dtype=None, copy=False, only_non_negative=False):
"""Validate sample weights.
Note that passing sample_weight=None will output an array of ones.
Therefore, in some cases, you may want to protect the call with:
if sample_weight is not None:
sample_weight = _check_sample_weight(...)
Parameters
----------
sample_weight : {ndarray, Number or None}, shape (n_samples,)
Input sample weights.
X : {ndarray, list, sparse matrix}
Input data.
    only_non_negative : bool, default=False
Whether or not the weights are expected to be non-negative.
.. versionadded:: 1.0
dtype : dtype, default=None
dtype of the validated `sample_weight`.
If None, and the input `sample_weight` is an array, the dtype of the
input is preserved; otherwise an array with the default numpy dtype
is be allocated. If `dtype` is not one of `float32`, `float64`,
`None`, the output will be of dtype `float64`.
copy : bool, default=False
If True, a copy of sample_weight will be created.
Returns
-------
sample_weight : ndarray of shape (n_samples,)
Validated sample weight. It is guaranteed to be "C" contiguous.
"""
message = 'Expected sequence or array-like, got %s' % type(X)
if hasattr(X, 'fit') and callable(X.fit):
raise TypeError(message)
if not hasattr(X, '__len__') and (not hasattr(X, 'shape')):
if hasattr(X, '__array__'):
X = np.asarray(X)
else:
raise TypeError(message)
if hasattr(X, 'shape') and X.shape is not None:
if len(X.shape) == 0:
raise TypeError('Singleton array %r cannot be considered a valid collection.' % X)
if isinstance(X.shape[0], numbers.Integral):
n_samples = X.shape[0]
try:
n_samples = len(X)
except TypeError as type_error:
raise TypeError(message) from type_error
if dtype is not None and dtype not in [np.float32, np.float64]:
dtype = np.float64
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=dtype)
elif isinstance(sample_weight, numbers.Number):
sample_weight = np.full(n_samples, sample_weight, dtype=dtype)
else:
if dtype is None:
dtype = [np.float64, np.float32]
if isinstance(sample_weight, np.matrix):
raise TypeError('np.matrix is not supported. Please convert to a numpy array with np.asarray. For more information see: https://numpy.org/doc/stable/reference/generated/numpy.matrix.html')
(xp, is_array_api) = get_namespace(sample_weight)
array_orig = sample_weight
dtype_numeric = isinstance(dtype, str) and dtype == 'numeric'
dtype_orig = getattr(sample_weight, 'dtype', None)
if not hasattr(dtype_orig, 'kind'):
dtype_orig = None
dtypes_orig = None
pandas_requires_conversion = False
if hasattr(sample_weight, 'dtypes') and hasattr(sample_weight.dtypes, '__array__'):
with suppress(ImportError):
from pandas.api.types import is_sparse
if not hasattr(sample_weight, 'sparse') and sample_weight.dtypes.apply(is_sparse).any():
warnings.warn('pandas.DataFrame with sparse columns found.It will be converted to a dense numpy array.')
dtypes_orig = list(sample_weight.dtypes)
pandas_requires_conversion = any((_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig))
if all((isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig)):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any((d == object for d in dtypes_orig)):
dtype_orig = object
elif (_is_extension_array_dtype(sample_weight) or hasattr(sample_weight, 'iloc')) and hasattr(sample_weight, 'dtype'):
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(sample_weight.dtype)
if isinstance(sample_weight.dtype, np.dtype):
dtype_orig = sample_weight.dtype
else:
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == 'O':
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
dtype = None
else:
dtype = dtype[0]
if pandas_requires_conversion:
new_dtype = dtype_orig if dtype is None else dtype
sample_weight = sample_weight.astype(new_dtype)
dtype = None
if force_all_finite not in (True, False, 'allow-nan'):
raise ValueError('force_all_finite should be a bool or "allow-nan". Got {!r} instead'.format(force_all_finite))
estimator_name = _check_estimator_name(estimator)
context = ' by %s' % estimator_name if estimator is not None else ''
if hasattr(sample_weight, 'sparse') and sample_weight.ndim > 1:
with suppress(ImportError):
from pandas.api.types import is_sparse
if sample_weight.dtypes.apply(is_sparse).all():
sample_weight = sample_weight.sparse.to_coo()
if sample_weight.dtype == np.dtype('object'):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError('Pandas DataFrame with mixed sparse extension arrays generated a sparse matrix with object dtype which can not be converted to a scipy sparse matrix.Sparse extension arrays should all have the same numeric type.')
if sp.issparse(sample_weight):
_ensure_no_complex_data(sample_weight)
sample_weight = _ensure_sparse_format(sample_weight, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, accept_large_sparse=accept_large_sparse, estimator_name=estimator_name, input_name='sample_weight')
else:
with warnings.catch_warnings():
try:
warnings.simplefilter('error', ComplexWarning)
if dtype is not None and np.dtype(dtype).kind in 'iu':
sample_weight = _asarray_with_order(sample_weight, order='C', xp=xp)
if sample_weight.dtype.kind == 'f':
_assert_all_finite(sample_weight, allow_nan=False, msg_dtype=dtype, estimator_name=estimator_name, input_name='sample_weight')
sample_weight = xp.astype(sample_weight, dtype, copy=False)
else:
sample_weight = _asarray_with_order(sample_weight, order='C', dtype=dtype, xp=xp)
except ComplexWarning as complex_warning:
raise ValueError('Complex data not supported\n{}\n'.format(sample_weight)) from complex_warning
_ensure_no_complex_data(sample_weight)
if False:
if sample_weight.ndim == 0:
raise ValueError('Expected 2D array, got scalar array instead:\narray={}.\nReshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.'.format(sample_weight))
if sample_weight.ndim == 1:
raise ValueError('Expected 2D array, got 1D array instead:\narray={}.\nReshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.'.format(sample_weight))
if dtype_numeric and sample_weight.dtype.kind in 'USV':
raise ValueError("dtype='numeric' is not compatible with arrays of bytes/strings.Convert your data to numeric values explicitly instead.")
if not allow_nd and sample_weight.ndim >= 3:
raise ValueError('Found array with dim %d. %s expected <= 2.' % (sample_weight.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(sample_weight, input_name='sample_weight', estimator_name=estimator_name, allow_nan=force_all_finite == 'allow-nan')
if ensure_min_samples > 0:
n_samples = _num_samples(sample_weight)
if n_samples < ensure_min_samples:
raise ValueError('Found array with %d sample(s) (shape=%s) while a minimum of %d is required%s.' % (n_samples, sample_weight.shape, ensure_min_samples, context))
if ensure_min_features > 0 and sample_weight.ndim == 2:
n_features = sample_weight.shape[1]
if n_features < ensure_min_features:
raise ValueError('Found array with %d feature(s) (shape=%s) while a minimum of %d is required%s.' % (n_features, sample_weight.shape, ensure_min_features, context))
if copy:
if xp.__name__ in {'numpy', 'numpy.array_api'}:
if np.may_share_memory(sample_weight, array_orig):
sample_weight = _asarray_with_order(sample_weight, dtype=dtype, order='C', copy=True, xp=xp)
else:
sample_weight = _asarray_with_order(sample_weight, dtype=dtype, order='C', copy=True, xp=xp)
sample_weight = sample_weight
if sample_weight.ndim != 1:
raise ValueError('Sample weights must be 1D array or scalar')
if sample_weight.shape != (n_samples,):
raise ValueError('sample_weight.shape == {}, expected {}!'.format(sample_weight.shape, (n_samples,)))
if only_non_negative:
(xp, _) = get_namespace(sample_weight)
if sp.issparse(sample_weight):
if sample_weight.format in ['lil', 'dok']:
sample_weight = sample_weight.tocsr()
if sample_weight.data.size == 0:
X_min = 0
else:
X_min = sample_weight.data.min()
else:
X_min = xp.min(sample_weight)
if X_min < 0:
raise ValueError('Negative values in data passed to %s' % '`sample_weight`')
return sample_weight
|
def _check_sample_weight(sample_weight, X, dtype=None, copy=False, only_non_negative=False):
"""Validate sample weights.
Note that passing sample_weight=None will output an array of ones.
Therefore, in some cases, you may want to protect the call with:
if sample_weight is not None:
sample_weight = _check_sample_weight(...)
Parameters
----------
sample_weight : {ndarray, Number or None}, shape (n_samples,)
Input sample weights.
X : {ndarray, list, sparse matrix}
Input data.
    only_non_negative : bool, default=False
Whether or not the weights are expected to be non-negative.
.. versionadded:: 1.0
dtype : dtype, default=None
dtype of the validated `sample_weight`.
If None, and the input `sample_weight` is an array, the dtype of the
input is preserved; otherwise an array with the default numpy dtype
is be allocated. If `dtype` is not one of `float32`, `float64`,
`None`, the output will be of dtype `float64`.
copy : bool, default=False
If True, a copy of sample_weight will be created.
Returns
-------
sample_weight : ndarray of shape (n_samples,)
Validated sample weight. It is guaranteed to be "C" contiguous.
"""
<DeepExtract>
message = 'Expected sequence or array-like, got %s' % type(X)
if hasattr(X, 'fit') and callable(X.fit):
raise TypeError(message)
if not hasattr(X, '__len__') and (not hasattr(X, 'shape')):
if hasattr(X, '__array__'):
X = np.asarray(X)
else:
raise TypeError(message)
if hasattr(X, 'shape') and X.shape is not None:
if len(X.shape) == 0:
raise TypeError('Singleton array %r cannot be considered a valid collection.' % X)
if isinstance(X.shape[0], numbers.Integral):
n_samples = X.shape[0]
try:
n_samples = len(X)
except TypeError as type_error:
raise TypeError(message) from type_error
</DeepExtract>
if dtype is not None and dtype not in [np.float32, np.float64]:
dtype = np.float64
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=dtype)
elif isinstance(sample_weight, numbers.Number):
sample_weight = np.full(n_samples, sample_weight, dtype=dtype)
else:
if dtype is None:
dtype = [np.float64, np.float32]
<DeepExtract>
if isinstance(sample_weight, np.matrix):
raise TypeError('np.matrix is not supported. Please convert to a numpy array with np.asarray. For more information see: https://numpy.org/doc/stable/reference/generated/numpy.matrix.html')
(xp, is_array_api) = get_namespace(sample_weight)
array_orig = sample_weight
dtype_numeric = isinstance(dtype, str) and dtype == 'numeric'
dtype_orig = getattr(sample_weight, 'dtype', None)
if not hasattr(dtype_orig, 'kind'):
dtype_orig = None
dtypes_orig = None
pandas_requires_conversion = False
if hasattr(sample_weight, 'dtypes') and hasattr(sample_weight.dtypes, '__array__'):
with suppress(ImportError):
from pandas.api.types import is_sparse
if not hasattr(sample_weight, 'sparse') and sample_weight.dtypes.apply(is_sparse).any():
warnings.warn('pandas.DataFrame with sparse columns found.It will be converted to a dense numpy array.')
dtypes_orig = list(sample_weight.dtypes)
pandas_requires_conversion = any((_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig))
if all((isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig)):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any((d == object for d in dtypes_orig)):
dtype_orig = object
elif (_is_extension_array_dtype(sample_weight) or hasattr(sample_weight, 'iloc')) and hasattr(sample_weight, 'dtype'):
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(sample_weight.dtype)
if isinstance(sample_weight.dtype, np.dtype):
dtype_orig = sample_weight.dtype
else:
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == 'O':
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
dtype = None
else:
dtype = dtype[0]
if pandas_requires_conversion:
new_dtype = dtype_orig if dtype is None else dtype
sample_weight = sample_weight.astype(new_dtype)
dtype = None
if force_all_finite not in (True, False, 'allow-nan'):
raise ValueError('force_all_finite should be a bool or "allow-nan". Got {!r} instead'.format(force_all_finite))
estimator_name = _check_estimator_name(estimator)
context = ' by %s' % estimator_name if estimator is not None else ''
if hasattr(sample_weight, 'sparse') and sample_weight.ndim > 1:
with suppress(ImportError):
from pandas.api.types import is_sparse
if sample_weight.dtypes.apply(is_sparse).all():
sample_weight = sample_weight.sparse.to_coo()
if sample_weight.dtype == np.dtype('object'):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError('Pandas DataFrame with mixed sparse extension arrays generated a sparse matrix with object dtype which can not be converted to a scipy sparse matrix.Sparse extension arrays should all have the same numeric type.')
if sp.issparse(sample_weight):
_ensure_no_complex_data(sample_weight)
sample_weight = _ensure_sparse_format(sample_weight, accept_sparse=False, dtype=dtype, copy=copy, force_all_finite=force_all_finite, accept_large_sparse=accept_large_sparse, estimator_name=estimator_name, input_name='sample_weight')
else:
with warnings.catch_warnings():
try:
warnings.simplefilter('error', ComplexWarning)
if dtype is not None and np.dtype(dtype).kind in 'iu':
sample_weight = _asarray_with_order(sample_weight, order='C', xp=xp)
if sample_weight.dtype.kind == 'f':
_assert_all_finite(sample_weight, allow_nan=False, msg_dtype=dtype, estimator_name=estimator_name, input_name='sample_weight')
sample_weight = xp.astype(sample_weight, dtype, copy=False)
else:
sample_weight = _asarray_with_order(sample_weight, order='C', dtype=dtype, xp=xp)
except ComplexWarning as complex_warning:
raise ValueError('Complex data not supported\n{}\n'.format(sample_weight)) from complex_warning
_ensure_no_complex_data(sample_weight)
if False:
if sample_weight.ndim == 0:
raise ValueError('Expected 2D array, got scalar array instead:\narray={}.\nReshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.'.format(sample_weight))
if sample_weight.ndim == 1:
raise ValueError('Expected 2D array, got 1D array instead:\narray={}.\nReshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.'.format(sample_weight))
if dtype_numeric and sample_weight.dtype.kind in 'USV':
raise ValueError("dtype='numeric' is not compatible with arrays of bytes/strings.Convert your data to numeric values explicitly instead.")
if not allow_nd and sample_weight.ndim >= 3:
raise ValueError('Found array with dim %d. %s expected <= 2.' % (sample_weight.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(sample_weight, input_name='sample_weight', estimator_name=estimator_name, allow_nan=force_all_finite == 'allow-nan')
if ensure_min_samples > 0:
n_samples = _num_samples(sample_weight)
if n_samples < ensure_min_samples:
raise ValueError('Found array with %d sample(s) (shape=%s) while a minimum of %d is required%s.' % (n_samples, sample_weight.shape, ensure_min_samples, context))
if ensure_min_features > 0 and sample_weight.ndim == 2:
n_features = sample_weight.shape[1]
if n_features < ensure_min_features:
raise ValueError('Found array with %d feature(s) (shape=%s) while a minimum of %d is required%s.' % (n_features, sample_weight.shape, ensure_min_features, context))
if copy:
if xp.__name__ in {'numpy', 'numpy.array_api'}:
if np.may_share_memory(sample_weight, array_orig):
sample_weight = _asarray_with_order(sample_weight, dtype=dtype, order='C', copy=True, xp=xp)
else:
sample_weight = _asarray_with_order(sample_weight, dtype=dtype, order='C', copy=True, xp=xp)
sample_weight = sample_weight
</DeepExtract>
if sample_weight.ndim != 1:
raise ValueError('Sample weights must be 1D array or scalar')
if sample_weight.shape != (n_samples,):
raise ValueError('sample_weight.shape == {}, expected {}!'.format(sample_weight.shape, (n_samples,)))
if only_non_negative:
<DeepExtract>
(xp, _) = get_namespace(sample_weight)
if sp.issparse(sample_weight):
if sample_weight.format in ['lil', 'dok']:
sample_weight = sample_weight.tocsr()
if sample_weight.data.size == 0:
X_min = 0
else:
X_min = sample_weight.data.min()
else:
X_min = xp.min(sample_weight)
if X_min < 0:
raise ValueError('Negative values in data passed to %s' % '`sample_weight`')
</DeepExtract>
return sample_weight
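# Usage sketch for _check_sample_weight (a private helper, so the import path
# is an implementation detail that may change between scikit-learn versions):
# None becomes an array of ones, a scalar is broadcast, and a length mismatch
# raises a ValueError.
import numpy as np
from sklearn.utils.validation import _check_sample_weight

X_demo = np.zeros((4, 2))
assert np.array_equal(_check_sample_weight(None, X_demo), np.ones(4))
assert np.array_equal(_check_sample_weight(2.0, X_demo), np.full(4, 2.0))
try:
    _check_sample_weight(np.ones(3), X_demo)  # wrong length for 4 samples
except ValueError as exc:
    print(exc)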
|
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""Compute the sigmoid kernel between X and Y.
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Sigmoid kernel between two arrays.
"""
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K)
return K
|
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""Compute the sigmoid kernel between X and Y.
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Sigmoid kernel between two arrays.
"""
<DeepExtract>
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
</DeepExtract>
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K)
return K
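# Quick check that sigmoid_kernel matches the formula in its docstring,
# K(X, Y) = tanh(gamma * <X, Y> + coef0), with the default gamma = 1 / n_features.
import numpy as np
from sklearn.metrics.pairwise import sigmoid_kernel

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.randn(5, 3)
Y_demo = rng_demo.randn(4, 3)
K_demo = sigmoid_kernel(X_demo, Y_demo, coef0=1)
expected = np.tanh((1.0 / X_demo.shape[1]) * X_demo @ Y_demo.T + 1)
assert np.allclose(K_demo, expected)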
|
def test_kernelcenterer_non_linear_kernel():
"""Check kernel centering for non-linear kernel."""
rng = np.random.RandomState(0)
(X, X_test) = (rng.randn(100, 50), rng.randn(20, 50))
def phi(X):
"""Our mapping function phi."""
return np.vstack([np.clip(X, a_min=0, a_max=None), -np.clip(X, a_min=None, a_max=0)])
phi_X = np.vstack([np.clip(X, a_min=0, a_max=None), -np.clip(X, a_min=None, a_max=0)])
phi_X_test = np.vstack([np.clip(X_test, a_min=0, a_max=None), -np.clip(X_test, a_min=None, a_max=0)])
scaler = StandardScaler(with_std=False)
phi_X_center = scaler.fit_transform(phi_X)
phi_X_test_center = scaler.transform(phi_X_test)
K = phi_X @ phi_X.T
K_test = phi_X_test @ phi_X.T
K_center = phi_X_center @ phi_X_center.T
K_test_center = phi_X_test_center @ phi_X_center.T
kernel_centerer = KernelCenterer()
kernel_centerer.fit(K)
assert_allclose(kernel_centerer.transform(K), K_center)
assert_allclose(kernel_centerer.transform(K_test), K_test_center)
ones_M = np.ones_like(K) / K.shape[0]
K_centered = K - ones_M @ K - K @ ones_M + ones_M @ K @ ones_M
assert_allclose(kernel_centerer.transform(K), K_centered)
ones_prime_M = np.ones_like(K_test) / K.shape[0]
K_test_centered = K_test - ones_prime_M @ K - K_test @ ones_M + ones_prime_M @ K @ ones_M
assert_allclose(kernel_centerer.transform(K_test), K_test_centered)
|
def test_kernelcenterer_non_linear_kernel():
"""Check kernel centering for non-linear kernel."""
rng = np.random.RandomState(0)
(X, X_test) = (rng.randn(100, 50), rng.randn(20, 50))
def phi(X):
"""Our mapping function phi."""
return np.vstack([np.clip(X, a_min=0, a_max=None), -np.clip(X, a_min=None, a_max=0)])
<DeepExtract>
phi_X = np.vstack([np.clip(X, a_min=0, a_max=None), -np.clip(X, a_min=None, a_max=0)])
</DeepExtract>
<DeepExtract>
phi_X_test = np.vstack([np.clip(X_test, a_min=0, a_max=None), -np.clip(X_test, a_min=None, a_max=0)])
</DeepExtract>
scaler = StandardScaler(with_std=False)
phi_X_center = scaler.fit_transform(phi_X)
phi_X_test_center = scaler.transform(phi_X_test)
K = phi_X @ phi_X.T
K_test = phi_X_test @ phi_X.T
K_center = phi_X_center @ phi_X_center.T
K_test_center = phi_X_test_center @ phi_X_center.T
kernel_centerer = KernelCenterer()
kernel_centerer.fit(K)
assert_allclose(kernel_centerer.transform(K), K_center)
assert_allclose(kernel_centerer.transform(K_test), K_test_center)
ones_M = np.ones_like(K) / K.shape[0]
K_centered = K - ones_M @ K - K @ ones_M + ones_M @ K @ ones_M
assert_allclose(kernel_centerer.transform(K), K_centered)
ones_prime_M = np.ones_like(K_test) / K.shape[0]
K_test_centered = K_test - ones_prime_M @ K - K_test @ ones_M + ones_prime_M @ K @ ones_M
assert_allclose(kernel_centerer.transform(K_test), K_test_centered)
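# For the special case of a linear kernel, the same centering is obtained by
# mean-centering the features first; the test above generalises this to an
# explicit non-linear feature map phi.
import numpy as np
from sklearn.preprocessing import KernelCenterer, StandardScaler

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.randn(30, 5)
K_demo = X_demo @ X_demo.T
X_centered = StandardScaler(with_std=False).fit_transform(X_demo)
assert np.allclose(KernelCenterer().fit_transform(K_demo), X_centered @ X_centered.T)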
|
def test_huber_sparse():
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
huber = HuberRegressor(alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
|
def test_huber_sparse():
<DeepExtract>
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
huber = HuberRegressor(alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
|
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
if name == 'OPTICS':
estimator.set_params(min_samples=1.0)
if name == 'TSNE':
estimator.set_params(perplexity=0.5)
msgs = ['1 sample', 'n_samples = 1', 'n_samples=1', 'one sample', '1 class', 'one class']
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
|
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
if name == 'OPTICS':
estimator.set_params(min_samples=1.0)
if name == 'TSNE':
estimator.set_params(perplexity=0.5)
msgs = ['1 sample', 'n_samples = 1', 'n_samples=1', 'one sample', '1 class', 'one class']
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
|
def predict_proba(self, X):
"""
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
The class probability of a single tree is the fraction of samples of
the same class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
check_is_fitted(self)
X = self._validate_data(X, dtype=DTYPE, accept_sparse='csr', reset=False)
if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
raise ValueError('No support for np.int64 index based sparse matrices')
X = X
(n_jobs, _, _) = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64) for j in np.atleast_1d(self.n_classes_)]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, require='sharedmem')((delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock) for e in self.estimators_))
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
|
def predict_proba(self, X):
"""
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
The class probability of a single tree is the fraction of samples of
the same class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
<DeepExtract>
check_is_fitted(self)
X = self._validate_data(X, dtype=DTYPE, accept_sparse='csr', reset=False)
if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
raise ValueError('No support for np.int64 index based sparse matrices')
X = X
</DeepExtract>
(n_jobs, _, _) = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64) for j in np.atleast_1d(self.n_classes_)]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, require='sharedmem')((delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock) for e in self.estimators_))
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
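# The docstring above describes forest probabilities as the average of the
# per-tree probabilities; this can be verified directly on a small forest.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X_demo, y_demo = make_classification(n_samples=60, random_state=0)
forest = RandomForestClassifier(n_estimators=5, random_state=0).fit(X_demo, y_demo)
manual = np.mean([tree.predict_proba(X_demo) for tree in forest.estimators_], axis=0)
assert np.allclose(forest.predict_proba(X_demo), manual)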
|
@validate_params({'data_home': [str, None], 'funneled': ['boolean'], 'resize': [Interval(Real, 0, None, closed='neither'), None], 'min_faces_per_person': [Interval(Integral, 0, None, closed='left'), None], 'color': ['boolean'], 'slice_': [tuple, Hidden(None)], 'download_if_missing': ['boolean'], 'return_X_y': ['boolean']})
def fetch_lfw_people(*, data_home=None, funneled=True, resize=0.5, min_faces_per_person=0, color=False, slice_=(slice(70, 195), slice(78, 172)), download_if_missing=True, return_X_y=False):
"""Load the Labeled Faces in the Wild (LFW) people dataset (classification).
Download it if necessary.
================= =======================
Classes 5749
Samples total 13233
Dimensionality 5828
Features real, between 0 and 255
================= =======================
Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
funneled : bool, default=True
Download and use the funneled variant of the dataset.
resize : float or None, default=0.5
        Ratio used to resize each face picture. If `None`, no resizing is
performed.
    min_faces_per_person : int, default=0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : bool, default=False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
object. See below for more information about the `dataset.data` and
`dataset.target` object.
.. versionadded:: 0.20
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image
of original size 62 x 47 pixels.
Changing the ``slice_`` or resize parameters will change the
shape of the output.
images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_``
or resize parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each face image.
Those labels range from 0-5748 and correspond to the person IDs.
target_names : numpy array of shape (5749,)
Names of all persons in the dataset.
Position in array corresponds to the person ID in the target array.
DESCR : str
Description of the Labeled Faces in the Wild (LFW) dataset.
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, 'lfw_home')
if not exists(lfw_home):
makedirs(lfw_home)
for target in TARGETS:
target_filepath = join(lfw_home, target.filename)
if not exists(target_filepath):
if download_if_missing:
logger.info('Downloading LFW metadata: %s', target.url)
_fetch_remote(target, dirname=lfw_home)
else:
raise IOError('%s is missing' % target_filepath)
if funneled:
data_folder_path = join(lfw_home, 'lfw_funneled')
archive = FUNNELED_ARCHIVE
else:
data_folder_path = join(lfw_home, 'lfw')
archive = ARCHIVE
if not exists(data_folder_path):
archive_path = join(lfw_home, archive.filename)
if not exists(archive_path):
if download_if_missing:
logger.info('Downloading LFW data (~200MB): %s', archive.url)
_fetch_remote(archive, dirname=lfw_home)
else:
raise IOError('%s is missing' % archive_path)
import tarfile
logger.debug('Decompressing the data archive to %s', data_folder_path)
tarfile.open(archive_path, 'r:gz').extractall(path=lfw_home)
remove(archive_path)
(lfw_home, data_folder_path) = (lfw_home, data_folder_path)
logger.debug('Loading LFW people faces from %s', lfw_home)
m = Memory(location=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
(faces, target, target_names) = load_func(data_folder_path, resize=resize, min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
X = faces.reshape(len(faces), -1)
fdescr = load_descr('lfw.rst')
if return_X_y:
return (X, target)
return Bunch(data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr)
|
@validate_params({'data_home': [str, None], 'funneled': ['boolean'], 'resize': [Interval(Real, 0, None, closed='neither'), None], 'min_faces_per_person': [Interval(Integral, 0, None, closed='left'), None], 'color': ['boolean'], 'slice_': [tuple, Hidden(None)], 'download_if_missing': ['boolean'], 'return_X_y': ['boolean']})
def fetch_lfw_people(*, data_home=None, funneled=True, resize=0.5, min_faces_per_person=0, color=False, slice_=(slice(70, 195), slice(78, 172)), download_if_missing=True, return_X_y=False):
"""Load the Labeled Faces in the Wild (LFW) people dataset (classification).
Download it if necessary.
================= =======================
Classes 5749
Samples total 13233
Dimensionality 5828
Features real, between 0 and 255
================= =======================
Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
funneled : bool, default=True
Download and use the funneled variant of the dataset.
resize : float or None, default=0.5
        Ratio used to resize each face picture. If `None`, no resizing is
performed.
    min_faces_per_person : int, default=0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : bool, default=False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
object. See below for more information about the `dataset.data` and
`dataset.target` object.
.. versionadded:: 0.20
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image
of original size 62 x 47 pixels.
Changing the ``slice_`` or resize parameters will change the
shape of the output.
images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_``
or resize parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each face image.
Those labels range from 0-5748 and correspond to the person IDs.
target_names : numpy array of shape (5749,)
Names of all persons in the dataset.
Position in array corresponds to the person ID in the target array.
DESCR : str
Description of the Labeled Faces in the Wild (LFW) dataset.
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
"""
<DeepExtract>
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, 'lfw_home')
if not exists(lfw_home):
makedirs(lfw_home)
for target in TARGETS:
target_filepath = join(lfw_home, target.filename)
if not exists(target_filepath):
if download_if_missing:
logger.info('Downloading LFW metadata: %s', target.url)
_fetch_remote(target, dirname=lfw_home)
else:
raise IOError('%s is missing' % target_filepath)
if funneled:
data_folder_path = join(lfw_home, 'lfw_funneled')
archive = FUNNELED_ARCHIVE
else:
data_folder_path = join(lfw_home, 'lfw')
archive = ARCHIVE
if not exists(data_folder_path):
archive_path = join(lfw_home, archive.filename)
if not exists(archive_path):
if download_if_missing:
logger.info('Downloading LFW data (~200MB): %s', archive.url)
_fetch_remote(archive, dirname=lfw_home)
else:
raise IOError('%s is missing' % archive_path)
import tarfile
logger.debug('Decompressing the data archive to %s', data_folder_path)
tarfile.open(archive_path, 'r:gz').extractall(path=lfw_home)
remove(archive_path)
(lfw_home, data_folder_path) = (lfw_home, data_folder_path)
</DeepExtract>
logger.debug('Loading LFW people faces from %s', lfw_home)
m = Memory(location=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
(faces, target, target_names) = load_func(data_folder_path, resize=resize, min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
X = faces.reshape(len(faces), -1)
fdescr = load_descr('lfw.rst')
if return_X_y:
return (X, target)
return Bunch(data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr)
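# A minimal usage sketch of the loader documented above (not part of the
# original source). The first call downloads roughly 200MB unless the data
# is already cached under data_home; the parameter values are illustrative.
from sklearn.datasets import fetch_lfw_people

lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
print(lfw.images.shape)        # (n_samples, height, width) after resizing
print(lfw.target_names[:3])    # names of some of the retained people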
|
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The Gram matrix of the linear kernel, i.e. `X @ Y.T`.
"""
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
|
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The Gram matrix of the linear kernel, i.e. `X @ Y.T`.
"""
<DeepExtract>
(X, Y, dtype_float) = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1]))
(X, Y) = (X, Y)
</DeepExtract>
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
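# A minimal sketch of the public counterpart of the function above:
# sklearn.metrics.pairwise.linear_kernel simply returns X @ Y.T
# (with Y defaulting to X). The toy array is illustrative only.
import numpy as np
from sklearn.metrics.pairwise import linear_kernel

X = np.array([[1.0, 0.0], [0.0, 2.0]])
K = linear_kernel(X)            # Gram matrix of shape (2, 2)
assert np.allclose(K, X @ X.T)  # identical to the plain dot product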
|
def test_huber_scaling_invariant():
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
huber = HuberRegressor(fit_intercept=False, alpha=0.0)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert not np.all(n_outliers_mask_1)
huber.fit(X, 2.0 * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2.0 * X, 2.0 * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
|
def test_huber_scaling_invariant():
<DeepExtract>
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
huber = HuberRegressor(fit_intercept=False, alpha=0.0)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert not np.all(n_outliers_mask_1)
huber.fit(X, 2.0 * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2.0 * X, 2.0 * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
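# A minimal sketch (not part of the test suite) of the invariance asserted
# above: rescaling X and y together should leave HuberRegressor's detected
# outlier mask unchanged. The synthetic data below is illustrative only.
import numpy as np
from sklearn.linear_model import HuberRegressor

rng = np.random.RandomState(0)
X = rng.randn(50, 2)
y = X @ np.array([1.0, -2.0]) + 0.1 * rng.randn(50)
huber = HuberRegressor(fit_intercept=False, alpha=0.0).fit(X, y)
mask_original = huber.outliers_.copy()
huber.fit(2.0 * X, 2.0 * y)
print(np.array_equal(huber.outliers_, mask_original))  # expected: True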
|
@pytest.mark.parametrize('gcv_mode', ['svd', 'eigen'])
@pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('n_features', [8, 20])
@pytest.mark.parametrize('y_shape, fit_intercept, noise', [((11,), True, 1.0), ((11, 1), True, 20.0), ((11, 3), True, 150.0), ((11, 3), False, 30.0)])
def test_ridge_gcv_sample_weights(gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise):
alphas = [0.001, 0.1, 1.0, 10.0, 1000.0]
rng = np.random.RandomState(0)
n_targets = y_shape[-1] if len(y_shape) == 2 else 1
(X, y, c) = make_regression(n_samples=11, n_features=n_features, n_informative=n_informative, n_targets=n_targets, bias=bias, noise=noise, shuffle=False, coef=True, random_state=0)
if n_features == 1:
c = np.asarray([c])
X += X_offset
mask = np.random.RandomState(0).binomial(1, proportion_nonzero, X.shape) > 0
removed_X = X.copy()
X[~mask] = 0.0
removed_X[mask] = 0.0
y -= removed_X.dot(c)
if positive:
y += X.dot(np.abs(c) + 1 - c)
c = np.abs(c) + 1
if n_features == 1:
c = c[0]
if coef:
(X, y) = (X, y, c)
(X, y) = (X, y)
y = y.reshape(y_shape)
sample_weight = 3 * rng.randn(len(X))
sample_weight = (sample_weight - sample_weight.min() + 1).astype(int)
indices = np.repeat(np.arange(X.shape[0]), sample_weight)
sample_weight = sample_weight.astype(float)
(X_tiled, y_tiled) = (X[indices], y[indices])
cv = GroupKFold(n_splits=X.shape[0])
splits = cv.split(X_tiled, y_tiled, groups=indices)
kfold = RidgeCV(alphas=alphas, cv=splits, scoring='neg_mean_squared_error', fit_intercept=fit_intercept)
kfold.fit(X_tiled, y_tiled)
ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept)
splits = cv.split(X_tiled, y_tiled, groups=indices)
predictions = cross_val_predict(ridge_reg, X_tiled, y_tiled, cv=splits)
kfold_errors = (y_tiled - predictions) ** 2
kfold_errors = [np.sum(kfold_errors[indices == i], axis=0) for i in np.arange(X.shape[0])]
kfold_errors = np.asarray(kfold_errors)
X_gcv = X_constructor(X)
gcv_ridge = RidgeCV(alphas=alphas, store_cv_values=True, gcv_mode=gcv_mode, fit_intercept=fit_intercept)
gcv_ridge.fit(X_gcv, y, sample_weight=sample_weight)
if len(y_shape) == 2:
gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)]
else:
gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)]
assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_)
assert_allclose(gcv_errors, kfold_errors, rtol=0.001)
assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=0.001)
assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=0.001)
|
@pytest.mark.parametrize('gcv_mode', ['svd', 'eigen'])
@pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('n_features', [8, 20])
@pytest.mark.parametrize('y_shape, fit_intercept, noise', [((11,), True, 1.0), ((11, 1), True, 20.0), ((11, 3), True, 150.0), ((11, 3), False, 30.0)])
def test_ridge_gcv_sample_weights(gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise):
alphas = [0.001, 0.1, 1.0, 10.0, 1000.0]
rng = np.random.RandomState(0)
n_targets = y_shape[-1] if len(y_shape) == 2 else 1
<DeepExtract>
(X, y, c) = make_regression(n_samples=11, n_features=n_features, n_informative=n_informative, n_targets=n_targets, bias=bias, noise=noise, shuffle=False, coef=True, random_state=0)
if n_features == 1:
c = np.asarray([c])
X += X_offset
mask = np.random.RandomState(0).binomial(1, proportion_nonzero, X.shape) > 0
removed_X = X.copy()
X[~mask] = 0.0
removed_X[mask] = 0.0
y -= removed_X.dot(c)
if positive:
y += X.dot(np.abs(c) + 1 - c)
c = np.abs(c) + 1
if n_features == 1:
c = c[0]
if coef:
(X, y) = (X, y, c)
(X, y) = (X, y)
</DeepExtract>
y = y.reshape(y_shape)
sample_weight = 3 * rng.randn(len(X))
sample_weight = (sample_weight - sample_weight.min() + 1).astype(int)
indices = np.repeat(np.arange(X.shape[0]), sample_weight)
sample_weight = sample_weight.astype(float)
(X_tiled, y_tiled) = (X[indices], y[indices])
cv = GroupKFold(n_splits=X.shape[0])
splits = cv.split(X_tiled, y_tiled, groups=indices)
kfold = RidgeCV(alphas=alphas, cv=splits, scoring='neg_mean_squared_error', fit_intercept=fit_intercept)
kfold.fit(X_tiled, y_tiled)
ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept)
splits = cv.split(X_tiled, y_tiled, groups=indices)
predictions = cross_val_predict(ridge_reg, X_tiled, y_tiled, cv=splits)
kfold_errors = (y_tiled - predictions) ** 2
kfold_errors = [np.sum(kfold_errors[indices == i], axis=0) for i in np.arange(X.shape[0])]
kfold_errors = np.asarray(kfold_errors)
X_gcv = X_constructor(X)
gcv_ridge = RidgeCV(alphas=alphas, store_cv_values=True, gcv_mode=gcv_mode, fit_intercept=fit_intercept)
gcv_ridge.fit(X_gcv, y, sample_weight=sample_weight)
if len(y_shape) == 2:
gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)]
else:
gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)]
assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_)
assert_allclose(gcv_errors, kfold_errors, rtol=0.001)
assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=0.001)
assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=0.001)
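# A minimal sketch of the leave-one-out path exercised above. RidgeCV with
# store_cv_values=True keeps per-sample errors for every candidate alpha;
# the parameter name follows the scikit-learn version this test targets
# (later releases rename it). Synthetic data, illustrative only.
import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
y = X @ np.array([1.0, 0.5, -1.0, 2.0]) + 0.1 * rng.randn(30)
ridge = RidgeCV(alphas=[0.1, 1.0, 10.0], store_cv_values=True).fit(X, y)
print(ridge.alpha_)             # selected regularization strength
print(ridge.cv_values_.shape)   # (30, 3): one error column per alpha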
|
def _plot_one_way_partial_dependence(self, kind, preds, avg_preds, feature_values, feature_idx, n_ice_lines, ax, n_cols, pd_plot_idx, n_lines, ice_lines_kw, pd_line_kw, categorical, bar_kw, pdp_lim):
"""Plot 1-way partial dependence: ICE and PDP.
Parameters
----------
kind : str
The kind of partial plot to draw.
preds : ndarray of shape (n_instances, n_grid_points) or None
The predictions computed for all points of `feature_values` for a
given feature for all samples in `X`.
avg_preds : ndarray of shape (n_grid_points,)
The average predictions for all points of `feature_values` for a
given feature for all samples in `X`.
feature_values : ndarray of shape (n_grid_points,)
The feature values for which the predictions have been computed.
feature_idx : int
The index corresponding to the target feature.
n_ice_lines : int
The number of ICE lines to plot.
ax : Matplotlib axes
The axis on which to plot the ICE and PDP lines.
n_cols : int or None
The number of columns in the axis.
pd_plot_idx : int
The sequential index of the plot. It will be unraveled to find the
matching 2D position in the grid layout.
n_lines : int
The total number of lines expected to be plotted on the axis.
ice_lines_kw : dict
Dict with keywords passed when plotting the ICE lines.
pd_line_kw : dict
Dict with keywords passed when plotting the PD plot.
categorical : bool
Whether feature is categorical.
bar_kw: dict
Dict with keywords passed when plotting the PD bars (categorical).
pdp_lim : dict
Global min and max average predictions, such that all plots will
have the same scale and y limits. `pdp_lim[1]` is the global min
and max for single partial dependence curves.
"""
from matplotlib import transforms
if kind in ('individual', 'both'):
rng = check_random_state(self.random_state)
ice_lines_idx = rng.choice(preds[self.target_idx].shape[0], n_ice_lines, replace=False)
ice_lines_subsampled = preds[self.target_idx][ice_lines_idx, :]
for (ice_idx, ice) in enumerate(ice_lines_subsampled):
line_idx = np.unravel_index(pd_plot_idx * n_lines + ice_idx, self.lines_.shape)
self.lines_[line_idx] = ax.plot(feature_values, ice.ravel(), **ice_lines_kw)[0]
if kind in ('average', 'both'):
if kind == 'average':
pd_line_idx = pd_plot_idx
else:
pd_line_idx = pd_plot_idx * n_lines + n_ice_lines
if categorical:
bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape)
self.bars_[bar_idx] = ax.bar(feature_values, avg_preds[self.target_idx].ravel(), **bar_kw)[0]
ax.tick_params(axis='x', rotation=90)
else:
line_idx = np.unravel_index(pd_line_idx, self.lines_.shape)
self.lines_[line_idx] = ax.plot(feature_values, avg_preds[self.target_idx].ravel(), **pd_line_kw)[0]
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
if self.deciles.get(feature_idx[0], None) is not None:
self.deciles_vlines_[vlines_idx] = ax.vlines(self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color='k')
min_val = min((val[0] for val in pdp_lim.values()))
max_val = max((val[1] for val in pdp_lim.values()))
ax.set_ylim([min_val, max_val])
if not ax.get_xlabel():
ax.set_xlabel(self.feature_names[feature_idx[0]])
if n_cols is None or pd_plot_idx % n_cols == 0:
if not ax.get_ylabel():
ax.set_ylabel('Partial dependence')
else:
ax.set_yticklabels([])
if pd_line_kw.get('label', None) and kind != 'individual' and (not categorical):
ax.legend()
|
def _plot_one_way_partial_dependence(self, kind, preds, avg_preds, feature_values, feature_idx, n_ice_lines, ax, n_cols, pd_plot_idx, n_lines, ice_lines_kw, pd_line_kw, categorical, bar_kw, pdp_lim):
"""Plot 1-way partial dependence: ICE and PDP.
Parameters
----------
kind : str
The kind of partial plot to draw.
preds : ndarray of shape (n_instances, n_grid_points) or None
The predictions computed for all points of `feature_values` for a
given feature for all samples in `X`.
avg_preds : ndarray of shape (n_grid_points,)
The average predictions for all points of `feature_values` for a
given feature for all samples in `X`.
feature_values : ndarray of shape (n_grid_points,)
The feature values for which the predictions have been computed.
feature_idx : int
The index corresponding to the target feature.
n_ice_lines : int
The number of ICE lines to plot.
ax : Matplotlib axes
The axis on which to plot the ICE and PDP lines.
n_cols : int or None
The number of columns in the axis.
pd_plot_idx : int
The sequential index of the plot. It will be unraveled to find the
matching 2D position in the grid layout.
n_lines : int
The total number of lines expected to be plotted on the axis.
ice_lines_kw : dict
Dict with keywords passed when plotting the ICE lines.
pd_line_kw : dict
Dict with keywords passed when plotting the PD plot.
categorical : bool
Whether feature is categorical.
bar_kw: dict
Dict with keywords passed when plotting the PD bars (categorical).
pdp_lim : dict
Global min and max average predictions, such that all plots will
have the same scale and y limits. `pdp_lim[1]` is the global min
and max for single partial dependence curves.
"""
from matplotlib import transforms
if kind in ('individual', 'both'):
<DeepExtract>
rng = check_random_state(self.random_state)
ice_lines_idx = rng.choice(preds[self.target_idx].shape[0], n_ice_lines, replace=False)
ice_lines_subsampled = preds[self.target_idx][ice_lines_idx, :]
for (ice_idx, ice) in enumerate(ice_lines_subsampled):
line_idx = np.unravel_index(pd_plot_idx * n_lines + ice_idx, self.lines_.shape)
self.lines_[line_idx] = ax.plot(feature_values, ice.ravel(), **ice_lines_kw)[0]
</DeepExtract>
if kind in ('average', 'both'):
if kind == 'average':
pd_line_idx = pd_plot_idx
else:
pd_line_idx = pd_plot_idx * n_lines + n_ice_lines
<DeepExtract>
if categorical:
bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape)
self.bars_[bar_idx] = ax.bar(feature_values, avg_preds[self.target_idx].ravel(), **bar_kw)[0]
ax.tick_params(axis='x', rotation=90)
else:
line_idx = np.unravel_index(pd_line_idx, self.lines_.shape)
self.lines_[line_idx] = ax.plot(feature_values, avg_preds[self.target_idx].ravel(), **pd_line_kw)[0]
</DeepExtract>
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
if self.deciles.get(feature_idx[0], None) is not None:
self.deciles_vlines_[vlines_idx] = ax.vlines(self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color='k')
min_val = min((val[0] for val in pdp_lim.values()))
max_val = max((val[1] for val in pdp_lim.values()))
ax.set_ylim([min_val, max_val])
if not ax.get_xlabel():
ax.set_xlabel(self.feature_names[feature_idx[0]])
if n_cols is None or pd_plot_idx % n_cols == 0:
if not ax.get_ylabel():
ax.set_ylabel('Partial dependence')
else:
ax.set_yticklabels([])
if pd_line_kw.get('label', None) and kind != 'individual' and (not categorical):
ax.legend()
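# A hedged sketch of the public entry point that eventually reaches the
# plotting helper above; assumes a scikit-learn recent enough to provide
# PartialDependenceDisplay.from_estimator and an installed matplotlib.
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay

X, y = make_friedman1(n_samples=200, random_state=0)
est = GradientBoostingRegressor(random_state=0).fit(X, y)
# kind="both" overlays ICE lines and the averaged partial dependence curve
disp = PartialDependenceDisplay.from_estimator(est, X, features=[0, 1], kind="both")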
|
def test_standard_scaler_partial_fit():
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert scaler_batch.var_ == scaler_incr.var_
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64), scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64), scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0), scaler_incr.scale_)
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for (i, batch) in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
if batch.stop != n:
assert (i + 1) * chunk_size == scaler_incr.n_samples_seen_
else:
assert i * chunk_size + (batch.stop - batch.start) == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
def test_standard_scaler_partial_fit():
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert scaler_batch.var_ == scaler_incr.var_
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64), scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64), scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0), scaler_incr.scale_)
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for (i, batch) in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
<DeepExtract>
if batch.stop != n:
assert (i + 1) * chunk_size == scaler_incr.n_samples_seen_
else:
assert i * chunk_size + (batch.stop - batch.start) == scaler_incr.n_samples_seen_
</DeepExtract>
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
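# A minimal sketch of the batch-vs-incremental equivalence checked above:
# fitting StandardScaler chunk by chunk via partial_fit matches a single
# fit on the full data. Synthetic data, illustrative only.
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.utils import gen_batches

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
batch = StandardScaler().fit(X)
incr = StandardScaler()
for sl in gen_batches(X.shape[0], 10):
    incr.partial_fit(X[sl])
print(np.allclose(batch.mean_, incr.mean_))  # expected: True
print(np.allclose(batch.var_, incr.var_))    # expected: True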
|
def _fit(v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : ndarray of shape (n_samples, n_features)
The data to use for training.
rng : RandomState instance
Random number generator to use for sampling.
"""
p = safe_sparse_dot(v_pos, self.components_.T)
p += self.intercept_hidden_
h_pos = expit(p, out=p)
p = np.dot(self.h_samples_, self.components_)
p += self.intercept_visible_
expit(p, out=p)
v_neg = rng.uniform(size=p.shape) < p
p = safe_sparse_dot(v_neg, self.components_.T)
p += self.intercept_hidden_
h_neg = expit(p, out=p)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0
self.h_samples_ = np.floor(h_neg, h_neg)
|
def _fit(v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : ndarray of shape (n_samples, n_features)
The data to use for training.
rng : RandomState instance
Random number generator to use for sampling.
"""
<DeepExtract>
p = safe_sparse_dot(v_pos, self.components_.T)
p += self.intercept_hidden_
h_pos = expit(p, out=p)
</DeepExtract>
<DeepExtract>
p = np.dot(self.h_samples_, self.components_)
p += self.intercept_visible_
expit(p, out=p)
v_neg = rng.uniform(size=p.shape) < p
</DeepExtract>
<DeepExtract>
p = safe_sparse_dot(v_neg, self.components_.T)
p += self.intercept_hidden_
h_neg = expit(p, out=p)
</DeepExtract>
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0
self.h_samples_ = np.floor(h_neg, h_neg)
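# A hedged sketch of the estimator that owns the private _fit above:
# BernoulliRBM trained with persistent contrastive divergence on
# binary-valued inputs. Toy data, illustrative only.
import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(50, 16) > 0.5).astype(np.float64)
rbm = BernoulliRBM(n_components=8, learning_rate=0.05, n_iter=5, random_state=0)
H = rbm.fit_transform(X)   # hidden-unit activation probabilities, shape (50, 8)
print(H.shape)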
|
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix
Parameters
----------
n_components : int
Dimensionality of the target projection space.
n_features : int
Dimensionality of the original source space.
Returns
-------
components : sparse matrix of shape (n_components, n_features)
The generated random matrix in CSR format.
"""
random_state = check_random_state(self.random_state)
if self.density == 'auto':
self.density = 1 / np.sqrt(n_features)
elif self.density <= 0 or self.density > 1:
raise ValueError('Expected density in range ]0, 1], got: %r' % self.density)
self.density_ = self.density
return _sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)
|
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix
Parameters
----------
n_components : int
Dimensionality of the target projection space.
n_features : int
Dimensionality of the original source space.
Returns
-------
components : sparse matrix of shape (n_components, n_features)
The generated random matrix in CSR format.
"""
random_state = check_random_state(self.random_state)
<DeepExtract>
if self.density == 'auto':
self.density = 1 / np.sqrt(n_features)
elif self.density <= 0 or self.density > 1:
raise ValueError('Expected density in range ]0, 1], got: %r' % self.density)
self.density_ = self.density
</DeepExtract>
return _sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)
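# A minimal sketch of the public estimator wrapping this helper:
# SparseRandomProjection with density='auto' resolves the density to
# 1 / sqrt(n_features). Toy data, illustrative only.
import numpy as np
from sklearn.random_projection import SparseRandomProjection

rng = np.random.RandomState(0)
X = rng.randn(20, 100)
srp = SparseRandomProjection(n_components=10, density="auto", random_state=0)
X_new = srp.fit_transform(X)   # projected data of shape (20, 10)
print(srp.density_)            # 0.1, i.e. 1 / sqrt(100)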
|
def _ndcg_sample_scores(y_true, y_score, k=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If None, use all
outputs.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : ndarray of shape (n_samples,)
The NDCG score for each sample (float in [0., 1.]).
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
"""
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if ignore_ties:
ranking = np.argsort(y_score)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum) for (y_t, y_s) in zip(y_true, y_score)]
cumulative_gains = np.asarray(cumulative_gains)
gain = cumulative_gains
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if True:
ranking = np.argsort(y_true)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum) for (y_t, y_s) in zip(y_true, y_true)]
cumulative_gains = np.asarray(cumulative_gains)
normalizing_gain = cumulative_gains
all_irrelevant = normalizing_gain == 0
gain[all_irrelevant] = 0
gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant]
return gain
|
def _ndcg_sample_scores(y_true, y_score, k=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If None, use all
outputs.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : ndarray of shape (n_samples,)
The NDCG score for each sample (float in [0., 1.]).
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
"""
<DeepExtract>
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if ignore_ties:
ranking = np.argsort(y_score)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum) for (y_t, y_s) in zip(y_true, y_score)]
cumulative_gains = np.asarray(cumulative_gains)
gain = cumulative_gains
</DeepExtract>
<DeepExtract>
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if True:
ranking = np.argsort(y_true)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum) for (y_t, y_s) in zip(y_true, y_true)]
cumulative_gains = np.asarray(cumulative_gains)
normalizing_gain = cumulative_gains
</DeepExtract>
all_irrelevant = normalizing_gain == 0
gain[all_irrelevant] = 0
gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant]
return gain
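# A minimal sketch of the public metric built on the per-sample helper
# above; ndcg_score averages these sample scores. Hand-sized example only.
import numpy as np
from sklearn.metrics import ndcg_score

y_true = np.array([[3, 2, 1, 0]])
y_score = np.array([[0.1, 0.2, 0.3, 0.4]])       # reversed ranking
print(ndcg_score(y_true, y_score))               # strictly below 1.0
print(ndcg_score(y_true, y_true.astype(float)))  # perfect ranking -> 1.0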
|
def test_sparse_input_convergence_warning():
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(500, n_targets)
else:
w = random_state.randn(500)
w[n_informative_features:] = 0.0
X = random_state.randn(1000, 500)
y = np.dot(X, w)
X_test = random_state.randn(1000, 500)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
with pytest.warns(ConvergenceWarning):
ElasticNet(max_iter=1, tol=0).fit(sparse.csr_matrix(X, dtype=np.float32), y)
with warnings.catch_warnings():
warnings.simplefilter('error', ConvergenceWarning)
Lasso().fit(sparse.csr_matrix(X, dtype=np.float32), y)
|
def test_sparse_input_convergence_warning():
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(500, n_targets)
else:
w = random_state.randn(500)
w[n_informative_features:] = 0.0
X = random_state.randn(1000, 500)
y = np.dot(X, w)
X_test = random_state.randn(1000, 500)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
with pytest.warns(ConvergenceWarning):
ElasticNet(max_iter=1, tol=0).fit(sparse.csr_matrix(X, dtype=np.float32), y)
with warnings.catch_warnings():
warnings.simplefilter('error', ConvergenceWarning)
Lasso().fit(sparse.csr_matrix(X, dtype=np.float32), y)
|
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape (n_samples,).
"""
X = self._validate_data(X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)
raw_predictions = self._raw_predict_init(X)
predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
raw_predictions = raw_predictions
if raw_predictions.shape[1] == 1:
return raw_predictions.ravel()
return raw_predictions
|
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape (n_samples,).
"""
X = self._validate_data(X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)
<DeepExtract>
raw_predictions = self._raw_predict_init(X)
predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
raw_predictions = raw_predictions
</DeepExtract>
if raw_predictions.shape[1] == 1:
return raw_predictions.ravel()
return raw_predictions
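# A minimal sketch of calling decision_function on a fitted gradient
# boosting classifier: binary problems yield a 1-D array of raw scores,
# as described in the docstring above. Synthetic data, illustrative only.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = GradientBoostingClassifier(n_estimators=20, random_state=0).fit(X, y)
scores = clf.decision_function(X)
print(scores.shape)   # (100,): raveled because the problem is binary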
|
def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
not explicit. We provide information that seems correct with regard to
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
scaled : bool, default=True
If True, the feature variables are mean centered and scaled by the
standard deviation times the square root of `n_samples`.
If False, raw data is returned for the feature variables.
.. versionadded:: 1.1
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarray of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features and/or target of a given sample.
.. versionadded:: 0.18
"""
data_filename = 'diabetes_data_raw.csv.gz'
target_filename = 'diabetes_target.csv.gz'
with _open_binary(data_module, data_filename) as compressed_file:
compressed_file = gzip.open(compressed_file, mode='rt', encoding=encoding)
data = np.loadtxt(compressed_file, **kwargs)
if descr_file_name is None:
data = data
else:
assert descr_module is not None
descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
data = (data, descr)
with _open_binary(data_module, target_filename) as compressed_file:
compressed_file = gzip.open(compressed_file, mode='rt', encoding=encoding)
data = np.loadtxt(compressed_file, **kwargs)
if descr_file_name is None:
target = data
else:
assert descr_module is not None
descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
target = (data, descr)
if scaled:
data = scale(data, copy=False)
data /= data.shape[0] ** 0.5
fdescr = _read_text(descr_module, 'diabetes.rst')
fdescr = fdescr
feature_names = ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
frame = None
target_columns = ['target']
if as_frame:
pd = check_pandas_support('{} with as_frame=True'.format('load_diabetes'))
if not sparse_data:
data_df = pd.DataFrame(data, columns=feature_names)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names)
target_df = pd.DataFrame(target, columns=target_columns)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[feature_names]
y = combined_df[target_columns]
if y.shape[1] == 1:
y = y.iloc[:, 0]
(frame, data, target) = (combined_df, X, y)
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, frame=frame, DESCR=fdescr, feature_names=feature_names, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE)
|
def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
not explicit. We provide information that seems correct with regard to
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
scaled : bool, default=True
If True, the feature variables are mean centered and scaled by the
standard deviation times the square root of `n_samples`.
If False, raw data is returned for the feature variables.
.. versionadded:: 1.1
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarray of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features and/or target of a given sample.
.. versionadded:: 0.18
"""
data_filename = 'diabetes_data_raw.csv.gz'
target_filename = 'diabetes_target.csv.gz'
<DeepExtract>
with _open_binary(data_module, data_filename) as compressed_file:
compressed_file = gzip.open(compressed_file, mode='rt', encoding=encoding)
data = np.loadtxt(compressed_file, **kwargs)
if descr_file_name is None:
data = data
else:
assert descr_module is not None
descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
data = (data, descr)
</DeepExtract>
<DeepExtract>
with _open_binary(data_module, target_filename) as compressed_file:
compressed_file = gzip.open(compressed_file, mode='rt', encoding=encoding)
data = np.loadtxt(compressed_file, **kwargs)
if descr_file_name is None:
target = data
else:
assert descr_module is not None
descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
target = (data, descr)
</DeepExtract>
if scaled:
data = scale(data, copy=False)
data /= data.shape[0] ** 0.5
<DeepExtract>
fdescr = _read_text(descr_module, 'diabetes.rst')
fdescr = fdescr
</DeepExtract>
feature_names = ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
frame = None
target_columns = ['target']
if as_frame:
<DeepExtract>
pd = check_pandas_support('{} with as_frame=True'.format('load_diabetes'))
if not sparse_data:
data_df = pd.DataFrame(data, columns=feature_names)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names)
target_df = pd.DataFrame(target, columns=target_columns)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[feature_names]
y = combined_df[target_columns]
if y.shape[1] == 1:
y = y.iloc[:, 0]
(frame, data, target) = (combined_df, X, y)
</DeepExtract>
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, frame=frame, DESCR=fdescr, feature_names=feature_names, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE)
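# A minimal usage sketch of the loader documented above; as_frame=True
# assumes pandas is installed, and scaled=False requires scikit-learn >= 1.1.
from sklearn.datasets import load_diabetes

X, y = load_diabetes(return_X_y=True)   # mean-centered, scaled features
print(X.shape, y.shape)                 # (442, 10) (442,)
raw = load_diabetes(scaled=False, as_frame=True)
print(raw.frame.shape)                  # (442, 11): features plus target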
|
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plt.plot(all_components, all_times['pca'], label='PCA', marker='o')
plt.plot(all_components, all_times['ipca'], label='IncrementalPCA, bsize=%i' % batch_size, marker='o')
plt.legend(loc='upper left')
plt.suptitle('Algorithm runtime vs. n_components\n LFW, size %i x %i' % data.shape)
plt.xlabel('Number of components (out of max %i)' % data.shape[1])
plt.ylabel('Time (seconds)')
|
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
<DeepExtract>
plt.plot(all_components, all_times['pca'], label='PCA', marker='o')
</DeepExtract>
<DeepExtract>
plt.plot(all_components, all_times['ipca'], label='IncrementalPCA, bsize=%i' % batch_size, marker='o')
</DeepExtract>
plt.legend(loc='upper left')
plt.suptitle('Algorithm runtime vs. n_components\n LFW, size %i x %i' % data.shape)
plt.xlabel('Number of components (out of max %i)' % data.shape[1])
plt.ylabel('Time (seconds)')
|
@pytest.mark.parametrize('algorithm', ALGORITHMS)
def test_k_and_radius_neighbors_X_None(algorithm):
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
(dist, ind) = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
(dist, ind) = nn.radius_neighbors(None, radius=1.5)
for (ind, ele) in enumerate(dist):
assert_array_equal(ele, [[1], [1]][ind])
for (ind, ele) in enumerate(ind):
assert_array_equal(ele, [[1], [0]][ind])
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(graph.A, [[0, 1], [1, 0]])
assert_array_equal(graph.data, [1, 1])
assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(nn.kneighbors_graph().A, np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0]]))
|
@pytest.mark.parametrize('algorithm', ALGORITHMS)
def test_k_and_radius_neighbors_X_None(algorithm):
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
(dist, ind) = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
(dist, ind) = nn.radius_neighbors(None, radius=1.5)
<DeepExtract>
for (ind, ele) in enumerate(dist):
assert_array_equal(ele, [[1], [1]][ind])
</DeepExtract>
<DeepExtract>
for (ind, ele) in enumerate(ind):
assert_array_equal(ele, [[1], [0]][ind])
</DeepExtract>
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(graph.A, [[0, 1], [1, 0]])
assert_array_equal(graph.data, [1, 1])
assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(nn.kneighbors_graph().A, np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0]]))
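# A minimal sketch of the X=None behaviour exercised above: querying a
# fitted NearestNeighbors with no argument uses the training points
# themselves and excludes each point from its own neighbourhood.
from sklearn.neighbors import NearestNeighbors

nn = NearestNeighbors(n_neighbors=1).fit([[0.0], [1.0], [3.0]])
dist, ind = nn.kneighbors()   # no X: query the training set, self excluded
print(ind.ravel())            # [1 0 1]: nearest *other* training point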
|
def assert_children_values_bounded(grower, monotonic_cst):
if monotonic_cst == MonotonicConstraint.NO_CST:
return
def recursively_check_children_node_values(node, right_sibling=None):
if node.is_leaf:
return
if right_sibling is not None:
middle = (node.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert node.left_child.value <= node.right_child.value <= middle
if not right_sibling.is_leaf:
assert middle <= right_sibling.left_child.value <= right_sibling.right_child.value
else:
assert node.left_child.value >= node.right_child.value >= middle
if not right_sibling.is_leaf:
assert middle >= right_sibling.left_child.value >= right_sibling.right_child.value
if node.left_child.is_leaf:
return
if node.right_child is not None:
middle = (node.left_child.value + node.right_child.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert node.left_child.left_child.value <= node.left_child.right_child.value <= middle
if not node.right_child.is_leaf:
assert middle <= node.right_child.left_child.value <= node.right_child.right_child.value
else:
assert node.left_child.left_child.value >= node.left_child.right_child.value >= middle
if not node.right_child.is_leaf:
assert middle >= node.right_child.left_child.value >= node.right_child.right_child.value
recursively_check_children_node_values(node.left_child.left_child, right_sibling=node.left_child.right_child)
recursively_check_children_node_values(node.left_child.right_child)
if node.right_child.is_leaf:
return
if right_sibling is not None:
middle = (node.right_child.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert node.right_child.left_child.value <= node.right_child.right_child.value <= middle
if not right_sibling.is_leaf:
assert middle <= right_sibling.left_child.value <= right_sibling.right_child.value
else:
assert node.right_child.left_child.value >= node.right_child.right_child.value >= middle
if not right_sibling.is_leaf:
assert middle >= right_sibling.left_child.value >= right_sibling.right_child.value
recursively_check_children_node_values(node.right_child.left_child, right_sibling=node.right_child.right_child)
recursively_check_children_node_values(node.right_child.right_child)
if grower.root.is_leaf:
return
if right_sibling is not None:
middle = (grower.root.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert grower.root.left_child.value <= grower.root.right_child.value <= middle
if not right_sibling.is_leaf:
assert middle <= right_sibling.left_child.value <= right_sibling.right_child.value
else:
assert grower.root.left_child.value >= grower.root.right_child.value >= middle
if not right_sibling.is_leaf:
assert middle >= right_sibling.left_child.value >= right_sibling.right_child.value
recursively_check_children_node_values(grower.root.left_child, right_sibling=grower.root.right_child)
recursively_check_children_node_values(grower.root.right_child)
</DeepExtract>
|
def assert_children_values_bounded(grower, monotonic_cst):
if monotonic_cst == MonotonicConstraint.NO_CST:
return
def recursively_check_children_node_values(node, right_sibling=None):
if node.is_leaf:
return
if right_sibling is not None:
middle = (node.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert node.left_child.value <= node.right_child.value <= middle
if not right_sibling.is_leaf:
assert middle <= right_sibling.left_child.value <= right_sibling.right_child.value
else:
assert node.left_child.value >= node.right_child.value >= middle
if not right_sibling.is_leaf:
assert middle >= right_sibling.left_child.value >= right_sibling.right_child.value
<DeepExtract>
if node.left_child.is_leaf:
return
if node.right_child is not None:
middle = (node.left_child.value + node.right_child.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert node.left_child.left_child.value <= node.left_child.right_child.value <= middle
if not node.right_child.is_leaf:
assert middle <= node.right_child.left_child.value <= node.right_child.right_child.value
else:
assert node.left_child.left_child.value >= node.left_child.right_child.value >= middle
if not node.right_child.is_leaf:
assert middle >= node.right_child.left_child.value >= node.right_child.right_child.value
recursively_check_children_node_values(node.left_child.left_child, right_sibling=node.left_child.right_child)
recursively_check_children_node_values(node.left_child.right_child)
</DeepExtract>
<DeepExtract>
if node.right_child.is_leaf:
return
if right_sibling is not None:
middle = (node.right_child.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert node.right_child.left_child.value <= node.right_child.right_child.value <= middle
if not right_sibling.is_leaf:
assert middle <= right_sibling.left_child.value <= right_sibling.right_child.value
else:
assert node.right_child.left_child.value >= node.right_child.right_child.value >= middle
if not right_sibling.is_leaf:
assert middle >= right_sibling.left_child.value >= right_sibling.right_child.value
recursively_check_children_node_values(node.right_child.left_child, right_sibling=node.right_child.right_child)
recursively_check_children_node_values(node.right_child.right_child)
</DeepExtract>
<DeepExtract>
if grower.root.is_leaf:
return
if right_sibling is not None:
middle = (grower.root.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert grower.root.left_child.value <= grower.root.right_child.value <= middle
if not right_sibling.is_leaf:
assert middle <= right_sibling.left_child.value <= right_sibling.right_child.value
else:
assert grower.root.left_child.value >= grower.root.right_child.value >= middle
if not right_sibling.is_leaf:
assert middle >= right_sibling.left_child.value >= right_sibling.right_child.value
recursively_check_children_node_values(grower.root.left_child, right_sibling=grower.root.right_child)
recursively_check_children_node_values(grower.root.right_child)
</DeepExtract>
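# A hedged sketch of the public feature these grower assertions guard:
# monotonic constraints in histogram gradient boosting. Assumes
# scikit-learn >= 0.24, where the estimator is importable directly.
import numpy as np
from sklearn.ensemble import HistGradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 1)
y = 5 * X[:, 0] + 0.1 * rng.randn(200)
gbdt = HistGradientBoostingRegressor(monotonic_cst=[1]).fit(X, y)
grid = np.linspace(0, 1, 50).reshape(-1, 1)
pred = gbdt.predict(grid)
print(np.all(np.diff(pred) >= 0))   # expected: True (never decreasing)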
|
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + '.json' + path_suffix
data_file_name = output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
|
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
<DeepExtract>
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + '.json' + path_suffix
data_file_name = output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
</DeepExtract>
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
|
def test_lasso_cv():
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, X_test, y_test) = (X, y, X_test, y_test)
max_iter = 150
clf = LassoCV(n_alphas=10, eps=0.001, max_iter=max_iter, cv=3).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=0.001, max_iter=max_iter, precompute=True, cv=3)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
lars = LassoLarsCV(max_iter=30, cv=3).fit(X, y)
assert np.abs(np.searchsorted(clf.alphas_[::-1], lars.alpha_) - np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), significant=2)
assert clf.score(X_test, y_test) > 0.99
|
def test_lasso_cv():
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
(X, y, X_test, y_test) = (X, y, X_test, y_test)
</DeepExtract>
max_iter = 150
clf = LassoCV(n_alphas=10, eps=0.001, max_iter=max_iter, cv=3).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=0.001, max_iter=max_iter, precompute=True, cv=3)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
lars = LassoLarsCV(max_iter=30, cv=3).fit(X, y)
assert np.abs(np.searchsorted(clf.alphas_[::-1], lars.alpha_) - np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), significant=2)
assert clf.score(X_test, y_test) > 0.99
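# A minimal sketch of the cross-validated alpha selection checked above.
# Synthetic data, illustrative only.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LassoCV

X, y = make_regression(n_samples=100, n_features=20, noise=1.0, random_state=0)
lasso = LassoCV(n_alphas=10, cv=3).fit(X, y)
print(lasso.alpha_)            # alpha selected by cross-validation
print(lasso.mse_path_.shape)   # (10, 3): one MSE per (alpha, fold)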
|
def test_hidden_stroptions():
"""Check that we can have 2 StrOptions constraints, one being hidden."""
@validate_params({'param': [StrOptions({'auto'}), Hidden(StrOptions({'warn'}))]})
def f(param):
pass
pass
pass
with pytest.raises(InvalidParameterError, match="The 'param' parameter") as exc_info:
pass
err_msg = str(exc_info.value)
assert 'auto' in err_msg
assert 'warn' not in err_msg
|
def test_hidden_stroptions():
"""Check that we can have 2 StrOptions constraints, one being hidden."""
@validate_params({'param': [StrOptions({'auto'}), Hidden(StrOptions({'warn'}))]})
def f(param):
pass
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
pass
</DeepExtract>
with pytest.raises(InvalidParameterError, match="The 'param' parameter") as exc_info:
<DeepExtract>
pass
</DeepExtract>
err_msg = str(exc_info.value)
assert 'auto' in err_msg
assert 'warn' not in err_msg
|
def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : {array-like, sparse-matrix} of shape (n_samples, n_features) if `validate=True` else any object that `func` can handle
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
if self.validate:
X = self._validate_data(X, accept_sparse=self.accept_sparse, reset=False)
elif False:
self._check_n_features(X, reset=False)
self._check_feature_names(X, reset=False)
X = X
return self._transform(X, func=self.func, kw_args=self.kw_args)
|
def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : {array-like, sparse-matrix} of shape (n_samples, n_features) if `validate=True` else any object that `func` can handle
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
<DeepExtract>
if self.validate:
X = self._validate_data(X, accept_sparse=self.accept_sparse, reset=False)
elif False:
self._check_n_features(X, reset=False)
self._check_feature_names(X, reset=False)
X = X
</DeepExtract>
return self._transform(X, func=self.func, kw_args=self.kw_args)
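# A hedged sketch of the public estimator whose transform is shown above:
# FunctionTransformer applies `func` to X, with validate=True enabling the
# input checks taken in the first branch of the helper.
import numpy as np
from sklearn.preprocessing import FunctionTransformer

log_tf = FunctionTransformer(np.log1p, validate=True)
X = np.array([[0.0, 1.0], [2.0, 3.0]])
print(log_tf.fit_transform(X))   # element-wise log(1 + x)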
|
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter, pos_weight, neg_weight, sample_weight, validation_mask=None, random_state=None):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : str
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
y_i = np.ones(y.shape, dtype=X.dtype, order='C')
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
elif not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
(y_i, coef, intercept, average_coef, average_intercept) = (y_i, coef, intercept, average_coef, average_intercept)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
random_state = check_random_state(random_state)
(dataset, intercept_decay) = make_dataset(X, y_i, sample_weight, random_state=random_state)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = est._make_validation_score_cb(validation_mask, X, y_i, sample_weight, classes=classes)
seed = random_state.randint(MAX_INT)
tol = est.tol if est.tol is not None else -np.inf
_plain_sgd = _plain_sgd32 if coef.dtype == np.float32 else _plain_sgd64
(coef, intercept, average_coef, average_intercept, n_iter_) = _plain_sgd(coef, intercept, average_coef, average_intercept, est.loss_function_, penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask, est.early_stopping, validation_score_cb, int(est.n_iter_no_change), max_iter, tol, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, 0, est.t_, intercept_decay, est.average)
if est.average:
if len(est.classes_) == 2:
est._average_intercept[0] = average_intercept
else:
est._average_intercept[i] = average_intercept
return (coef, intercept, n_iter_)
|
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter, pos_weight, neg_weight, sample_weight, validation_mask=None, random_state=None):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : str
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
<DeepExtract>
y_i = np.ones(y.shape, dtype=X.dtype, order='C')
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
elif not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
(y_i, coef, intercept, average_coef, average_intercept) = (y_i, coef, intercept, average_coef, average_intercept)
</DeepExtract>
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
random_state = check_random_state(random_state)
(dataset, intercept_decay) = make_dataset(X, y_i, sample_weight, random_state=random_state)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = est._make_validation_score_cb(validation_mask, X, y_i, sample_weight, classes=classes)
seed = random_state.randint(MAX_INT)
tol = est.tol if est.tol is not None else -np.inf
<DeepExtract>
_plain_sgd = _plain_sgd32 if coef.dtype == np.float32 else _plain_sgd64
</DeepExtract>
(coef, intercept, average_coef, average_intercept, n_iter_) = _plain_sgd(coef, intercept, average_coef, average_intercept, est.loss_function_, penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask, est.early_stopping, validation_score_cb, int(est.n_iter_no_change), max_iter, tol, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, 0, est.t_, intercept_decay, est.average)
if est.average:
if len(est.classes_) == 2:
est._average_intercept[0] = average_intercept
else:
est._average_intercept[i] = average_intercept
return (coef, intercept, n_iter_)
|
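fit_binary is a private helper; the public entry point is SGDClassifier, which calls it once per class in the one-vs-rest scheme described above. A minimal sketch:

from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=200, n_features=20, n_informative=4,
                           n_classes=3, random_state=0)
clf = SGDClassifier(loss="hinge", max_iter=1000, tol=1e-3, random_state=0)
clf.fit(X, y)                # one binary sub-problem per class
print(clf.coef_.shape)       # (3, 20): one row of weights per class
print(clf.intercept_.shape)  # (3,)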
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity', sort_results=False):
"""Compute the (weighted) graph of Neighbors for points in X.
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Radius of neighborhoods. The default is the value passed to the
constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
sort_results : bool, default=False
If True, in each row of the result, the non-zero entries will be
sorted by increasing distances. If False, the non-zero entries may
not be sorted. Only used with mode='distance'.
.. versionadded:: 0.22
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X)
NearestNeighbors(radius=1.5)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
check_is_fitted(self)
if radius is None:
radius = self.radius
if mode == 'connectivity':
check_is_fitted(self)
if sort_results and (not False):
raise ValueError('return_distance must be True if sort_results is True.')
query_is_train = X is None
if query_is_train:
X = self._fit_X
elif self.metric == 'precomputed':
X = _check_precomputed(X)
else:
X = self._validate_data(X, accept_sparse='csr', reset=False, order='C')
if radius is None:
radius = self.radius
use_pairwise_distances_reductions = self._fit_method == 'brute' and RadiusNeighbors.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_)
if use_pairwise_distances_reductions:
results = RadiusNeighbors.compute(X=X, Y=self._fit_X, radius=radius, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=False, sort_results=sort_results)
elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X):
results = _radius_neighbors_from_graph(X, radius=radius, return_distance=False)
elif self._fit_method == 'brute':
if self.effective_metric_ == 'euclidean':
radius *= radius
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(self._radius_neighbors_reduce_func, radius=radius, return_distance=False)
chunked_results = pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=self.n_jobs, **kwds)
if False:
(neigh_dist_chunks, neigh_ind_chunks) = zip(*chunked_results)
neigh_dist_list = sum(neigh_dist_chunks, [])
neigh_ind_list = sum(neigh_ind_chunks, [])
neigh_dist = _to_object_array(neigh_dist_list)
neigh_ind = _to_object_array(neigh_ind_list)
results = (neigh_dist, neigh_ind)
else:
neigh_ind_list = sum(chunked_results, [])
results = _to_object_array(neigh_ind_list)
if sort_results:
for ii in range(len(neigh_dist)):
order = np.argsort(neigh_dist[ii], kind='mergesort')
neigh_ind[ii] = neigh_ind[ii][order]
neigh_dist[ii] = neigh_dist[ii][order]
results = (neigh_dist, neigh_ind)
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method)
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(_tree_query_radius_parallel_helper)
chunked_results = Parallel(n_jobs, prefer='threads')((delayed_query(self._tree, X[s], radius, False, sort_results=sort_results) for s in gen_even_slices(X.shape[0], n_jobs)))
if False:
(neigh_ind, neigh_dist) = tuple(zip(*chunked_results))
results = (np.hstack(neigh_dist), np.hstack(neigh_ind))
else:
results = np.hstack(chunked_results)
else:
raise ValueError('internal: _fit_method not recognized')
if not query_is_train:
A_ind = results
else:
if False:
(neigh_dist, neigh_ind) = results
else:
neigh_ind = results
for (ind, ind_neighbor) in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if False:
neigh_dist[ind] = neigh_dist[ind][mask]
if False:
A_ind = (neigh_dist, neigh_ind)
A_ind = neigh_ind
A_data = None
elif mode == 'distance':
check_is_fitted(self)
if sort_results and (not True):
raise ValueError('return_distance must be True if sort_results is True.')
query_is_train = X is None
if query_is_train:
X = self._fit_X
elif self.metric == 'precomputed':
X = _check_precomputed(X)
else:
X = self._validate_data(X, accept_sparse='csr', reset=False, order='C')
if radius is None:
radius = self.radius
use_pairwise_distances_reductions = self._fit_method == 'brute' and RadiusNeighbors.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_)
if use_pairwise_distances_reductions:
results = RadiusNeighbors.compute(X=X, Y=self._fit_X, radius=radius, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=True, sort_results=sort_results)
elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X):
results = _radius_neighbors_from_graph(X, radius=radius, return_distance=True)
elif self._fit_method == 'brute':
if self.effective_metric_ == 'euclidean':
radius *= radius
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(self._radius_neighbors_reduce_func, radius=radius, return_distance=True)
chunked_results = pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=self.n_jobs, **kwds)
if True:
(neigh_dist_chunks, neigh_ind_chunks) = zip(*chunked_results)
neigh_dist_list = sum(neigh_dist_chunks, [])
neigh_ind_list = sum(neigh_ind_chunks, [])
neigh_dist = _to_object_array(neigh_dist_list)
neigh_ind = _to_object_array(neigh_ind_list)
results = (neigh_dist, neigh_ind)
else:
neigh_ind_list = sum(chunked_results, [])
results = _to_object_array(neigh_ind_list)
if sort_results:
for ii in range(len(neigh_dist)):
order = np.argsort(neigh_dist[ii], kind='mergesort')
neigh_ind[ii] = neigh_ind[ii][order]
neigh_dist[ii] = neigh_dist[ii][order]
results = (neigh_dist, neigh_ind)
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method)
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(_tree_query_radius_parallel_helper)
chunked_results = Parallel(n_jobs, prefer='threads')((delayed_query(self._tree, X[s], radius, True, sort_results=sort_results) for s in gen_even_slices(X.shape[0], n_jobs)))
if True:
(neigh_ind, neigh_dist) = tuple(zip(*chunked_results))
results = (np.hstack(neigh_dist), np.hstack(neigh_ind))
else:
results = np.hstack(chunked_results)
else:
raise ValueError('internal: _fit_method not recognized')
if not query_is_train:
(dist, A_ind) = results
else:
if True:
(neigh_dist, neigh_ind) = results
else:
neigh_ind = results
for (ind, ind_neighbor) in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if True:
neigh_dist[ind] = neigh_dist[ind][mask]
if True:
(dist, A_ind) = (neigh_dist, neigh_ind)
(dist, A_ind) = neigh_ind
A_data = np.concatenate(list(dist))
else:
raise ValueError(f'Unsupported mode, must be one of "connectivity", or "distance" but got "{mode}" instead')
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
|
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity', sort_results=False):
"""Compute the (weighted) graph of Neighbors for points in X.
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Radius of neighborhoods. The default is the value passed to the
constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
sort_results : bool, default=False
If True, in each row of the result, the non-zero entries will be
sorted by increasing distances. If False, the non-zero entries may
not be sorted. Only used with mode='distance'.
.. versionadded:: 0.22
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X)
NearestNeighbors(radius=1.5)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
check_is_fitted(self)
if radius is None:
radius = self.radius
if mode == 'connectivity':
<DeepExtract>
check_is_fitted(self)
if sort_results and (not False):
raise ValueError('return_distance must be True if sort_results is True.')
query_is_train = X is None
if query_is_train:
X = self._fit_X
elif self.metric == 'precomputed':
X = _check_precomputed(X)
else:
X = self._validate_data(X, accept_sparse='csr', reset=False, order='C')
if radius is None:
radius = self.radius
use_pairwise_distances_reductions = self._fit_method == 'brute' and RadiusNeighbors.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_)
if use_pairwise_distances_reductions:
results = RadiusNeighbors.compute(X=X, Y=self._fit_X, radius=radius, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=False, sort_results=sort_results)
elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X):
results = _radius_neighbors_from_graph(X, radius=radius, return_distance=False)
elif self._fit_method == 'brute':
if self.effective_metric_ == 'euclidean':
radius *= radius
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(self._radius_neighbors_reduce_func, radius=radius, return_distance=False)
chunked_results = pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=self.n_jobs, **kwds)
if False:
(neigh_dist_chunks, neigh_ind_chunks) = zip(*chunked_results)
neigh_dist_list = sum(neigh_dist_chunks, [])
neigh_ind_list = sum(neigh_ind_chunks, [])
neigh_dist = _to_object_array(neigh_dist_list)
neigh_ind = _to_object_array(neigh_ind_list)
results = (neigh_dist, neigh_ind)
else:
neigh_ind_list = sum(chunked_results, [])
results = _to_object_array(neigh_ind_list)
if sort_results:
for ii in range(len(neigh_dist)):
order = np.argsort(neigh_dist[ii], kind='mergesort')
neigh_ind[ii] = neigh_ind[ii][order]
neigh_dist[ii] = neigh_dist[ii][order]
results = (neigh_dist, neigh_ind)
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method)
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(_tree_query_radius_parallel_helper)
chunked_results = Parallel(n_jobs, prefer='threads')((delayed_query(self._tree, X[s], radius, False, sort_results=sort_results) for s in gen_even_slices(X.shape[0], n_jobs)))
if False:
(neigh_ind, neigh_dist) = tuple(zip(*chunked_results))
results = (np.hstack(neigh_dist), np.hstack(neigh_ind))
else:
results = np.hstack(chunked_results)
else:
raise ValueError('internal: _fit_method not recognized')
if not query_is_train:
A_ind = results
else:
if False:
(neigh_dist, neigh_ind) = results
else:
neigh_ind = results
for (ind, ind_neighbor) in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if False:
neigh_dist[ind] = neigh_dist[ind][mask]
if False:
A_ind = (neigh_dist, neigh_ind)
A_ind = neigh_ind
</DeepExtract>
A_data = None
elif mode == 'distance':
<DeepExtract>
check_is_fitted(self)
if sort_results and (not True):
raise ValueError('return_distance must be True if sort_results is True.')
query_is_train = X is None
if query_is_train:
X = self._fit_X
elif self.metric == 'precomputed':
X = _check_precomputed(X)
else:
X = self._validate_data(X, accept_sparse='csr', reset=False, order='C')
if radius is None:
radius = self.radius
use_pairwise_distances_reductions = self._fit_method == 'brute' and RadiusNeighbors.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_)
if use_pairwise_distances_reductions:
results = RadiusNeighbors.compute(X=X, Y=self._fit_X, radius=radius, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=True, sort_results=sort_results)
elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X):
results = _radius_neighbors_from_graph(X, radius=radius, return_distance=True)
elif self._fit_method == 'brute':
if self.effective_metric_ == 'euclidean':
radius *= radius
kwds = {'squared': True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(self._radius_neighbors_reduce_func, radius=radius, return_distance=True)
chunked_results = pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=self.n_jobs, **kwds)
if True:
(neigh_dist_chunks, neigh_ind_chunks) = zip(*chunked_results)
neigh_dist_list = sum(neigh_dist_chunks, [])
neigh_ind_list = sum(neigh_ind_chunks, [])
neigh_dist = _to_object_array(neigh_dist_list)
neigh_ind = _to_object_array(neigh_ind_list)
results = (neigh_dist, neigh_ind)
else:
neigh_ind_list = sum(chunked_results, [])
results = _to_object_array(neigh_ind_list)
if sort_results:
for ii in range(len(neigh_dist)):
order = np.argsort(neigh_dist[ii], kind='mergesort')
neigh_ind[ii] = neigh_ind[ii][order]
neigh_dist[ii] = neigh_dist[ii][order]
results = (neigh_dist, neigh_ind)
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method)
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(_tree_query_radius_parallel_helper)
chunked_results = Parallel(n_jobs, prefer='threads')((delayed_query(self._tree, X[s], radius, True, sort_results=sort_results) for s in gen_even_slices(X.shape[0], n_jobs)))
if True:
(neigh_ind, neigh_dist) = tuple(zip(*chunked_results))
results = (np.hstack(neigh_dist), np.hstack(neigh_ind))
else:
results = np.hstack(chunked_results)
else:
raise ValueError('internal: _fit_method not recognized')
if not query_is_train:
(dist, A_ind) = results
else:
if True:
(neigh_dist, neigh_ind) = results
else:
neigh_ind = results
for (ind, ind_neighbor) in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if True:
neigh_dist[ind] = neigh_dist[ind][mask]
if True:
(dist, A_ind) = (neigh_dist, neigh_ind)
(dist, A_ind) = neigh_ind
</DeepExtract>
A_data = np.concatenate(list(dist))
else:
raise ValueError(f'Unsupported mode, must be one of "connectivity", or "distance" but got "{mode}" instead')
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
|
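A small usage sketch of the public method above; with X=None the indexed points themselves are queried and each point is excluded from its own neighborhood, matching the query_is_train branch.

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[0.0], [3.0], [1.0]])
nn = NearestNeighbors(radius=1.5).fit(X)
# Weighted graph over the training points: no self-edges on the diagonal.
A = nn.radius_neighbors_graph(X=None, mode="distance", sort_results=True)
print(A.toarray())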
def test_time_series_max_train_size():
X = np.zeros((6, 1))
splits = TimeSeriesSplit(n_splits=3).split(X)
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
for ((train, test), (check_train, check_test)) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= 3
suffix_start = max(len(train) - 3, 0)
assert_array_equal(check_train, train[suffix_start:])
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
for ((train, test), (check_train, check_test)) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= 2
suffix_start = max(len(train) - 2, 0)
assert_array_equal(check_train, train[suffix_start:])
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
for ((train, test), (check_train, check_test)) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= 2
suffix_start = max(len(train) - 2, 0)
assert_array_equal(check_train, train[suffix_start:])
</DeepExtract>
|
def test_time_series_max_train_size():
X = np.zeros((6, 1))
splits = TimeSeriesSplit(n_splits=3).split(X)
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
<DeepExtract>
for ((train, test), (check_train, check_test)) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= 3
suffix_start = max(len(train) - 3, 0)
assert_array_equal(check_train, train[suffix_start:])
</DeepExtract>
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
<DeepExtract>
for ((train, test), (check_train, check_test)) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= 2
suffix_start = max(len(train) - 2, 0)
assert_array_equal(check_train, train[suffix_start:])
</DeepExtract>
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
<DeepExtract>
for ((train, test), (check_train, check_test)) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= 2
suffix_start = max(len(train) - 2, 0)
assert_array_equal(check_train, train[suffix_start:])
</DeepExtract>
|
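A quick illustration of the behaviour the test above checks: max_train_size truncates each training window to its most recent samples.

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X = np.zeros((6, 1))
for train, test in TimeSeriesSplit(n_splits=3, max_train_size=2).split(X):
    print(train, test)
# [1 2] [3]
# [2 3] [4]
# [3 4] [5]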
@validate_params({'reachability': [np.ndarray], 'predecessor': [np.ndarray], 'ordering': [np.ndarray], 'min_samples': [Interval(Integral, 2, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both')], 'min_cluster_size': [Interval(Integral, 2, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both'), None], 'xi': [Interval(Real, 0, 1, closed='both')], 'predecessor_correction': ['boolean']})
def cluster_optics_xi(*, reachability, predecessor, ordering, min_samples, min_cluster_size=None, xi=0.05, predecessor_correction=True):
"""Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`).
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`).
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
-------
labels : ndarray of shape (n_samples,)
The labels assigned to samples. Points which are not included
in any cluster are labeled as -1.
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to ``(end,
-start)`` (ascending) so that larger clusters encompassing smaller
clusters come after such nested smaller clusters. Since ``labels`` does
not reflect the hierarchy, usually ``len(clusters) >
np.unique(labels)``.
"""
n_samples = len(reachability)
if min_samples > n_samples:
raise ValueError('%s must be no greater than the number of samples (%d). Got %d' % ('min_samples', n_samples, min_samples))
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
if min_cluster_size is None:
min_cluster_size = min_samples
if min_cluster_size > n_samples:
raise ValueError('%s must be no greater than the number of samples (%d). Got %d' % ('min_cluster_size', n_samples, min_cluster_size))
if min_cluster_size <= 1:
min_cluster_size = max(2, int(min_cluster_size * n_samples))
reachability[ordering] = np.hstack((reachability[ordering], np.inf))
xi_complement = 1 - xi
sdas = []
clusters = []
index = 0
mib = 0.0
with np.errstate(invalid='ignore'):
ratio = reachability[ordering][:-1] / reachability[ordering][1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
if steep_index < index:
continue
mib = max(mib, np.max(reachability[ordering][index:steep_index + 1]))
if steep_downward[steep_index]:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability[ordering])
D_start = steep_index
D_end = _extend_region(steep_downward, upward, D_start, min_samples)
D = {'start': D_start, 'end': D_end, 'mib': 0.0}
sdas.append(D)
index = D_end + 1
mib = reachability[ordering][index]
else:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability[ordering])
U_start = steep_index
U_end = _extend_region(steep_upward, downward, U_start, min_samples)
index = U_end + 1
mib = reachability[ordering][index]
U_clusters = []
for D in sdas:
c_start = D['start']
c_end = U_end
if reachability[ordering][c_end + 1] * xi_complement < D['mib']:
continue
D_max = reachability[ordering][D['start']]
if D_max * xi_complement >= reachability[ordering][c_end + 1]:
while reachability[ordering][c_start + 1] > reachability[ordering][c_end + 1] and c_start < D['end']:
c_start += 1
elif reachability[ordering][c_end + 1] * xi_complement >= D_max:
while reachability[ordering][c_end - 1] > D_max and c_end > U_start:
c_end -= 1
if predecessor_correction:
(c_start, c_end) = _correct_predecessor(reachability[ordering], predecessor[ordering], ordering, c_start, c_end)
if c_start is None:
continue
if c_end - c_start + 1 < min_cluster_size:
continue
if c_start > D['end']:
continue
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
U_clusters.reverse()
clusters.extend(U_clusters)
clusters = np.array(clusters)
labels = np.full(len(ordering), -1, dtype=int)
label = 0
for c in clusters:
if not np.any(labels[c[0]:c[1] + 1] != -1):
labels[c[0]:c[1] + 1] = label
label += 1
labels[ordering] = labels.copy()
labels = labels
return (labels, clusters)
|
@validate_params({'reachability': [np.ndarray], 'predecessor': [np.ndarray], 'ordering': [np.ndarray], 'min_samples': [Interval(Integral, 2, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both')], 'min_cluster_size': [Interval(Integral, 2, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both'), None], 'xi': [Interval(Real, 0, 1, closed='both')], 'predecessor_correction': ['boolean']})
def cluster_optics_xi(*, reachability, predecessor, ordering, min_samples, min_cluster_size=None, xi=0.05, predecessor_correction=True):
"""Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`).
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`).
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
-------
labels : ndarray of shape (n_samples,)
The labels assigned to samples. Points which are not included
in any cluster are labeled as -1.
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to ``(end,
-start)`` (ascending) so that larger clusters encompassing smaller
clusters come after such nested smaller clusters. Since ``labels`` does
not reflect the hierarchy, usually ``len(clusters) >
np.unique(labels)``.
"""
n_samples = len(reachability)
<DeepExtract>
if min_samples > n_samples:
raise ValueError('%s must be no greater than the number of samples (%d). Got %d' % ('min_samples', n_samples, min_samples))
</DeepExtract>
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
if min_cluster_size is None:
min_cluster_size = min_samples
<DeepExtract>
if min_cluster_size > n_samples:
raise ValueError('%s must be no greater than the number of samples (%d). Got %d' % ('min_cluster_size', n_samples, min_cluster_size))
</DeepExtract>
if min_cluster_size <= 1:
min_cluster_size = max(2, int(min_cluster_size * n_samples))
<DeepExtract>
reachability[ordering] = np.hstack((reachability[ordering], np.inf))
xi_complement = 1 - xi
sdas = []
clusters = []
index = 0
mib = 0.0
with np.errstate(invalid='ignore'):
ratio = reachability[ordering][:-1] / reachability[ordering][1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
if steep_index < index:
continue
mib = max(mib, np.max(reachability[ordering][index:steep_index + 1]))
if steep_downward[steep_index]:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability[ordering])
D_start = steep_index
D_end = _extend_region(steep_downward, upward, D_start, min_samples)
D = {'start': D_start, 'end': D_end, 'mib': 0.0}
sdas.append(D)
index = D_end + 1
mib = reachability[ordering][index]
else:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability[ordering])
U_start = steep_index
U_end = _extend_region(steep_upward, downward, U_start, min_samples)
index = U_end + 1
mib = reachability[ordering][index]
U_clusters = []
for D in sdas:
c_start = D['start']
c_end = U_end
if reachability[ordering][c_end + 1] * xi_complement < D['mib']:
continue
D_max = reachability[ordering][D['start']]
if D_max * xi_complement >= reachability[ordering][c_end + 1]:
while reachability[ordering][c_start + 1] > reachability[ordering][c_end + 1] and c_start < D['end']:
c_start += 1
elif reachability[ordering][c_end + 1] * xi_complement >= D_max:
while reachability[ordering][c_end - 1] > D_max and c_end > U_start:
c_end -= 1
if predecessor_correction:
(c_start, c_end) = _correct_predecessor(reachability[ordering], predecessor[ordering], ordering, c_start, c_end)
if c_start is None:
continue
if c_end - c_start + 1 < min_cluster_size:
continue
if c_start > D['end']:
continue
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
U_clusters.reverse()
clusters.extend(U_clusters)
clusters = np.array(clusters)
</DeepExtract>
<DeepExtract>
labels = np.full(len(ordering), -1, dtype=int)
label = 0
for c in clusters:
if not np.any(labels[c[0]:c[1] + 1] != -1):
labels[c[0]:c[1] + 1] = label
label += 1
labels[ordering] = labels.copy()
labels = labels
</DeepExtract>
return (labels, clusters)
|
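cluster_optics_xi is public and can be fed directly with the arrays a fitted OPTICS estimator exposes; a minimal sketch on a toy two-blob dataset:

import numpy as np
from sklearn.cluster import OPTICS, cluster_optics_xi

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.3, size=(30, 2)),
               rng.normal(5.0, 0.3, size=(30, 2))])
opt = OPTICS(min_samples=5).fit(X)
labels, clusters = cluster_optics_xi(
    reachability=opt.reachability_,
    predecessor=opt.predecessor_,
    ordering=opt.ordering_,
    min_samples=5,
    xi=0.05,
)
print(np.unique(labels))  # cluster ids, -1 for noise
print(clusters)           # [start, end] index ranges in the OPTICS ordering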
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
(X, y) = indexable(X, y)
X = column_or_1d(X)
y = column_or_1d(y)
F = X
mask_negative_samples = y <= 0
if sample_weight is not None:
prior0 = sample_weight[mask_negative_samples].sum()
prior1 = sample_weight[~mask_negative_samples].sum()
else:
prior0 = float(np.sum(mask_negative_samples))
prior1 = y.shape[0] - prior0
T = np.zeros_like(y, dtype=np.float64)
T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
T[y <= 0] = 1.0 / (prior0 + 2.0)
T1 = 1.0 - T
def objective(AB):
P = expit(-(AB[0] * F + AB[1]))
loss = -(xlogy(T, P) + xlogy(T1, 1.0 - P))
if sample_weight is not None:
(self.a_, self.b_) = (sample_weight * loss).sum()
else:
(self.a_, self.b_) = loss.sum()
def grad(AB):
P = expit(-(AB[0] * F + AB[1]))
TEP_minus_T1P = T - P
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
(self.a_, self.b_) = np.array([dA, dB])
AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
(self.a_, self.b_) = (AB_[0], AB_[1])
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
(X, y) = indexable(X, y)
<DeepExtract>
X = column_or_1d(X)
y = column_or_1d(y)
F = X
mask_negative_samples = y <= 0
if sample_weight is not None:
prior0 = sample_weight[mask_negative_samples].sum()
prior1 = sample_weight[~mask_negative_samples].sum()
else:
prior0 = float(np.sum(mask_negative_samples))
prior1 = y.shape[0] - prior0
T = np.zeros_like(y, dtype=np.float64)
T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
T[y <= 0] = 1.0 / (prior0 + 2.0)
T1 = 1.0 - T
def objective(AB):
P = expit(-(AB[0] * F + AB[1]))
loss = -(xlogy(T, P) + xlogy(T1, 1.0 - P))
if sample_weight is not None:
(self.a_, self.b_) = (sample_weight * loss).sum()
else:
(self.a_, self.b_) = loss.sum()
def grad(AB):
P = expit(-(AB[0] * F + AB[1]))
TEP_minus_T1P = T - P
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
(self.a_, self.b_) = np.array([dA, dB])
AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
(self.a_, self.b_) = (AB_[0], AB_[1])
</DeepExtract>
return self
|
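The Platt-scaling fit above is internal; end users reach it through CalibratedClassifierCV with method='sigmoid'. A minimal sketch (the choice of base estimator is arbitrary):

from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=300, random_state=0)
svc = LinearSVC(random_state=0)  # exposes decision_function scores, no probabilities of its own
calibrated = CalibratedClassifierCV(svc, method="sigmoid", cv=3)
calibrated.fit(X, y)             # fits sigmoid(a * f + b) on held-out decision values
print(calibrated.predict_proba(X[:3]))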
def transform(self, X):
"""Impute all missing values in `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape (n_samples, n_features_out)
`X` with imputed values.
"""
check_is_fitted(self)
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not False and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=False, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if False:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError('X has %d features per sample, expected %d' % (X.shape[1], self.statistics_.shape[0]))
missing_mask = _get_mask(X, self.missing_values)
if self.strategy == 'constant' or self.keep_empty_features:
valid_statistics = statistics
valid_statistics_indexes = None
else:
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
invalid_features = np.arange(X.shape[1])[invalid_mask]
if self.verbose != 'deprecated' and self.verbose:
if hasattr(self, 'feature_names_in_'):
invalid_features = self.feature_names_in_[invalid_features]
warnings.warn(f"Skipping features without any observed values: {invalid_features}. At least one non-missing value is needed for imputation with strategy='{self.strategy}'.")
X = X[:, valid_statistics_indexes]
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError('Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.')
else:
if valid_statistics_indexes is None:
mask = missing_mask.data
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
else:
if valid_statistics_indexes is None:
mask_valid_features = missing_mask
else:
mask_valid_features = missing_mask[:, valid_statistics_indexes]
n_missing = np.sum(mask_valid_features, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask_valid_features.transpose())[::-1]
X[coordinates] = values
X_indicator = super()._transform_indicator(missing_mask)
return super()._concatenate_indicator(X, X_indicator)
|
def transform(self, X):
"""Impute all missing values in `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape (n_samples, n_features_out)
`X` with imputed values.
"""
check_is_fitted(self)
<DeepExtract>
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not False and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=False, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if False:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
</DeepExtract>
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError('X has %d features per sample, expected %d' % (X.shape[1], self.statistics_.shape[0]))
missing_mask = _get_mask(X, self.missing_values)
if self.strategy == 'constant' or self.keep_empty_features:
valid_statistics = statistics
valid_statistics_indexes = None
else:
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
invalid_features = np.arange(X.shape[1])[invalid_mask]
if self.verbose != 'deprecated' and self.verbose:
if hasattr(self, 'feature_names_in_'):
invalid_features = self.feature_names_in_[invalid_features]
warnings.warn(f"Skipping features without any observed values: {invalid_features}. At least one non-missing value is needed for imputation with strategy='{self.strategy}'.")
X = X[:, valid_statistics_indexes]
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError('Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.')
else:
if valid_statistics_indexes is None:
mask = missing_mask.data
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
else:
if valid_statistics_indexes is None:
mask_valid_features = missing_mask
else:
mask_valid_features = missing_mask[:, valid_statistics_indexes]
n_missing = np.sum(mask_valid_features, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask_valid_features.transpose())[::-1]
X[coordinates] = values
X_indicator = super()._transform_indicator(missing_mask)
return super()._concatenate_indicator(X, X_indicator)
|
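A short sketch of the public transform above: the statistics learned in fit are substituted for missing entries.

import numpy as np
from sklearn.impute import SimpleImputer

X_train = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])
imp = SimpleImputer(missing_values=np.nan, strategy="mean").fit(X_train)
print(imp.statistics_)                    # per-column means: [4.0, 3.666...]
print(imp.transform([[np.nan, np.nan]]))  # missing values replaced by those means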
def fit(self, X, y):
"""Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
(xp, _) = get_namespace(X)
(X, y) = self._validate_data(X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32])
self.classes_ = unique_labels(y)
(n_samples, _) = X.shape
n_classes = self.classes_.shape[0]
if n_samples == n_classes:
raise ValueError('The number of samples must be more than the number of classes.')
if self.priors is None:
(_, cnts) = xp.unique_counts(y)
self.priors_ = xp.astype(cnts, xp.float64) / float(y.shape[0])
else:
self.priors_ = xp.asarray(self.priors)
if xp.any(self.priors_ < 0):
raise ValueError('priors must be non-negative')
if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-05:
warnings.warn('The priors do not sum to 1. Renormalizing', UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
max_components = min(n_classes - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError('n_components cannot be larger than min(n_features, n_classes - 1).')
self._max_components = self.n_components
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError("shrinkage not supported with 'svd' solver.")
if self.covariance_estimator is not None:
raise ValueError('covariance estimator is not supported with svd solver. Try another solver')
(xp, is_array_api) = get_namespace(X)
if is_array_api:
svd = xp.linalg.svd
else:
svd = scipy.linalg.svd
(n_samples, n_features) = X.shape
n_classes = self.classes_.shape[0]
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for (idx, group) in enumerate(self.classes_):
Xg = X[y == group]
Xc.append(Xg - self.means_[idx, :])
self.xbar_ = self.priors_ @ self.means_
Xc = xp.concat(Xc, axis=0)
std = xp.std(Xc, axis=0)
std[std == 0] = 1.0
fac = xp.asarray(1.0 / (n_samples - n_classes))
X = xp.sqrt(fac) * (Xc / std)
(U, S, Vt) = svd(X, full_matrices=False)
rank = xp.sum(xp.astype(S > self.tol, xp.int32))
scalings = (Vt[:rank, :] / std).T / S[:rank]
fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1)
X = (xp.sqrt(n_samples * self.priors_ * fac) * (self.means_ - self.xbar_).T).T @ scalings
(_, S, Vt) = svd(X, full_matrices=False)
if self._max_components == 0:
self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype)
else:
self.explained_variance_ratio_ = (S ** 2 / xp.sum(S ** 2))[:self._max_components]
rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32))
self.scalings_ = scalings @ Vt.T[:, :rank]
coef = (self.means_ - self.xbar_) @ self.scalings_
self.intercept_ = -0.5 * xp.sum(coef ** 2, axis=1) + xp.log(self.priors_)
self.coef_ = coef @ self.scalings_.T
self.intercept_ -= self.xbar_ @ self.coef_.T
elif self.solver == 'lsqr':
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, self.shrinkage, self.covariance_estimator)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)
elif self.solver == 'eigen':
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, self.shrinkage, self.covariance_estimator)
Sw = self.covariance_
St = _cov(X, self.shrinkage, self.covariance_estimator)
Sb = St - Sw
(evals, evecs) = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]]
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)
if self.classes_.size == 2:
coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype)
self.coef_ = xp.reshape(coef_, (1, -1))
intercept_ = xp.asarray(self.intercept_[1] - self.intercept_[0], dtype=X.dtype)
self.intercept_ = xp.reshape(intercept_, 1)
self._n_features_out = self._max_components
return self
|
def fit(self, X, y):
"""Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
(xp, _) = get_namespace(X)
(X, y) = self._validate_data(X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32])
self.classes_ = unique_labels(y)
(n_samples, _) = X.shape
n_classes = self.classes_.shape[0]
if n_samples == n_classes:
raise ValueError('The number of samples must be more than the number of classes.')
if self.priors is None:
(_, cnts) = xp.unique_counts(y)
self.priors_ = xp.astype(cnts, xp.float64) / float(y.shape[0])
else:
self.priors_ = xp.asarray(self.priors)
if xp.any(self.priors_ < 0):
raise ValueError('priors must be non-negative')
if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-05:
warnings.warn('The priors do not sum to 1. Renormalizing', UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
max_components = min(n_classes - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError('n_components cannot be larger than min(n_features, n_classes - 1).')
self._max_components = self.n_components
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError("shrinkage not supported with 'svd' solver.")
if self.covariance_estimator is not None:
raise ValueError('covariance estimator is not supported with svd solver. Try another solver')
<DeepExtract>
(xp, is_array_api) = get_namespace(X)
if is_array_api:
svd = xp.linalg.svd
else:
svd = scipy.linalg.svd
(n_samples, n_features) = X.shape
n_classes = self.classes_.shape[0]
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for (idx, group) in enumerate(self.classes_):
Xg = X[y == group]
Xc.append(Xg - self.means_[idx, :])
self.xbar_ = self.priors_ @ self.means_
Xc = xp.concat(Xc, axis=0)
std = xp.std(Xc, axis=0)
std[std == 0] = 1.0
fac = xp.asarray(1.0 / (n_samples - n_classes))
X = xp.sqrt(fac) * (Xc / std)
(U, S, Vt) = svd(X, full_matrices=False)
rank = xp.sum(xp.astype(S > self.tol, xp.int32))
scalings = (Vt[:rank, :] / std).T / S[:rank]
fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1)
X = (xp.sqrt(n_samples * self.priors_ * fac) * (self.means_ - self.xbar_).T).T @ scalings
(_, S, Vt) = svd(X, full_matrices=False)
if self._max_components == 0:
self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype)
else:
self.explained_variance_ratio_ = (S ** 2 / xp.sum(S ** 2))[:self._max_components]
rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32))
self.scalings_ = scalings @ Vt.T[:, :rank]
coef = (self.means_ - self.xbar_) @ self.scalings_
self.intercept_ = -0.5 * xp.sum(coef ** 2, axis=1) + xp.log(self.priors_)
self.coef_ = coef @ self.scalings_.T
self.intercept_ -= self.xbar_ @ self.coef_.T
</DeepExtract>
elif self.solver == 'lsqr':
<DeepExtract>
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, self.shrinkage, self.covariance_estimator)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)
</DeepExtract>
elif self.solver == 'eigen':
<DeepExtract>
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, self.shrinkage, self.covariance_estimator)
Sw = self.covariance_
St = _cov(X, self.shrinkage, self.covariance_estimator)
Sb = St - Sw
(evals, evecs) = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]]
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)
</DeepExtract>
if self.classes_.size == 2:
coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype)
self.coef_ = xp.reshape(coef_, (1, -1))
intercept_ = xp.asarray(self.intercept_[1] - self.intercept_[0], dtype=X.dtype)
self.intercept_ = xp.reshape(intercept_, 1)
self._n_features_out = self._max_components
return self
|
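A usage sketch of the fit above with the default 'svd' solver; iris is just a convenient public dataset.

from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)
lda = LinearDiscriminantAnalysis(solver="svd", n_components=2).fit(X, y)
print(lda.explained_variance_ratio_)  # share of between-class variance per component
print(lda.transform(X[:3]).shape)     # (3, 2): data projected on the discriminants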
def _hstack(self, Xs):
"""Stacks Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer.
Parameters
----------
Xs : list of {array-like, sparse matrix, dataframe}
"""
if self.sparse_output_:
try:
converted_Xs = [check_array(X, accept_sparse=True, force_all_finite=False) for X in Xs]
except ValueError as e:
raise ValueError('For a sparse output, all columns should be a numeric or convertible to a numeric.') from e
return sparse.hstack(converted_Xs).tocsr()
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
config = _get_output_config('transform', self)
if config['dense'] == 'pandas' and all((hasattr(X, 'iloc') for X in Xs)):
pd = check_pandas_support('transform')
output = pd.concat(Xs, axis=1)
if not self.verbose_feature_names_out:
return output
transformer_names = [t[0] for t in self._iter(fitted=True, replace_strings=True)]
feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
if self.verbose_feature_names_out:
names = list(chain.from_iterable(((f'{name}__{i}' for i in feature_names_out) for (name, feature_names_out) in list(zip(transformer_names, feature_names_outs)))))
names_out = np.asarray(names, dtype=object)
feature_names_count = Counter(chain.from_iterable((s for (_, s) in list(zip(transformer_names, feature_names_outs)))))
top_6_overlap = [name for (name, count) in feature_names_count.most_common(6) if count > 1]
top_6_overlap.sort()
if top_6_overlap:
if len(top_6_overlap) == 6:
names_repr = str(top_6_overlap[:5])[:-1] + ', ...]'
else:
names_repr = str(top_6_overlap)
raise ValueError(f'Output feature names: {names_repr} are not unique. Please set verbose_feature_names_out=True to add prefixes to feature names')
names_out = np.concatenate([name for (_, name) in list(zip(transformer_names, feature_names_outs))])
output.columns = names_out
return output
return np.hstack(Xs)
|
def _hstack(self, Xs):
"""Stacks Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer.
Parameters
----------
Xs : list of {array-like, sparse matrix, dataframe}
"""
if self.sparse_output_:
try:
converted_Xs = [check_array(X, accept_sparse=True, force_all_finite=False) for X in Xs]
except ValueError as e:
raise ValueError('For a sparse output, all columns should be a numeric or convertible to a numeric.') from e
return sparse.hstack(converted_Xs).tocsr()
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
config = _get_output_config('transform', self)
if config['dense'] == 'pandas' and all((hasattr(X, 'iloc') for X in Xs)):
pd = check_pandas_support('transform')
output = pd.concat(Xs, axis=1)
if not self.verbose_feature_names_out:
return output
transformer_names = [t[0] for t in self._iter(fitted=True, replace_strings=True)]
feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
<DeepExtract>
if self.verbose_feature_names_out:
names = list(chain.from_iterable(((f'{name}__{i}' for i in feature_names_out) for (name, feature_names_out) in list(zip(transformer_names, feature_names_outs)))))
names_out = np.asarray(names, dtype=object)
feature_names_count = Counter(chain.from_iterable((s for (_, s) in list(zip(transformer_names, feature_names_outs)))))
top_6_overlap = [name for (name, count) in feature_names_count.most_common(6) if count > 1]
top_6_overlap.sort()
if top_6_overlap:
if len(top_6_overlap) == 6:
names_repr = str(top_6_overlap[:5])[:-1] + ', ...]'
else:
names_repr = str(top_6_overlap)
raise ValueError(f'Output feature names: {names_repr} are not unique. Please set verbose_feature_names_out=True to add prefixes to feature names')
names_out = np.concatenate([name for (_, name) in list(zip(transformer_names, feature_names_outs))])
</DeepExtract>
output.columns = names_out
return output
return np.hstack(Xs)
|
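# Illustrative sketch (an addition, not part of the code above): the two
# stacking paths that _hstack switches between. For sparse output the blocks
# are concatenated with scipy.sparse.hstack and converted to CSR; for dense
# output plain numpy.hstack is used. The toy blocks are assumptions chosen
# only for demonstration.
import numpy as np
from scipy import sparse

dense_blocks = [np.ones((3, 2)), np.zeros((3, 1))]
stacked_dense = np.hstack(dense_blocks)                 # ndarray, shape (3, 3)

sparse_blocks = [sparse.csr_matrix(block) for block in dense_blocks]
stacked_sparse = sparse.hstack(sparse_blocks).tocsr()   # CSR matrix, shape (3, 3)

assert stacked_dense.shape == stacked_sparse.shape == (3, 3)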
def _retry_with_clean_cache(openml_path: str, data_home: Optional[str], no_retry_exception: Optional[Exception]=None) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once. A specific exception can be
excluded from the retry using the `no_retry_exception` parameter.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except URLError:
raise
except Exception as exc:
if no_retry_exception is not None and isinstance(exc, no_retry_exception):
raise
warn('Invalid cache, redownloading file', RuntimeWarning)
local_path = os.path.join(data_home, 'openml.org', openml_path + '.gz')
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
|
def _retry_with_clean_cache(openml_path: str, data_home: Optional[str], no_retry_exception: Optional[Exception]=None) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once. A specific exception can be
excluded from the retry using the `no_retry_exception` parameter.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except URLError:
raise
except Exception as exc:
if no_retry_exception is not None and isinstance(exc, no_retry_exception):
raise
warn('Invalid cache, redownloading file', RuntimeWarning)
<DeepExtract>
local_path = os.path.join(data_home, 'openml.org', openml_path + '.gz')
</DeepExtract>
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
|
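# Hedged usage sketch for the retry-with-clean-cache decorator defined above.
# The loader, its openml_path and the data_home directory are hypothetical;
# only the decorator's signature comes from the code above. If the wrapped
# call raises anything other than URLError (or no_retry_exception), the
# cached file under data_home is unlinked and the call is retried once.
def _cached_description(data_home):
    @_retry_with_clean_cache(openml_path="api/v1/json/data/42", data_home=data_home)
    def _load():
        return {"data_id": 42}  # stand-in for a parsed, possibly cached payload
    return _load()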
def test_sanity_check_pls_canonical():
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSCanonical(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array([[-0.61330704, 0.25616119, -0.74715187], [-0.74697144, 0.11930791, 0.65406368], [-0.25668686, -0.95924297, -0.11817271]])
expected_x_rotations = np.array([[-0.61330704, 0.41591889, -0.62297525], [-0.74697144, 0.31388326, 0.77368233], [-0.25668686, -0.89237972, -0.24121788]])
expected_y_weights = np.array([[+0.58989127, 0.7890047, 0.1717553], [+0.77134053, -0.61351791, 0.16920272], [-0.2388767, -0.03267062, 0.97050016]])
expected_y_rotations = np.array([[+0.58989127, 0.7168115, 0.30665872], [+0.77134053, -0.70791757, 0.19786539], [-0.2388767, -0.00343595, 0.94162826]])
assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))
assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))
assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
K = np.dot(pls.x_weights_.T, pls.x_weights_)
assert_array_almost_equal(K, np.diag(np.diag(K)))
K = np.dot(pls.y_weights_.T, pls.y_weights_)
assert_array_almost_equal(K, np.diag(np.diag(K)))
K = np.dot(pls._x_scores.T, pls._x_scores)
assert_array_almost_equal(K, np.diag(np.diag(K)))
K = np.dot(pls._y_scores.T, pls._y_scores)
assert_array_almost_equal(K, np.diag(np.diag(K)))
|
def test_sanity_check_pls_canonical():
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSCanonical(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array([[-0.61330704, 0.25616119, -0.74715187], [-0.74697144, 0.11930791, 0.65406368], [-0.25668686, -0.95924297, -0.11817271]])
expected_x_rotations = np.array([[-0.61330704, 0.41591889, -0.62297525], [-0.74697144, 0.31388326, 0.77368233], [-0.25668686, -0.89237972, -0.24121788]])
expected_y_weights = np.array([[+0.58989127, 0.7890047, 0.1717553], [+0.77134053, -0.61351791, 0.16920272], [-0.2388767, -0.03267062, 0.97050016]])
expected_y_rotations = np.array([[+0.58989127, 0.7168115, 0.30665872], [+0.77134053, -0.70791757, 0.19786539], [-0.2388767, -0.00343595, 0.94162826]])
assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))
assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))
assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
<DeepExtract>
K = np.dot(pls.x_weights_.T, pls.x_weights_)
assert_array_almost_equal(K, np.diag(np.diag(K)))
</DeepExtract>
<DeepExtract>
K = np.dot(pls.y_weights_.T, pls.y_weights_)
assert_array_almost_equal(K, np.diag(np.diag(K)))
</DeepExtract>
<DeepExtract>
K = np.dot(pls._x_scores.T, pls._x_scores)
assert_array_almost_equal(K, np.diag(np.diag(K)))
</DeepExtract>
<DeepExtract>
K = np.dot(pls._y_scores.T, pls._y_scores)
assert_array_almost_equal(K, np.diag(np.diag(K)))
</DeepExtract>
|
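# Minimal sketch of the orthogonality check repeated above: a matrix M has
# mutually orthogonal columns exactly when M.T @ M is diagonal. The random
# orthogonal matrix below is an assumption used only to produce an example
# where the check passes by construction.
import numpy as np
from scipy.stats import ortho_group

M = ortho_group.rvs(dim=3, random_state=0)   # random 3x3 orthogonal matrix
K = M.T @ M
np.testing.assert_allclose(K, np.diag(np.diag(K)), atol=1e-12)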
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
eta = 0.001
nu = 0.05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(learning_rate='constant', eta0=eta, nu=nu, fit_intercept=True, max_iter=1, average=True, shuffle=False)
clf.fit(X)
if coef_init is None:
coef = np.zeros(X.shape[1])
else:
coef = coef_init
average_coef = np.zeros(X.shape[1])
offset = offset_init
intercept = 1 - offset
average_intercept = 0.0
decay = 1.0
if klass == SparseSGDOneClassSVM:
decay = 0.01
for (i, entry) in enumerate(X):
p = np.dot(entry, coef)
p += intercept
if p <= 1.0:
gradient = -1
else:
gradient = 0
coef *= max(0, 1.0 - eta * nu / 2)
coef += -(eta * gradient * entry)
intercept += -(eta * (nu + gradient)) * decay
average_coef *= i
average_coef += coef
average_coef /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
(average_coef, average_offset) = (average_coef, 1 - average_intercept)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
|
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
eta = 0.001
nu = 0.05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(learning_rate='constant', eta0=eta, nu=nu, fit_intercept=True, max_iter=1, average=True, shuffle=False)
clf.fit(X)
<DeepExtract>
if coef_init is None:
coef = np.zeros(X.shape[1])
else:
coef = coef_init
average_coef = np.zeros(X.shape[1])
offset = offset_init
intercept = 1 - offset
average_intercept = 0.0
decay = 1.0
if klass == SparseSGDOneClassSVM:
decay = 0.01
for (i, entry) in enumerate(X):
p = np.dot(entry, coef)
p += intercept
if p <= 1.0:
gradient = -1
else:
gradient = 0
coef *= max(0, 1.0 - eta * nu / 2)
coef += -(eta * gradient * entry)
intercept += -(eta * (nu + gradient)) * decay
average_coef *= i
average_coef += coef
average_coef /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
(average_coef, average_offset) = (average_coef, 1 - average_intercept)
</DeepExtract>
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
|
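# Sketch of the running-average recursion used in the reference
# implementation above: after the i-th sample,
# average = (i * average + current) / (i + 1), which is simply the mean of
# all iterates seen so far. The scalar sequence is an assumption chosen so
# the result (2.5) is easy to verify by hand.
import numpy as np

iterates = np.array([1.0, 2.0, 3.0, 4.0])
running_avg = 0.0
for i, value in enumerate(iterates):
    running_avg = (i * running_avg + value) / (i + 1.0)

assert np.isclose(running_avg, iterates.mean())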
def threadpool_info():
if not hasattr(threadpoolctl, 'ThreadpoolController'):
controller = None
if not hasattr(sklearn, '_sklearn_threadpool_controller'):
sklearn._sklearn_threadpool_controller = threadpoolctl.ThreadpoolController()
controller = sklearn._sklearn_threadpool_controller
if controller is not None:
return controller.info()
else:
return threadpoolctl.threadpool_info()
|
def threadpool_info():
<DeepExtract>
if not hasattr(threadpoolctl, 'ThreadpoolController'):
controller = None
if not hasattr(sklearn, '_sklearn_threadpool_controller'):
sklearn._sklearn_threadpool_controller = threadpoolctl.ThreadpoolController()
controller = sklearn._sklearn_threadpool_controller
</DeepExtract>
if controller is not None:
return controller.info()
else:
return threadpoolctl.threadpool_info()
|
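# Hedged usage sketch: threadpoolctl >= 3.0 exposes ThreadpoolController,
# whose info() returns one dict per detected native thread pool (BLAS,
# OpenMP, ...); older versions only provide threadpool_info(). Which pools
# actually appear depends on the runtime environment.
import threadpoolctl

if hasattr(threadpoolctl, "ThreadpoolController"):
    info = threadpoolctl.ThreadpoolController().info()
else:
    info = threadpoolctl.threadpool_info()

for module in info:
    print(module.get("user_api"), module.get("num_threads"))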
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None, cv=None, scoring=None, n_jobs=None, pre_dispatch='all', verbose=0, error_score=np.nan, fit_params=None):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
(X, y, groups) = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
results = parallel((delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=fit_params, return_train_score=True, error_score=error_score) for (train, test) in cv.split(X, y, groups) for v in param_range))
n_params = len(param_range)
results = {key: np.asarray([score[key] for score in results]) if isinstance(results[0][key], numbers.Number) else [score[key] for score in results] for key in results[0]}
train_scores = results['train_scores'].reshape(-1, n_params).T
test_scores = results['test_scores'].reshape(-1, n_params).T
return (train_scores, test_scores)
|
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None, cv=None, scoring=None, n_jobs=None, pre_dispatch='all', verbose=0, error_score=np.nan, fit_params=None):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
(X, y, groups) = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
results = parallel((delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=fit_params, return_train_score=True, error_score=error_score) for (train, test) in cv.split(X, y, groups) for v in param_range))
n_params = len(param_range)
<DeepExtract>
results = {key: np.asarray([score[key] for score in results]) if isinstance(results[0][key], numbers.Number) else [score[key] for score in results] for key in results[0]}
</DeepExtract>
train_scores = results['train_scores'].reshape(-1, n_params).T
test_scores = results['test_scores'].reshape(-1, n_params).T
return (train_scores, test_scores)
|
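# Hedged usage sketch for validation_curve: vary SVC's gamma over a log grid
# and collect per-fold train/test scores. The dataset, estimator and grid are
# assumptions chosen only for illustration.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import validation_curve
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range, cv=5
)
# Both arrays have shape (n_ticks, n_cv_folds) == (5, 5).
print(train_scores.shape, test_scores.mean(axis=1))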
def test_sparse_input_dtype_enet_and_lassocv():
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(10, n_targets)
else:
w = random_state.randn(10)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, 10)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, 10)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
|
def test_sparse_input_dtype_enet_and_lassocv():
<DeepExtract>
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(10, n_targets)
else:
w = random_state.randn(10)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, 10)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, 10)
y_test = np.dot(X_test, w)
(X, y, _, _) = (X, y, X_test, y_test)
</DeepExtract>
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
|
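# Hedged sketch of the property exercised above: ElasticNetCV and LassoCV
# accept scipy.sparse CSR input directly, and float32 input should give
# nearly the same regularization path as float64. The random regression
# problem is an assumption used only for illustration.
import numpy as np
from scipy import sparse
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNetCV

X, y = make_regression(n_samples=50, n_features=10, random_state=0)
model64 = ElasticNetCV(n_alphas=5, cv=3).fit(sparse.csr_matrix(X), y)
model32 = ElasticNetCV(n_alphas=5, cv=3).fit(sparse.csr_matrix(X, dtype=np.float32), y)
print(model64.alpha_, abs(model64.alpha_ - model32.alpha_))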
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]['X']
X_sparse = DATASETS[dataset]['X_sparse']
y = DATASETS[dataset]['y']
if dataset in ['digits', 'diabetes']:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert s.tree_.node_count == d.tree_.node_count, '{0}: inequal number of node ({1} != {2})'.format('{0} with dense and sparse format gave different trees'.format(tree), s.tree_.node_count, d.tree_.node_count)
assert_array_equal(d.tree_.children_right, s.tree_.children_right, '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal children_right')
assert_array_equal(d.tree_.children_left, s.tree_.children_left, '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal children_left')
external = d.tree_.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.tree_.feature[internal], s.tree_.feature[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal features')
assert_array_equal(d.tree_.threshold[internal], s.tree_.threshold[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal threshold')
assert_array_equal(d.tree_.n_node_samples.sum(), s.tree_.n_node_samples.sum(), '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal sum(n_node_samples)')
assert_array_equal(d.tree_.n_node_samples, s.tree_.n_node_samples, '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal n_node_samples')
assert_almost_equal(d.tree_.impurity, s.tree_.impurity, err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': inequal impurity')
assert_array_almost_equal(d.tree_.value[external], s.tree_.value[external], err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': inequal value')
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba)
|
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]['X']
X_sparse = DATASETS[dataset]['X_sparse']
y = DATASETS[dataset]['y']
if dataset in ['digits', 'diabetes']:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
<DeepExtract>
assert s.tree_.node_count == d.tree_.node_count, '{0}: inequal number of node ({1} != {2})'.format('{0} with dense and sparse format gave different trees'.format(tree), s.tree_.node_count, d.tree_.node_count)
assert_array_equal(d.tree_.children_right, s.tree_.children_right, '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal children_right')
assert_array_equal(d.tree_.children_left, s.tree_.children_left, '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal children_left')
external = d.tree_.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.tree_.feature[internal], s.tree_.feature[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal features')
assert_array_equal(d.tree_.threshold[internal], s.tree_.threshold[internal], '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal threshold')
assert_array_equal(d.tree_.n_node_samples.sum(), s.tree_.n_node_samples.sum(), '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal sum(n_node_samples)')
assert_array_equal(d.tree_.n_node_samples, s.tree_.n_node_samples, '{0} with dense and sparse format gave different trees'.format(tree) + ': inequal n_node_samples')
assert_almost_equal(d.tree_.impurity, s.tree_.impurity, err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': inequal impurity')
assert_array_almost_equal(d.tree_.value[external], s.tree_.value[external], err_msg='{0} with dense and sparse format gave different trees'.format(tree) + ': inequal value')
</DeepExtract>
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba)
|
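# Hedged sketch of the dense-vs-sparse equivalence asserted above: a tree
# fitted on a CSR matrix is expected to predict the same labels as one fitted
# on the equivalent dense array. The toy classification data is an assumption.
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=60, n_features=8, random_state=0)
dense_tree = DecisionTreeClassifier(random_state=0).fit(X, y)
sparse_tree = DecisionTreeClassifier(random_state=0).fit(csr_matrix(X), y)
np.testing.assert_array_equal(dense_tree.predict(X), sparse_tree.predict(X))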
@pytest.mark.parametrize('base_loss', LOSSES)
@pytest.mark.parametrize('fit_intercept', [False, True])
@pytest.mark.parametrize('sample_weight', [None, 'range'])
@pytest.mark.parametrize('l2_reg_strength', [0, 1])
def test_loss_grad_hess_are_the_same(base_loss, fit_intercept, sample_weight, l2_reg_strength):
"""Test that loss and gradient are the same across different functions."""
loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
rng = np.random.RandomState(42)
n_dof = 5 + loss.fit_intercept
X = make_low_rank_matrix(n_samples=10, n_features=5, random_state=rng)
coef = loss.init_zero_coef(X)
if loss.base_loss.is_multiclass:
n_classes = loss.base_loss.n_classes
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_classes * n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
else:
raw_prediction = X @ coef.T
proba = loss.base_loss.link.inverse(raw_prediction)
def choice_vectorized(items, p):
s = p.cumsum(axis=1)
r = rng.rand(p.shape[0])[:, None]
k = (s < r).sum(axis=1)
(X, y, coef) = items[k]
y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64)
else:
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:-1] + coef[-1]
else:
raw_prediction = X @ coef
y = loss.base_loss.link.inverse(raw_prediction + rng.uniform(low=-1, high=1, size=10))
(X, y, coef) = (X, y, coef)
if sample_weight == 'range':
sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])
l1 = loss.loss(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
g1 = loss.gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(l2, g2) = loss.loss_gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(g3, h3) = loss.gradient_hessian_product(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
if not base_loss.is_multiclass:
(g4, h4, _) = loss.gradient_hessian(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
else:
with pytest.raises(NotImplementedError):
loss.gradient_hessian(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
assert_allclose(l1, l2)
assert_allclose(g1, g2)
assert_allclose(g1, g3)
if not base_loss.is_multiclass:
assert_allclose(g1, g4)
assert_allclose(h4 @ g4, h3(g3))
X = sparse.csr_matrix(X)
l1_sp = loss.loss(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
g1_sp = loss.gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(l2_sp, g2_sp) = loss.loss_gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(g3_sp, h3_sp) = loss.gradient_hessian_product(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
if not base_loss.is_multiclass:
(g4_sp, h4_sp, _) = loss.gradient_hessian(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
assert_allclose(l1, l1_sp)
assert_allclose(l1, l2_sp)
assert_allclose(g1, g1_sp)
assert_allclose(g1, g2_sp)
assert_allclose(g1, g3_sp)
assert_allclose(h3(g1), h3_sp(g1_sp))
if not base_loss.is_multiclass:
assert_allclose(g1, g4_sp)
assert_allclose(h4 @ g4, h4_sp @ g1_sp)
|
@pytest.mark.parametrize('base_loss', LOSSES)
@pytest.mark.parametrize('fit_intercept', [False, True])
@pytest.mark.parametrize('sample_weight', [None, 'range'])
@pytest.mark.parametrize('l2_reg_strength', [0, 1])
def test_loss_grad_hess_are_the_same(base_loss, fit_intercept, sample_weight, l2_reg_strength):
"""Test that loss and gradient are the same across different functions."""
loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
<DeepExtract>
rng = np.random.RandomState(42)
n_dof = 5 + loss.fit_intercept
X = make_low_rank_matrix(n_samples=10, n_features=5, random_state=rng)
coef = loss.init_zero_coef(X)
if loss.base_loss.is_multiclass:
n_classes = loss.base_loss.n_classes
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_classes * n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
else:
raw_prediction = X @ coef.T
proba = loss.base_loss.link.inverse(raw_prediction)
def choice_vectorized(items, p):
s = p.cumsum(axis=1)
r = rng.rand(p.shape[0])[:, None]
k = (s < r).sum(axis=1)
(X, y, coef) = items[k]
y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64)
else:
coef.flat[:] = rng.uniform(low=coef_bound[0], high=coef_bound[1], size=n_dof)
if loss.fit_intercept:
raw_prediction = X @ coef[:-1] + coef[-1]
else:
raw_prediction = X @ coef
y = loss.base_loss.link.inverse(raw_prediction + rng.uniform(low=-1, high=1, size=10))
(X, y, coef) = (X, y, coef)
</DeepExtract>
if sample_weight == 'range':
sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])
l1 = loss.loss(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
g1 = loss.gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(l2, g2) = loss.loss_gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(g3, h3) = loss.gradient_hessian_product(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
if not base_loss.is_multiclass:
(g4, h4, _) = loss.gradient_hessian(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
else:
with pytest.raises(NotImplementedError):
loss.gradient_hessian(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
assert_allclose(l1, l2)
assert_allclose(g1, g2)
assert_allclose(g1, g3)
if not base_loss.is_multiclass:
assert_allclose(g1, g4)
assert_allclose(h4 @ g4, h3(g3))
X = sparse.csr_matrix(X)
l1_sp = loss.loss(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
g1_sp = loss.gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(l2_sp, g2_sp) = loss.loss_gradient(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
(g3_sp, h3_sp) = loss.gradient_hessian_product(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
if not base_loss.is_multiclass:
(g4_sp, h4_sp, _) = loss.gradient_hessian(coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength)
assert_allclose(l1, l1_sp)
assert_allclose(l1, l2_sp)
assert_allclose(g1, g1_sp)
assert_allclose(g1, g2_sp)
assert_allclose(g1, g3_sp)
assert_allclose(h3(g1), h3_sp(g1_sp))
if not base_loss.is_multiclass:
assert_allclose(g1, g4_sp)
assert_allclose(h4 @ g4, h4_sp @ g1_sp)
|
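# Hedged sketch of the kind of consistency check performed above, written
# against a plain NumPy binary logistic loss instead of the private
# LinearModelLoss class: the analytical gradient should agree with a
# finite-difference estimate. Data and tolerance are assumptions.
import numpy as np
from scipy.optimize import check_grad

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
t = 2 * rng.randint(0, 2, size=30) - 1  # labels in {-1, +1}

def loss(w):
    # sum_i log(1 + exp(-t_i * x_i @ w))
    return np.logaddexp(0.0, -t * (X @ w)).sum()

def grad(w):
    return X.T @ (-t / (1.0 + np.exp(t * (X @ w))))

assert check_grad(loss, grad, rng.randn(4)) < 1e-4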
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
"""Check that we raise a warning regarding the working memory when using
the LIAC-ARFF parser."""
pytest.importorskip('pandas')
data_id = 1119
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
msg = 'Could not adhere to working_memory config.'
with pytest.warns(UserWarning, match=msg):
with config_context(working_memory=1e-06):
fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='liac-arff')
|
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
"""Check that we raise a warning regarding the working memory when using
the LIAC-ARFF parser."""
pytest.importorskip('pandas')
data_id = 1119
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
msg = 'Could not adhere to working_memory config.'
with pytest.warns(UserWarning, match=msg):
with config_context(working_memory=1e-06):
fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='liac-arff')
|
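# Hedged sketch of the working_memory setting exercised above: several
# sklearn computations, e.g. pairwise_distances_chunked, split their work
# into blocks sized to fit the config_context(working_memory=...) budget
# (in MiB). Toy data only; the warning tested above appears when even a
# single row exceeds that budget.
import numpy as np
from sklearn import config_context
from sklearn.metrics import pairwise_distances_chunked

X = np.random.RandomState(0).rand(100, 5)
with config_context(working_memory=1):  # 1 MiB budget for temporary blocks
    n_rows = sum(chunk.shape[0] for chunk in pairwise_distances_chunked(X))
assert n_rows == 100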
def test_assert_argkmin_results_quasi_equality():
rtol = 1e-07
eps = 1e-07
_1m = 1.0 - eps
_1p = 1.0 + eps
_6_1m = 6.1 - eps
_6_1p = 6.1 + eps
ref_dist = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p], [_1m, _1m, 1, _1p, _1p]])
ref_indices = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert ref_dist.shape == ref_dist.shape == ref_indices.shape == ref_indices.shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = ref_dist.shape
for query_idx in range(n_queries):
ref_dist_row = ref_dist[query_idx]
dist_row = ref_dist[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = ref_indices[query_idx]
indices_row = ref_indices[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1.2, 2.5, 6.1, 6.1, 6.1]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[1, 2, 4, 5, 3]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[1.2, 2.5, 6.1, 6.1, 6.1]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[1, 2, 4, 5, 3]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[_1m, _1m, 1, _1p, _1p]]).shape == np.array([[_1m, _1m, 1, _1p, _1p]]).shape == np.array([[6, 7, 8, 9, 10]]).shape == np.array([[6, 9, 7, 8, 10]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[_1m, _1m, 1, _1p, _1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[_1m, _1m, 1, _1p, _1p]])[query_idx]
dist_row = np.array([[_1m, _1m, 1, _1p, _1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[6, 7, 8, 9, 10]])[query_idx]
indices_row = np.array([[6, 9, 7, 8, 10]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[2, 1, 3, 4, 5]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[2, 1, 3, 4, 5]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[2, 1, 4, 5, 3]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[2, 1, 4, 5, 3]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
msg = "Distances aren't sorted on row 0"
with pytest.raises(AssertionError, match=msg):
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[2.5, 1.2, _6_1m, 6.1, _6_1p]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[2, 1, 4, 5, 3]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[2.5, 1.2, _6_1m, 6.1, _6_1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[2, 1, 4, 5, 3]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
|
def test_assert_argkmin_results_quasi_equality():
rtol = 1e-07
eps = 1e-07
_1m = 1.0 - eps
_1p = 1.0 + eps
_6_1m = 6.1 - eps
_6_1p = 6.1 + eps
ref_dist = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p], [_1m, _1m, 1, _1p, _1p]])
ref_indices = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert ref_dist.shape == ref_dist.shape == ref_indices.shape == ref_indices.shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = ref_dist.shape
for query_idx in range(n_queries):
ref_dist_row = ref_dist[query_idx]
dist_row = ref_dist[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = ref_indices[query_idx]
indices_row = ref_indices[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1.2, 2.5, 6.1, 6.1, 6.1]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[1, 2, 4, 5, 3]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[1.2, 2.5, 6.1, 6.1, 6.1]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[1, 2, 4, 5, 3]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[_1m, _1m, 1, _1p, _1p]]).shape == np.array([[_1m, _1m, 1, _1p, _1p]]).shape == np.array([[6, 7, 8, 9, 10]]).shape == np.array([[6, 9, 7, 8, 10]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[_1m, _1m, 1, _1p, _1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[_1m, _1m, 1, _1p, _1p]])[query_idx]
dist_row = np.array([[_1m, _1m, 1, _1p, _1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[6, 7, 8, 9, 10]])[query_idx]
indices_row = np.array([[6, 9, 7, 8, 10]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[2, 1, 3, 4, 5]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[2, 1, 3, 4, 5]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = 'Neighbors indices for query 0 are not matching'
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[2, 1, 4, 5, 3]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[2, 1, 4, 5, 3]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
msg = "Distances aren't sorted on row 0"
with pytest.raises(AssertionError, match=msg):
<DeepExtract>
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)
assert np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape == np.array([[2.5, 1.2, _6_1m, 6.1, _6_1p]]).shape == np.array([[1, 2, 3, 4, 5]]).shape == np.array([[2, 1, 4, 5, 3]]).shape, 'Arrays of results have various shapes.'
(n_queries, n_neighbors) = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]).shape
for query_idx in range(n_queries):
ref_dist_row = np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]])[query_idx]
dist_row = np.array([[2.5, 1.2, _6_1m, 6.1, _6_1p]])[query_idx]
assert is_sorted(ref_dist_row), f"Reference distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row), f"Distances aren't sorted on row {query_idx}"
assert_allclose(ref_dist_row, dist_row, rtol=rtol)
ref_indices_row = np.array([[1, 2, 3, 4, 5]])[query_idx]
indices_row = np.array([[2, 1, 4, 5, 3]])[query_idx]
reference_neighbors_groups = defaultdict(set)
effective_neighbors_groups = defaultdict(set)
for neighbor_rank in range(n_neighbors):
rounded_dist = relative_rounding(ref_dist_row[neighbor_rank], n_significant_digits=n_significant_digits)
reference_neighbors_groups[rounded_dist].add(ref_indices_row[neighbor_rank])
effective_neighbors_groups[rounded_dist].add(indices_row[neighbor_rank])
msg = f'Neighbors indices for query {query_idx} are not matching when rounding distances at {n_significant_digits} significant digits derived from rtol={rtol:.1e}'
for rounded_distance in reference_neighbors_groups.keys():
assert reference_neighbors_groups[rounded_distance] == effective_neighbors_groups[rounded_distance], msg
</DeepExtract>
|
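The checks above compare neighbor indices only after grouping them by distances rounded to a number of significant digits derived from rtol, so neighbors tied within the tolerance may come back in any order. A minimal editor-added sketch of that grouping idea, with a hypothetical relative_rounding helper standing in for the test suite's own:

from math import floor, log10

def relative_rounding(scalar, n_significant_digits):
    # Hypothetical stand-in: keep only the leading significant digits of a float.
    if scalar == 0:
        return 0.0
    magnitude = int(floor(log10(abs(scalar)))) + 1
    return round(scalar, n_significant_digits - magnitude)

rtol = 1e-7
n_significant_digits = -(int(floor(log10(abs(rtol)))) + 1)  # 6 digits for rtol=1e-7
near_ties = [6.1 - 1e-8, 6.1, 6.1 + 1e-8]
groups = {relative_rounding(d, n_significant_digits) for d in near_ties}
assert len(groups) == 1  # all three distances fall into the same rounding group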
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
|
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
<DeepExtract>
</DeepExtract>
<DeepExtract>
</DeepExtract>
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
|
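As a quick illustration of the final normalization in predict_log_proba above (using a made-up joint log-likelihood array, since the elided computation is not shown here): subtracting logsumexp over the class axis yields log-probabilities whose exponentials sum to one per sample.

import numpy as np
from scipy.special import logsumexp

jll = np.array([[-3.2, -1.1, -4.0]])            # hypothetical joint log-likelihoods for one sample
log_prob_x = logsumexp(jll, axis=1)             # log of the per-sample normalizer
log_proba = jll - np.atleast_2d(log_prob_x).T   # normalized log-probabilities
assert np.allclose(np.exp(log_proba).sum(axis=1), 1.0)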
def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
Returns
-------
self : object
FeatureUnion class instance.
"""
self.transformer_list = list(self.transformer_list)
self._validate_transformers()
self._validate_transformer_weights()
transformers = list(self._iter())
transformers = Parallel(n_jobs=self.n_jobs)((delayed(_fit_one)(transformer, X, y, weight, message_clsname='FeatureUnion', message=self._log_message(name, idx, len(transformers)), **fit_params) for (idx, (name, transformer, weight)) in enumerate(transformers, 1)))
if not transformers:
return self
transformers = iter(transformers)
self.transformer_list[:] = [(name, old if old == 'drop' else next(transformers)) for (name, old) in self.transformer_list]
return self
|
def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
Returns
-------
self : object
FeatureUnion class instance.
"""
<DeepExtract>
self.transformer_list = list(self.transformer_list)
self._validate_transformers()
self._validate_transformer_weights()
transformers = list(self._iter())
transformers = Parallel(n_jobs=self.n_jobs)((delayed(_fit_one)(transformer, X, y, weight, message_clsname='FeatureUnion', message=self._log_message(name, idx, len(transformers)), **fit_params) for (idx, (name, transformer, weight)) in enumerate(transformers, 1)))
</DeepExtract>
if not transformers:
return self
<DeepExtract>
transformers = iter(transformers)
self.transformer_list[:] = [(name, old if old == 'drop' else next(transformers)) for (name, old) in self.transformer_list]
</DeepExtract>
return self
|
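A small usage sketch (editor-added, not taken from the snippet above) of the behaviour this fit implements: every transformer is fitted on the same X, the fitted instances are written back into transformer_list, and entries set to 'drop' are left untouched.

import numpy as np
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

X = np.random.RandomState(0).normal(size=(20, 5))
union = FeatureUnion([
    ("pca", PCA(n_components=2)),
    ("scale", StandardScaler()),
    ("unused", "drop"),
])
union.fit(X)
assert union.transformer_list[2][1] == "drop"                 # 'drop' entries are preserved
assert hasattr(union.transformer_list[0][1], "components_")   # PCA was fitted in place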
def test_grid_search_cv_results():
(X, y) = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf'], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly'], degree=[1, 2])]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
assert all(cv_results['rank_test_score'] >= 1)
assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score')
assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score')
cv_results = search.cv_results_
assert all((isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys))
assert all((cv_results[key].dtype == object for key in param_keys))
assert not any((isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys))
assert all((cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')))
scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']
for key in scorer_keys:
assert cv_results['rank_test_%s' % key].dtype == np.int32
assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',)))
assert all((cv_results[key].shape == (n_candidates,) for key in param_keys + score_keys))
cv_results = search.cv_results_
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and (not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear'))
assert all((not cv_results['param_C'].mask[i] and (not cv_results['param_gamma'].mask[i]) and cv_results['param_degree'].mask[i] for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf'))
|
def test_grid_search_cv_results():
(X, y) = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf'], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly'], degree=[1, 2])]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
assert all(cv_results['rank_test_score'] >= 1)
assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score')
assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score')
<DeepExtract>
cv_results = search.cv_results_
assert all((isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys))
assert all((cv_results[key].dtype == object for key in param_keys))
assert not any((isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys))
assert all((cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')))
scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']
for key in scorer_keys:
assert cv_results['rank_test_%s' % key].dtype == np.int32
</DeepExtract>
<DeepExtract>
assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',)))
assert all((cv_results[key].shape == (n_candidates,) for key in param_keys + score_keys))
</DeepExtract>
cv_results = search.cv_results_
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and (not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear'))
assert all((not cv_results['param_C'].mask[i] and (not cv_results['param_gamma'].mask[i]) and cv_results['param_degree'].mask[i] for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf'))
|
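For readers unfamiliar with the cv_results_ layout being asserted here, a short illustrative sketch: parameter columns are masked arrays, masked for candidates whose sub-grid does not define that parameter, while score columns are plain float arrays.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=50, n_features=4, random_state=42)
grid = [{"kernel": ["rbf"], "C": [1, 10]}, {"kernel": ["poly"], "degree": [1, 2]}]
search = GridSearchCV(SVC(), param_grid=grid, cv=3).fit(X, y)
degree_col = search.cv_results_["param_degree"]
assert isinstance(degree_col, np.ma.MaskedArray)
assert bool(degree_col.mask[0])                  # first candidates come from the rbf sub-grid
assert search.cv_results_["mean_test_score"].dtype == np.float64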
@pytest.mark.parametrize('params', [{'as_frame': True, 'parser': 'auto'}, {'as_frame': 'auto', 'parser': 'auto'}, {'as_frame': False, 'parser': 'pandas'}])
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
"""Check that we raise the proper errors when we require pandas."""
data_id = 1119
try:
check_pandas_support('test_fetch_openml_requires_pandas')
except ImportError:
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
err_msg = 'requires pandas to be installed. Alternatively, explicitely'
with pytest.raises(ImportError, match=err_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest('This test requires pandas to not be installed.')
|
@pytest.mark.parametrize('params', [{'as_frame': True, 'parser': 'auto'}, {'as_frame': 'auto', 'parser': 'auto'}, {'as_frame': False, 'parser': 'pandas'}])
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
"""Check that we raise the proper errors when we require pandas."""
data_id = 1119
try:
check_pandas_support('test_fetch_openml_requires_pandas')
except ImportError:
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
err_msg = 'requires pandas to be installed. Alternatively, explicitely'
with pytest.raises(ImportError, match=err_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest('This test requires pandas to not be installed.')
|
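The monkeypatching helper inlined above routes every OpenML URL to a gzipped local fixture so the test runs offline. A stripped-down sketch of the same idea, with a hypothetical fake response object in place of _MockHTTPResponse:

from io import BytesIO

class FakeResponse:
    """Minimal file-like object standing in for an HTTP response."""
    def __init__(self, payload: bytes):
        self._fp = BytesIO(payload)
    def read(self, *args):
        return self._fp.read(*args)
    def close(self):
        pass

def fake_urlopen(request, *args, **kwargs):
    # Serve canned bytes instead of contacting https://openml.org.
    return FakeResponse(b'{"data": "local fixture"}')

# In a pytest test body, the swap would look like:
# monkeypatch.setattr(sklearn.datasets._openml, "urlopen", fake_urlopen)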
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
rng = np.random.RandomState(0)
regressor = clone(regressor_orig)
X = rng.normal(size=(10, 4))
if '1darray' in _safe_tags(regressor_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(regressor_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(regressor_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if regressor_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(regressor_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(regressor_orig, key='pairwise'):
X = kernel(X, X)
X = X
if _safe_tags(regressor, key='requires_positive_y'):
X[:, 0] += 1 + abs(X[:, 0].min())
if _safe_tags(regressor, key='binary_only') and X[:, 0].size > 0:
X[:, 0] = np.where(X[:, 0] == X[:, 0].flat[0], X[:, 0], X[:, 0].flat[0] + 1)
if _safe_tags(regressor, key='multioutput_only'):
X[:, 0] = np.reshape(X[:, 0], (-1, 1))
X[:, 0] = X[:, 0]
regressor.fit(X, y)
funcs = ['decision_function', 'predict_proba', 'predict_log_proba']
for func_name in funcs:
assert not hasattr(regressor, func_name)
|
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
rng = np.random.RandomState(0)
regressor = clone(regressor_orig)
X = rng.normal(size=(10, 4))
<DeepExtract>
if '1darray' in _safe_tags(regressor_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(regressor_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(regressor_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if regressor_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(regressor_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(regressor_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
<DeepExtract>
if _safe_tags(regressor, key='requires_positive_y'):
X[:, 0] += 1 + abs(X[:, 0].min())
if _safe_tags(regressor, key='binary_only') and X[:, 0].size > 0:
X[:, 0] = np.where(X[:, 0] == X[:, 0].flat[0], X[:, 0], X[:, 0].flat[0] + 1)
if _safe_tags(regressor, key='multioutput_only'):
X[:, 0] = np.reshape(X[:, 0], (-1, 1))
X[:, 0] = X[:, 0]
</DeepExtract>
regressor.fit(X, y)
funcs = ['decision_function', 'predict_proba', 'predict_log_proba']
for func_name in funcs:
assert not hasattr(regressor, func_name)
|
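A quick editor-added illustration of the property this check enforces: a fitted regressor exposes predict but none of the classifier-only methods listed in funcs.

import numpy as np
from sklearn.linear_model import LinearRegression

reg = LinearRegression().fit(np.arange(10, dtype=float).reshape(-1, 1), np.arange(10, dtype=float))
assert hasattr(reg, "predict")
assert not any(
    hasattr(reg, name) for name in ("decision_function", "predict_proba", "predict_log_proba")
)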
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count, sample_weight=None):
"""Calculate mean update and a Youngs and Cramer variance update.
If sample_weight is given, the weighted mean and variance is computed.
Update a given mean and (possibly) variance according to new data given
in X. last_mean is always required to compute the new mean.
    If last_variance is None, no variance is computed and None is returned for
updated_variance.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to use for variance update.
last_mean : array-like of shape (n_features,)
last_variance : array-like of shape (n_features,)
last_sample_count : array-like of shape (n_features,)
The number of samples encountered until now if sample_weight is None.
If sample_weight is not None, this is the sum of sample_weight
encountered.
sample_weight : array-like of shape (n_samples,) or None
Sample weights. If None, compute the unweighted mean/variance.
Returns
-------
updated_mean : ndarray of shape (n_features,)
updated_variance : ndarray of shape (n_features,)
None if last_variance was None.
updated_sample_count : ndarray of shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
last_sum = last_mean * last_sample_count
X_nan_mask = np.isnan(X)
if np.any(X_nan_mask):
sum_op = np.nansum
else:
sum_op = np.sum
if sample_weight is not None:
if np.issubdtype(sample_weight.dtype, np.floating) and sample_weight.dtype.itemsize < 8:
result = np.matmul(sample_weight, *args, **kwargs, dtype=np.float64)
else:
result = np.matmul(sample_weight, *args, **kwargs)
new_sum = result
if np.issubdtype(sample_weight[:, None] * ~X_nan_mask.dtype, np.floating) and sample_weight[:, None] * ~X_nan_mask.dtype.itemsize < 8:
result = np.sum(sample_weight[:, None] * ~X_nan_mask, *args, **kwargs, dtype=np.float64)
else:
result = np.sum(sample_weight[:, None] * ~X_nan_mask, *args, **kwargs)
new_sample_count = result
else:
if np.issubdtype(X.dtype, np.floating) and X.dtype.itemsize < 8:
result = sum_op(X, *args, **kwargs, dtype=np.float64)
else:
result = sum_op(X, *args, **kwargs)
new_sum = result
n_samples = X.shape[0]
new_sample_count = n_samples - np.sum(X_nan_mask, axis=0)
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
T = new_sum / new_sample_count
temp = X - T
if sample_weight is not None:
if np.issubdtype(sample_weight.dtype, np.floating) and sample_weight.dtype.itemsize < 8:
result = np.matmul(sample_weight, *args, **kwargs, dtype=np.float64)
else:
result = np.matmul(sample_weight, *args, **kwargs)
correction = result
temp **= 2
if np.issubdtype(sample_weight.dtype, np.floating) and sample_weight.dtype.itemsize < 8:
result = np.matmul(sample_weight, *args, **kwargs, dtype=np.float64)
else:
result = np.matmul(sample_weight, *args, **kwargs)
new_unnormalized_variance = result
else:
if np.issubdtype(temp.dtype, np.floating) and temp.dtype.itemsize < 8:
result = sum_op(temp, *args, **kwargs, dtype=np.float64)
else:
result = sum_op(temp, *args, **kwargs)
correction = result
temp **= 2
if np.issubdtype(temp.dtype, np.floating) and temp.dtype.itemsize < 8:
result = sum_op(temp, *args, **kwargs, dtype=np.float64)
else:
result = sum_op(temp, *args, **kwargs)
new_unnormalized_variance = result
new_unnormalized_variance -= correction ** 2 / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
with np.errstate(divide='ignore', invalid='ignore'):
last_over_new_count = last_sample_count / new_sample_count
updated_unnormalized_variance = last_unnormalized_variance + new_unnormalized_variance + last_over_new_count / updated_sample_count * (last_sum / last_over_new_count - new_sum) ** 2
zeros = last_sample_count == 0
updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
updated_variance = updated_unnormalized_variance / updated_sample_count
return (updated_mean, updated_variance, updated_sample_count)
|
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count, sample_weight=None):
"""Calculate mean update and a Youngs and Cramer variance update.
If sample_weight is given, the weighted mean and variance is computed.
Update a given mean and (possibly) variance according to new data given
in X. last_mean is always required to compute the new mean.
    If last_variance is None, no variance is computed and None is returned for
updated_variance.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to use for variance update.
last_mean : array-like of shape (n_features,)
last_variance : array-like of shape (n_features,)
last_sample_count : array-like of shape (n_features,)
The number of samples encountered until now if sample_weight is None.
If sample_weight is not None, this is the sum of sample_weight
encountered.
sample_weight : array-like of shape (n_samples,) or None
Sample weights. If None, compute the unweighted mean/variance.
Returns
-------
updated_mean : ndarray of shape (n_features,)
updated_variance : ndarray of shape (n_features,)
None if last_variance was None.
updated_sample_count : ndarray of shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
last_sum = last_mean * last_sample_count
X_nan_mask = np.isnan(X)
if np.any(X_nan_mask):
sum_op = np.nansum
else:
sum_op = np.sum
if sample_weight is not None:
<DeepExtract>
if np.issubdtype(sample_weight.dtype, np.floating) and sample_weight.dtype.itemsize < 8:
result = np.matmul(sample_weight, *args, **kwargs, dtype=np.float64)
else:
result = np.matmul(sample_weight, *args, **kwargs)
new_sum = result
</DeepExtract>
<DeepExtract>
if np.issubdtype(sample_weight[:, None] * ~X_nan_mask.dtype, np.floating) and sample_weight[:, None] * ~X_nan_mask.dtype.itemsize < 8:
result = np.sum(sample_weight[:, None] * ~X_nan_mask, *args, **kwargs, dtype=np.float64)
else:
result = np.sum(sample_weight[:, None] * ~X_nan_mask, *args, **kwargs)
new_sample_count = result
</DeepExtract>
else:
<DeepExtract>
if np.issubdtype(X.dtype, np.floating) and X.dtype.itemsize < 8:
result = sum_op(X, *args, **kwargs, dtype=np.float64)
else:
result = sum_op(X, *args, **kwargs)
new_sum = result
</DeepExtract>
n_samples = X.shape[0]
new_sample_count = n_samples - np.sum(X_nan_mask, axis=0)
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
T = new_sum / new_sample_count
temp = X - T
if sample_weight is not None:
<DeepExtract>
if np.issubdtype(sample_weight.dtype, np.floating) and sample_weight.dtype.itemsize < 8:
result = np.matmul(sample_weight, *args, **kwargs, dtype=np.float64)
else:
result = np.matmul(sample_weight, *args, **kwargs)
correction = result
</DeepExtract>
temp **= 2
<DeepExtract>
if np.issubdtype(sample_weight.dtype, np.floating) and sample_weight.dtype.itemsize < 8:
result = np.matmul(sample_weight, *args, **kwargs, dtype=np.float64)
else:
result = np.matmul(sample_weight, *args, **kwargs)
new_unnormalized_variance = result
</DeepExtract>
else:
<DeepExtract>
if np.issubdtype(temp.dtype, np.floating) and temp.dtype.itemsize < 8:
result = sum_op(temp, *args, **kwargs, dtype=np.float64)
else:
result = sum_op(temp, *args, **kwargs)
correction = result
</DeepExtract>
temp **= 2
<DeepExtract>
if np.issubdtype(temp.dtype, np.floating) and temp.dtype.itemsize < 8:
result = sum_op(temp, *args, **kwargs, dtype=np.float64)
else:
result = sum_op(temp, *args, **kwargs)
new_unnormalized_variance = result
</DeepExtract>
new_unnormalized_variance -= correction ** 2 / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
with np.errstate(divide='ignore', invalid='ignore'):
last_over_new_count = last_sample_count / new_sample_count
updated_unnormalized_variance = last_unnormalized_variance + new_unnormalized_variance + last_over_new_count / updated_sample_count * (last_sum / last_over_new_count - new_sum) ** 2
zeros = last_sample_count == 0
updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
updated_variance = updated_unnormalized_variance / updated_sample_count
return (updated_mean, updated_variance, updated_sample_count)
|
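To make the update above concrete, a small numerical check (editor-added; it assumes the private helper is importable from sklearn.utils.extmath, as in recent scikit-learn versions): feeding the data in two batches reproduces the mean and variance of the concatenated array.

import numpy as np
from sklearn.utils.extmath import _incremental_mean_and_var  # private helper, assumed location

rng = np.random.RandomState(0)
first, second = rng.normal(size=(30, 3)), rng.normal(size=(20, 3))

mean, var, count = np.zeros(3), np.zeros(3), np.zeros(3)
for batch in (first, second):
    mean, var, count = _incremental_mean_and_var(batch, mean, var, count)

full = np.vstack([first, second])
np.testing.assert_allclose(mean, full.mean(axis=0))
np.testing.assert_allclose(var, full.var(axis=0))
np.testing.assert_allclose(count, np.full(3, 50.0))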