| before (string, lengths 87 to 36.6k) | after (string, lengths 116 to 37.2k) |
|---|---|
def test_threshold(global_random_seed, global_dtype):
(X, y) = make_blobs(n_samples=80, centers=4, random_state=global_random_seed)
X = X.astype(global_dtype, copy=False)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
current_leaf = brc.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert 0.5 >= sc.radius
current_leaf = current_leaf.next_leaf_
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
current_leaf = brc.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert 5.0 >= sc.radius
current_leaf = current_leaf.next_leaf_
|
def test_threshold(global_random_seed, global_dtype):
(X, y) = make_blobs(n_samples=80, centers=4, random_state=global_random_seed)
X = X.astype(global_dtype, copy=False)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
<DeepExtract>
current_leaf = brc.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert 0.5 >= sc.radius
current_leaf = current_leaf.next_leaf_
</DeepExtract>
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
<DeepExtract>
current_leaf = brc.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert 5.0 >= sc.radius
current_leaf = current_leaf.next_leaf_
</DeepExtract>
|
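A note on the pair above: the `<DeepExtract>` spans in the "after" cell mark the leaf-walking check that the "before" cell shows inlined. Below is a minimal, self-contained sketch of that check written as a standalone helper; the name `check_threshold` is chosen for illustration, while the attributes `dummy_leaf_`, `subclusters_` and `radius` are the ones used in the code above.

```python
from sklearn.cluster import Birch
from sklearn.datasets import make_blobs


def check_threshold(birch_instance, threshold):
    """Walk the linked list of leaves; every subcluster radius must stay below threshold."""
    current_leaf = birch_instance.dummy_leaf_.next_leaf_
    while current_leaf:
        for subcluster in current_leaf.subclusters_:
            assert subcluster.radius <= threshold
        current_leaf = current_leaf.next_leaf_


X, _ = make_blobs(n_samples=80, centers=4, random_state=0)
check_threshold(Birch(threshold=0.5, n_clusters=None).fit(X), 0.5)
```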
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
"""Check the consistency of the LIAC-ARFF and pandas parsers."""
pd = pytest.importorskip('pandas')
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
bunch_liac = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='liac-arff')
bunch_pandas = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='pandas')
(data_liac, data_pandas) = (bunch_liac.data, bunch_pandas.data)
def convert_numerical_dtypes(series):
pandas_series = data_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
else:
return series
data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
(frame_liac, frame_pandas) = (bunch_liac.frame, bunch_pandas.frame)
pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
def convert_numerical_and_categorical_dtypes(series):
pandas_series = frame_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
elif pd.api.types.is_categorical_dtype(pandas_series):
return series.cat.rename_categories(pandas_series.cat.categories)
else:
return series
frame_liac_with_fixed_dtypes = frame_liac.apply(convert_numerical_and_categorical_dtypes)
pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
|
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
"""Check the consistency of the LIAC-ARFF and pandas parsers."""
pd = pytest.importorskip('pandas')
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
bunch_liac = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='liac-arff')
bunch_pandas = fetch_openml(data_id=data_id, as_frame=True, cache=False, parser='pandas')
(data_liac, data_pandas) = (bunch_liac.data, bunch_pandas.data)
def convert_numerical_dtypes(series):
pandas_series = data_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
else:
return series
data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
(frame_liac, frame_pandas) = (bunch_liac.frame, bunch_pandas.frame)
pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
def convert_numerical_and_categorical_dtypes(series):
pandas_series = frame_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
elif pd.api.types.is_categorical_dtype(pandas_series):
return series.cat.rename_categories(pandas_series.cat.categories)
else:
return series
frame_liac_with_fixed_dtypes = frame_liac.apply(convert_numerical_and_categorical_dtypes)
pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
|
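Most of both cells above is the urlopen-mocking scaffold marked by `<DeepExtract>`; names such as `test_offline`, `OPENML_TEST_DATA_MODULE`, `_open_binary` and `_MockHTTPResponse` come from the enclosing test module and are unbound in the snippet as shown. The sketch below is a stripped-down version of the same pattern, assuming only pytest's `monkeypatch` fixture; the stub class and canned payload are illustrative, not the real test fixtures.

```python
import io

import sklearn.datasets._openml


class _StubHTTPResponse:
    """Bare-bones stand-in for the object returned by urlopen."""

    def __init__(self, payload: bytes):
        self._fp = io.BytesIO(payload)

    def read(self, *args):
        return self._fp.read(*args)

    def close(self):
        pass


def test_fetch_openml_offline(monkeypatch):
    def _fake_urlopen(request, *args, **kwargs):
        # The real mock routes on the URL prefix and serves gzipped files
        # shipped with the test suite; a canned JSON body stands in here.
        return _StubHTTPResponse(b'{"data": {}}')

    monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _fake_urlopen)
    # Any fetch_openml(...) call made here now hits the stub, not the network.
```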
def check_fit_check_is_fitted(name, estimator_orig):
rng = np.random.RandomState(42)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
X = kernel(X, X)
X = X
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if not _safe_tags(estimator).get('stateless', False):
try:
check_is_fitted(estimator)
raise AssertionError(f'{estimator.__class__.__name__} passes check_is_fitted before being fit!')
except NotFittedError:
pass
estimator.fit(X, y)
try:
check_is_fitted(estimator)
except NotFittedError as e:
raise NotFittedError('Estimator fails to pass `check_is_fitted` even though it has been fit.') from e
|
def check_fit_check_is_fitted(name, estimator_orig):
rng = np.random.RandomState(42)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
<DeepExtract>
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if not _safe_tags(estimator).get('stateless', False):
try:
check_is_fitted(estimator)
raise AssertionError(f'{estimator.__class__.__name__} passes check_is_fitted before being fit!')
except NotFittedError:
pass
estimator.fit(X, y)
try:
check_is_fitted(estimator)
except NotFittedError as e:
raise NotFittedError('Estimator fails to pass `check_is_fitted` even though it has been fit.') from e
|
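The check above encodes a simple contract: `check_is_fitted` must raise `NotFittedError` before `fit` and pass afterwards. A compact illustration of that contract follows, using the same data shapes as the check; `LogisticRegression` is used only as a convenient example estimator.

```python
import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_is_fitted

est = LogisticRegression()
try:
    check_is_fitted(est)          # must raise before fitting
except NotFittedError:
    pass

rng = np.random.RandomState(42)
X = rng.normal(loc=100, size=(100, 2))
y = rng.randint(low=0, high=2, size=100)
est.fit(X, y)
check_is_fitted(est)              # no exception once fitted
```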
def fit_transform(X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32])
with config_context(assume_finite=True):
check_non_negative(X, 'NMF (input X)')
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError('When beta_loss <= 0 and X contains zeros, the solver may diverge. Please add small values to X, or use a positive beta_loss.')
(W, H) = self._check_w_h(X, W, H, update_H)
(l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H) = self._compute_regularization(X)
if self.solver == 'cd':
(W, H, n_iter) = _fit_coordinate_descent(X, W, H, self.tol, self.max_iter, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H=update_H, verbose=self.verbose, shuffle=self.shuffle, random_state=self.random_state)
elif self.solver == 'mu':
(W, H, n_iter, *_) = _fit_multiplicative_update(X, W, H, self._beta_loss, self.max_iter, self.tol, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H, self.verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % self.solver)
if n_iter == self.max_iter and self.tol > 0:
warnings.warn('Maximum number of iterations %d reached. Increase it to improve convergence.' % self.max_iter, ConvergenceWarning)
(W, H, n_iter) = (W, H, n_iter)
self._beta_loss = _beta_loss_to_float(self._beta_loss)
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
if self._beta_loss == 2:
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
cross_prod = trace_dot(X * H.T, W)
res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
else:
res = squared_norm(X - np.dot(W, H)) / 2.0
if True:
self.reconstruction_err_ = np.sqrt(res * 2)
else:
self.reconstruction_err_ = res
if sp.issparse(X):
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
WH_data[WH_data < EPSILON] = EPSILON
if self._beta_loss == 1:
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
res += sum_WH - X_data.sum()
elif self._beta_loss == 0:
div = X_data / WH_data
res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
else:
if sp.issparse(X):
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** self._beta_loss)
else:
sum_WH_beta = np.sum(WH ** self._beta_loss)
sum_X_WH = np.dot(X_data, WH_data ** (self._beta_loss - 1))
res = (X_data ** self._beta_loss).sum() - self._beta_loss * sum_X_WH
res += sum_WH_beta * (self._beta_loss - 1)
res /= self._beta_loss * (self._beta_loss - 1)
if True:
res = max(res, 0)
self.reconstruction_err_ = np.sqrt(2 * res)
else:
self.reconstruction_err_ = res
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
return W
|
def fit_transform(X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32])
with config_context(assume_finite=True):
<DeepExtract>
check_non_negative(X, 'NMF (input X)')
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError('When beta_loss <= 0 and X contains zeros, the solver may diverge. Please add small values to X, or use a positive beta_loss.')
(W, H) = self._check_w_h(X, W, H, update_H)
(l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H) = self._compute_regularization(X)
if self.solver == 'cd':
(W, H, n_iter) = _fit_coordinate_descent(X, W, H, self.tol, self.max_iter, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H=update_H, verbose=self.verbose, shuffle=self.shuffle, random_state=self.random_state)
elif self.solver == 'mu':
(W, H, n_iter, *_) = _fit_multiplicative_update(X, W, H, self._beta_loss, self.max_iter, self.tol, l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H, update_H, self.verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % self.solver)
if n_iter == self.max_iter and self.tol > 0:
warnings.warn('Maximum number of iterations %d reached. Increase it to improve convergence.' % self.max_iter, ConvergenceWarning)
(W, H, n_iter) = (W, H, n_iter)
</DeepExtract>
<DeepExtract>
self._beta_loss = _beta_loss_to_float(self._beta_loss)
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
if self._beta_loss == 2:
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
cross_prod = trace_dot(X * H.T, W)
res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
else:
res = squared_norm(X - np.dot(W, H)) / 2.0
if True:
self.reconstruction_err_ = np.sqrt(res * 2)
else:
self.reconstruction_err_ = res
if sp.issparse(X):
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
WH_data[WH_data < EPSILON] = EPSILON
if self._beta_loss == 1:
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
res += sum_WH - X_data.sum()
elif self._beta_loss == 0:
div = X_data / WH_data
res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
else:
if sp.issparse(X):
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** self._beta_loss)
else:
sum_WH_beta = np.sum(WH ** self._beta_loss)
sum_X_WH = np.dot(X_data, WH_data ** (self._beta_loss - 1))
res = (X_data ** self._beta_loss).sum() - self._beta_loss * sum_X_WH
res += sum_WH_beta * (self._beta_loss - 1)
res /= self._beta_loss * (self._beta_loss - 1)
if True:
res = max(res, 0)
self.reconstruction_err_ = np.sqrt(2 * res)
else:
self.reconstruction_err_ = res
</DeepExtract>
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
return W
|
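The two `<DeepExtract>` spans above correspond to the solver dispatch and to the beta-divergence computation; the literal `if True:` branches are the inlined `square_root=True` argument, and `update_H` comes from the signature of the extracted private method and stays unbound in the snippet. For `beta_loss == 2` on dense data the reconstruction error reduces to the Frobenius norm of the residual, as this standalone check illustrates.

```python
import numpy as np

rng = np.random.RandomState(0)
X = np.abs(rng.normal(size=(6, 4)))
W = np.abs(rng.normal(size=(6, 2)))
H = np.abs(rng.normal(size=(2, 4)))

res = ((X - W @ H) ** 2).sum() / 2.0        # squared_norm(X - WH) / 2
reconstruction_err = np.sqrt(res * 2)       # the square_root=True branch
assert np.isclose(reconstruction_err, np.linalg.norm(X - W @ H, "fro"))
```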
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
self.length_scale = np.squeeze(self.length_scale).astype(float)
if np.ndim(self.length_scale) > 1:
raise ValueError('length_scale cannot be of dimension greater than 1')
if np.ndim(self.length_scale) == 1 and X.shape[1] != self.length_scale.shape[0]:
raise ValueError('Anisotropic kernel must have the same number of dimensions as data (%d!=%d)' % (self.length_scale.shape[0], X.shape[1]))
self.length_scale = self.length_scale
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
return (K, np.empty((X.shape[0], X.shape[0], 0)))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return (K, K_gradient)
elif self.anisotropic:
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2
K_gradient *= K[..., np.newaxis]
return (K, K_gradient)
else:
return K
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
<DeepExtract>
self.length_scale = np.squeeze(self.length_scale).astype(float)
if np.ndim(self.length_scale) > 1:
raise ValueError('length_scale cannot be of dimension greater than 1')
if np.ndim(self.length_scale) == 1 and X.shape[1] != self.length_scale.shape[0]:
raise ValueError('Anisotropic kernel must have the same number of dimensions as data (%d!=%d)' % (self.length_scale.shape[0], X.shape[1]))
self.length_scale = self.length_scale
</DeepExtract>
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
return (K, np.empty((X.shape[0], X.shape[0], 0)))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return (K, K_gradient)
elif self.anisotropic:
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2
K_gradient *= K[..., np.newaxis]
return (K, K_gradient)
else:
return K
|
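The `<DeepExtract>` span above is the length-scale validation helper; note that after inlining, the body still reads the bare name `length_scale`, which in the original code is the helper's return value. The kernel value itself is a squared exponential over scaled pairwise distances, as in this standalone sketch.

```python
import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform

rng = np.random.RandomState(0)
X = rng.normal(size=(5, 3))
Y = rng.normal(size=(4, 3))
length_scale = 1.5

# Y is None: condensed pairwise distances, unit diagonal.
K_xx = squareform(np.exp(-0.5 * pdist(X / length_scale, metric="sqeuclidean")))
np.fill_diagonal(K_xx, 1.0)

# Y given: full rectangular kernel matrix.
K_xy = np.exp(-0.5 * cdist(X / length_scale, Y / length_scale, metric="sqeuclidean"))
```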
def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
X = self._validate_data(X, reset=True, accept_sparse=False, ensure_min_samples=2, ensure_2d=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(_, n_features) = X.shape
if isinstance(self.knots, str):
if self.knots == 'quantile':
percentiles = 100 * np.linspace(start=0, stop=1, num=self.n_knots, dtype=np.float64)
if sample_weight is None:
self.knots = np.percentile(X, percentiles, axis=0)
else:
self.knots = np.array([_weighted_percentile(X, sample_weight, percentile) for percentile in percentiles])
else:
mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
x_min = np.amin(X[mask], axis=0)
x_max = np.amax(X[mask], axis=0)
self.knots = np.linspace(start=x_min, stop=x_max, num=self.n_knots, endpoint=True, dtype=np.float64)
base_knots = self.knots
else:
base_knots = check_array(self.knots, dtype=np.float64)
if base_knots.shape[0] < 2:
raise ValueError('Number of knots, knots.shape[0], must be >= 2.')
elif base_knots.shape[1] != n_features:
raise ValueError('knots.shape[1] == n_features is violated.')
elif not np.all(np.diff(base_knots, axis=0) > 0):
raise ValueError('knots must be sorted without duplicates.')
n_knots = base_knots.shape[0]
if self.extrapolation == 'periodic' and n_knots <= self.degree:
raise ValueError(f'Periodic splines require degree < n_knots. Got n_knots={n_knots} and degree={self.degree}.')
if self.extrapolation != 'periodic':
n_splines = n_knots + self.degree - 1
else:
n_splines = n_knots - 1
degree = self.degree
n_out = n_features * n_splines
if self.extrapolation == 'periodic':
period = base_knots[-1] - base_knots[0]
knots = np.r_[base_knots[-(degree + 1):-1] - period, base_knots, base_knots[1:degree + 1] + period]
else:
dist_min = base_knots[1] - base_knots[0]
dist_max = base_knots[-1] - base_knots[-2]
knots = np.r_[np.linspace(base_knots[0] - degree * dist_min, base_knots[0] - dist_min, num=degree), base_knots, np.linspace(base_knots[-1] + dist_max, base_knots[-1] + degree * dist_max, num=degree)]
coef = np.eye(n_splines, dtype=np.float64)
if self.extrapolation == 'periodic':
coef = np.concatenate((coef, coef[:degree, :]))
extrapolate = self.extrapolation in ['periodic', 'continue']
bsplines = [BSpline.construct_fast(knots[:, i], coef, self.degree, extrapolate=extrapolate) for i in range(n_features)]
self.bsplines_ = bsplines
self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
return self
|
def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
X = self._validate_data(X, reset=True, accept_sparse=False, ensure_min_samples=2, ensure_2d=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(_, n_features) = X.shape
if isinstance(self.knots, str):
<DeepExtract>
if self.knots == 'quantile':
percentiles = 100 * np.linspace(start=0, stop=1, num=self.n_knots, dtype=np.float64)
if sample_weight is None:
self.knots = np.percentile(X, percentiles, axis=0)
else:
self.knots = np.array([_weighted_percentile(X, sample_weight, percentile) for percentile in percentiles])
else:
mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
x_min = np.amin(X[mask], axis=0)
x_max = np.amax(X[mask], axis=0)
self.knots = np.linspace(start=x_min, stop=x_max, num=self.n_knots, endpoint=True, dtype=np.float64)
base_knots = self.knots
</DeepExtract>
else:
base_knots = check_array(self.knots, dtype=np.float64)
if base_knots.shape[0] < 2:
raise ValueError('Number of knots, knots.shape[0], must be >= 2.')
elif base_knots.shape[1] != n_features:
raise ValueError('knots.shape[1] == n_features is violated.')
elif not np.all(np.diff(base_knots, axis=0) > 0):
raise ValueError('knots must be sorted without duplicates.')
n_knots = base_knots.shape[0]
if self.extrapolation == 'periodic' and n_knots <= self.degree:
raise ValueError(f'Periodic splines require degree < n_knots. Got n_knots={n_knots} and degree={self.degree}.')
if self.extrapolation != 'periodic':
n_splines = n_knots + self.degree - 1
else:
n_splines = n_knots - 1
degree = self.degree
n_out = n_features * n_splines
if self.extrapolation == 'periodic':
period = base_knots[-1] - base_knots[0]
knots = np.r_[base_knots[-(degree + 1):-1] - period, base_knots, base_knots[1:degree + 1] + period]
else:
dist_min = base_knots[1] - base_knots[0]
dist_max = base_knots[-1] - base_knots[-2]
knots = np.r_[np.linspace(base_knots[0] - degree * dist_min, base_knots[0] - dist_min, num=degree), base_knots, np.linspace(base_knots[-1] + dist_max, base_knots[-1] + degree * dist_max, num=degree)]
coef = np.eye(n_splines, dtype=np.float64)
if self.extrapolation == 'periodic':
coef = np.concatenate((coef, coef[:degree, :]))
extrapolate = self.extrapolation in ['periodic', 'continue']
bsplines = [BSpline.construct_fast(knots[:, i], coef, self.degree, extrapolate=extrapolate) for i in range(n_features)]
self.bsplines_ = bsplines
self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
return self
|
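The `<DeepExtract>` span above is the base-knot placement: quantile knots via percentiles, uniform knots via an evenly spaced grid between the column-wise min and max. A standalone sketch of both variants follows, with sample weights omitted for brevity.

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 2))
n_knots = 5

percentiles = 100 * np.linspace(start=0, stop=1, num=n_knots)
quantile_knots = np.percentile(X, percentiles, axis=0)                  # shape (n_knots, n_features)
uniform_knots = np.linspace(X.min(axis=0), X.max(axis=0), num=n_knots)  # shape (n_knots, n_features)
```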
def _fit_binary(X, y, alpha, C, sample_weight, learning_rate, max_iter):
"""Fit a binary classifier on X and y."""
(y_i, coef, intercept, average_coef, average_intercept) = _prepare_fit_binary(self, y, 1, input_dtye=X.dtype)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
self.random_state = check_random_state(self.random_state)
(dataset, intercept_decay) = make_dataset(X, y_i, sample_weight, random_state=self.random_state)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = self._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = self._make_validation_score_cb(validation_mask, X, y_i, sample_weight, classes=classes)
seed = self.random_state.randint(MAX_INT)
tol = self.tol if self.tol is not None else -np.inf
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
(coef, intercept, average_coef, average_intercept, n_iter_) = _plain_sgd(coef, intercept, average_coef, average_intercept, self.loss_function_, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, self._expanded_class_weight[1], self._expanded_class_weight[0], learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average)
if self.average:
if len(self.classes_) == 2:
self._average_intercept[0] = average_intercept
else:
self._average_intercept[1] = average_intercept
(coef, intercept, n_iter_) = (coef, intercept, n_iter_)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
self.intercept_ = np.atleast_1d(intercept)
|
def _fit_binary(X, y, alpha, C, sample_weight, learning_rate, max_iter):
"""Fit a binary classifier on X and y."""
<DeepExtract>
(y_i, coef, intercept, average_coef, average_intercept) = _prepare_fit_binary(self, y, 1, input_dtye=X.dtype)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
self.random_state = check_random_state(self.random_state)
(dataset, intercept_decay) = make_dataset(X, y_i, sample_weight, random_state=self.random_state)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = self._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = self._make_validation_score_cb(validation_mask, X, y_i, sample_weight, classes=classes)
seed = self.random_state.randint(MAX_INT)
tol = self.tol if self.tol is not None else -np.inf
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
(coef, intercept, average_coef, average_intercept, n_iter_) = _plain_sgd(coef, intercept, average_coef, average_intercept, self.loss_function_, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, self._expanded_class_weight[1], self._expanded_class_weight[0], learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average)
if self.average:
if len(self.classes_) == 2:
self._average_intercept[0] = average_intercept
else:
self._average_intercept[1] = average_intercept
(coef, intercept, n_iter_) = (coef, intercept, n_iter_)
</DeepExtract>
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
self.intercept_ = np.atleast_1d(intercept)
|
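In the pair above, `validation_mask`, `_prepare_fit_binary` and `MAX_INT` belong to the enclosing class and module, so the snippet is not runnable on its own. The branch at the end that swaps in `_average_coef` is what the public `average` option exposes; the sketch below is a minimal usage example at the estimator level, not the internal code path.

```python
import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 5))
y = (X[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)

# With average=True the fitted coef_/intercept_ hold the averaged weights,
# which is the branch selected at the end of _fit_binary above.
clf = SGDClassifier(average=True, random_state=0).fit(X, y)
print(clf.coef_.shape, clf.intercept_.shape)
```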
@fails_if_pypy
@pytest.mark.filterwarnings('ignore:Version 1 of dataset Australian is inactive')
@pytest.mark.parametrize('data_id, data_type', [(61, 'dataframe'), (292, 'sparse')])
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
"""Check the auto mode of `fetch_openml`."""
pd = pytest.importorskip('pandas')
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
data = fetch_openml(data_id=data_id, as_frame='auto', parser='auto', cache=False)
klass = pd.DataFrame if data_type == 'dataframe' else scipy.sparse.csr_matrix
assert isinstance(data.data, klass)
|
@fails_if_pypy
@pytest.mark.filterwarnings('ignore:Version 1 of dataset Australian is inactive')
@pytest.mark.parametrize('data_id, data_type', [(61, 'dataframe'), (292, 'sparse')])
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
"""Check the auto mode of `fetch_openml`."""
pd = pytest.importorskip('pandas')
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
data = fetch_openml(data_id=data_id, as_frame='auto', parser='auto', cache=False)
klass = pd.DataFrame if data_type == 'dataframe' else scipy.sparse.csr_matrix
assert isinstance(data.data, klass)
|
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=True`."""
pd = pytest.importorskip('pandas')
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=False, parser=parser)
(X, y) = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=True, parser=parser)
pd.testing.assert_frame_equal(bunch.data, X)
if isinstance(y, pd.Series):
pd.testing.assert_series_equal(bunch.target, y)
else:
pd.testing.assert_frame_equal(bunch.target, y)
|
@fails_if_pypy
@pytest.mark.parametrize('data_id', [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=True`."""
pd = pytest.importorskip('pandas')
<DeepExtract>
url_prefix_data_description = 'https://openml.org/api/v1/json/data/'
url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/'
url_prefix_download_data = 'https://openml.org/data/v1/'
url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/'
path_suffix = '.gz'
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}'
def _file_name(url, suffix):
output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix
return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act')
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header and True:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json')
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json')
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff')
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, '.json')
with _open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, 'rb')
decoded_s = decompressed_f.read().decode('utf-8')
json_data = json.loads(decoded_s)
if 'error' in json_data:
raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None)
with _open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, 'rb')
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == 'gzip'
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
if test_offline:
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
</DeepExtract>
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=False, parser=parser)
(X, y) = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=True, parser=parser)
pd.testing.assert_frame_equal(bunch.data, X)
if isinstance(y, pd.Series):
pd.testing.assert_series_equal(bunch.target, y)
else:
pd.testing.assert_frame_equal(bunch.target, y)
|
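The assertion pattern above (Bunch attributes versus the `return_X_y=True` tuple) is the general scikit-learn loader convention; the same equivalence can be exercised without the OpenML mocking machinery using a bundled dataset, as in this small sketch.

```python
from sklearn.datasets import load_iris

bunch = load_iris(as_frame=True)
X, y = load_iris(as_frame=True, return_X_y=True)
assert X.equals(bunch.data) and y.equals(bunch.target)
```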
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
proba = proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
proba = all_proba
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
|
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
<DeepExtract>
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
proba = proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
proba = all_proba
</DeepExtract>
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
|
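The `<DeepExtract>` span above is the body of `predict_proba`; `predict_log_proba` is simply its element-wise logarithm (`check_input` is a parameter of the original method and stays unbound in the inlined form). A quick check of that relationship on a small tree:

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(random_state=0).fit(X, y)

proba = clf.predict_proba(X)
log_proba = clf.predict_log_proba(X)        # zero probabilities map to -inf
mask = proba > 0
assert np.allclose(log_proba[mask], np.log(proba[mask]))
```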
def test_lda_score_perplexity():
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10, random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
|
def test_lda_score_perplexity():
<DeepExtract>
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
(n_components, X) = (n_components, X)
</DeepExtract>
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10, random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
|
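The `<DeepExtract>` span above corresponds to a small helper in the test module that builds a block-diagonal document-word matrix; the quantity being asserted is perplexity = exp(-score / N), with N the total word count. The same relationship, condensed into a standalone check:

```python
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from sklearn.decomposition import LatentDirichletAllocation

n_components = 3
X = csr_matrix(block_diag(*[np.full((3, 3), n_components, dtype=int)] * n_components))

lda = LatentDirichletAllocation(n_components=n_components, max_iter=10, random_state=0).fit(X)
assert np.isclose(lda.perplexity(X, sub_sampling=False), np.exp(-lda.score(X) / X.data.sum()))
```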
def _init_centroids(self, X, x_squared_norms, init, random_state, init_size=None, n_centroids=None, sample_weight=None):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hands already to avoid it being recomputed here.
init : {'k-means++', 'random'}, callable or ndarray of shape (n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
n_centroids : int, default=None
Number of centroids to initialize.
If left to 'None' the number of centroids will be equal to
number of clusters to form (self.n_clusters)
sample_weight : ndarray of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
"""
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
(n_samples, n_features) = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
if n_local_trials is None:
n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
closest_dist_sq = _euclidean_distances(centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq @ sample_weight
for c in range(1, n_clusters):
rand_vals = random_state.uniform(size=n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(sample_weight * closest_dist_sq), rand_vals)
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
distance_to_candidates = _euclidean_distances(X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
(centers, _) = (centers, indices)
elif isinstance(init, str) and init == 'random':
seeds = random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
if centers.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of clusters {self.n_clusters}.')
if centers.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of features of the data {X.shape[1]}.')
if sp.issparse(centers):
centers = centers.toarray()
return centers
|
def _init_centroids(self, X, x_squared_norms, init, random_state, init_size=None, n_centroids=None, sample_weight=None):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
        Squared Euclidean norm of each data point. Pass it if you already
        have it at hand to avoid recomputing it here.
init : {'k-means++', 'random'}, callable or ndarray of shape (n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
n_centroids : int, default=None
Number of centroids to initialize.
        If left to 'None', the number of centroids will be equal to the
        number of clusters to form (self.n_clusters).
sample_weight : ndarray of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
"""
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == 'k-means++':
<DeepExtract>
(n_samples, n_features) = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
        n_local_trials = None  # default of the extracted k-means++ helper
        if n_local_trials is None:
            n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
closest_dist_sq = _euclidean_distances(centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq @ sample_weight
for c in range(1, n_clusters):
rand_vals = random_state.uniform(size=n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(sample_weight * closest_dist_sq), rand_vals)
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
distance_to_candidates = _euclidean_distances(X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
(centers, _) = (centers, indices)
</DeepExtract>
elif isinstance(init, str) and init == 'random':
seeds = random_state.choice(n_samples, size=n_clusters, replace=False, p=sample_weight / sample_weight.sum())
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order='C')
<DeepExtract>
if centers.shape[0] != self.n_clusters:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of clusters {self.n_clusters}.')
if centers.shape[1] != X.shape[1]:
raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of features of the data {X.shape[1]}.')
</DeepExtract>
if sp.issparse(centers):
centers = centers.toarray()
return centers
|
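# Hedged sketch of the 'k-means++' branch above via the public API: the
# kmeans_plusplus helper runs the same seeding strategy (greedy choice among
# n_local_trials candidates weighted by squared distance). The blob data is an
# assumption for illustration only.
from sklearn.cluster import kmeans_plusplus
from sklearn.datasets import make_blobs

X_demo, _ = make_blobs(n_samples=200, centers=4, random_state=0)
centers_demo, indices_demo = kmeans_plusplus(X_demo, n_clusters=4, random_state=0)
print(centers_demo.shape)  # (4, 2): one seed per requested cluster
print(indices_demo)        # row indices of the chosen seed points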
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
    matching tokens, start location, and end location. May be called with an optional
    C{maxMatches} argument to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamlined = True
self.strRepr = None
return self
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
(nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield (tokens, preloc, nextLoc)
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
|
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
    matching tokens, start location, and end location. May be called with an optional
    C{maxMatches} argument to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
<DeepExtract>
self.streamlined = True
self.strRepr = None
return self
</DeepExtract>
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
(nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield (tokens, preloc, nextLoc)
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
|
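# Hedged usage sketch for the overlap branch handled above (grammar and input
# are assumptions): with overlap=True the scan position advances by a single
# character after each match when no leading whitespace is skipped, so
# overlapping hits are reported as well.
from pyparsing import Word, alphas

pair = Word(alphas, exact=2)
print([t[0] for t, s, e in pair.scanString("abcd")])                # ['ab', 'cd']
print([t[0] for t, s, e in pair.scanString("abcd", overlap=True)])  # ['ab', 'bc', 'cd']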
def test_huber_max_iter():
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
|
def test_huber_max_iter():
<DeepExtract>
rng = np.random.RandomState(0)
(X, y) = make_regression(n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05)
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
(X, y) = (X, y)
</DeepExtract>
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
|
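# Hedged sketch of what the test above asserts (synthetic data is an
# assumption): with a tiny max_iter the L-BFGS solver stops early, n_iter_ is
# capped at max_iter, and a ConvergenceWarning is typically emitted.
import warnings
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor

X_demo, y_demo = make_regression(n_samples=50, n_features=20, random_state=0, noise=0.05)
with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # the expected ConvergenceWarning is silenced here
    huber_demo = HuberRegressor(max_iter=1).fit(X_demo, y_demo)
print(huber_demo.n_iter_, huber_demo.max_iter)  # 1 1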
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ['predict', 'transform', 'decision_function', 'predict_proba']:
if hasattr(estimator, method):
assert_raise_message(ValueError, 'Reshape your data', getattr(estimator, method), X[0])
|
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
<DeepExtract>
if '1darray' in _safe_tags(estimator_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(estimator_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(estimator_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(estimator_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(estimator_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
<DeepExtract>
if _safe_tags(estimator, key='requires_positive_y'):
y += 1 + abs(y.min())
if _safe_tags(estimator, key='binary_only') and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
if _safe_tags(estimator, key='multioutput_only'):
y = np.reshape(y, (-1, 1))
y = y
</DeepExtract>
if hasattr(estimator, 'n_components'):
estimator.n_components = 1
if hasattr(estimator, 'n_clusters'):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ['predict', 'transform', 'decision_function', 'predict_proba']:
if hasattr(estimator, method):
assert_raise_message(ValueError, 'Reshape your data', getattr(estimator, method), X[0])
|
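# Hedged illustration of the behaviour the check above enforces (estimator and
# data are assumptions): predicting on a single sample passed as a 1-D array
# should raise a ValueError whose message asks to reshape the data.
import numpy as np
from sklearn.linear_model import LogisticRegression

X_demo = np.random.RandomState(0).uniform(size=(20, 3))
y_demo = np.arange(20) % 2
clf_demo = LogisticRegression().fit(X_demo, y_demo)
try:
    clf_demo.predict(X_demo[0])  # shape (3,) instead of (1, 3)
except ValueError as exc:
    print("Reshape your data" in str(exc))  # True
print(clf_demo.predict(X_demo[0].reshape(1, -1)))  # correct 2-D call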
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
    where :math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float64, np.float32], ensure_2d=True, allow_nd=False, reset=False)
raw_prediction = X @ self.coef_ + self.intercept_
y = check_array(y, dtype=raw_prediction.dtype, order='C', ensure_2d=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
base_loss = self._base_loss
if not base_loss.in_y_true_range(y):
raise ValueError(f'Some value(s) of y are out of the valid range of the loss {base_loss.__name__}.')
constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y))
if sample_weight is not None:
constant *= sample_weight.shape[0] / np.sum(sample_weight)
deviance = base_loss(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1)
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
deviance_null = base_loss(y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1)
return 1 - (deviance + constant) / (deviance_null + constant)
|
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
    where :math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
<DeepExtract>
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float64, np.float32], ensure_2d=True, allow_nd=False, reset=False)
raw_prediction = X @ self.coef_ + self.intercept_
</DeepExtract>
y = check_array(y, dtype=raw_prediction.dtype, order='C', ensure_2d=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
base_loss = self._base_loss
if not base_loss.in_y_true_range(y):
raise ValueError(f'Some value(s) of y are out of the valid range of the loss {base_loss.__name__}.')
constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y))
if sample_weight is not None:
constant *= sample_weight.shape[0] / np.sum(sample_weight)
deviance = base_loss(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1)
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
deviance_null = base_loss(y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1)
return 1 - (deviance + constant) / (deviance_null + constant)
|
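# Hedged sketch of the D^2 definition implemented above, checked against the
# public metrics for a Poisson GLM; the synthetic data and the use of
# mean_poisson_deviance are assumptions for illustration.
import numpy as np
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance

rng = np.random.RandomState(0)
X_demo = rng.uniform(size=(100, 3))
y_demo = rng.poisson(lam=np.exp(X_demo @ np.array([0.3, -0.2, 0.5])))
glm_demo = PoissonRegressor().fit(X_demo, y_demo)
dev = mean_poisson_deviance(y_demo, glm_demo.predict(X_demo))
dev_null = mean_poisson_deviance(y_demo, np.full(y_demo.shape, y_demo.mean()))
print(glm_demo.score(X_demo, y_demo), 1 - dev / dev_null)  # should agree up to rounding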
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
    sample_weight : array-like of shape (n_samples,), default=None
Weights of training data.
.. versionadded:: 0.23
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
fit_start_time = time()
acc_find_split_time = 0.0
acc_apply_split_time = 0.0
acc_compute_hist_time = 0.0
acc_prediction_time = 0.0
(X, y) = self._validate_data(X, y, dtype=[X_DTYPE], force_all_finite=False)
pass
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
self._fitted_with_sw = True
sample_weight = sample_weight
rng = check_random_state(self.random_state)
if not (self.warm_start and self._is_fitted()):
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype='u8')
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError('monotonic constraints are not supported for multiclass classification.')
monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
(n_samples, self._n_features) = X.shape
if self.categorical_features is None:
(self.is_categorical_, known_categories) = (None, None)
categorical_features = np.asarray(self.categorical_features)
if categorical_features.size == 0:
(self.is_categorical_, known_categories) = (None, None)
if categorical_features.dtype.kind not in ('i', 'b', 'U', 'O'):
raise ValueError(f'categorical_features must be an array-like of bool, int or str, got: {categorical_features.dtype.name}.')
if categorical_features.dtype.kind == 'O':
types = set((type(f) for f in categorical_features))
if types != {str}:
raise ValueError(f"categorical_features must be an array-like of bool, int or str, got: {', '.join(sorted((t.__name__ for t in types)))}.")
n_features = X.shape[1]
if categorical_features.dtype.kind in ('U', 'O'):
if not hasattr(self, 'feature_names_in_'):
raise ValueError('categorical_features should be passed as an array of integers or as a boolean mask when the model is fitted on data without feature names.')
is_categorical = np.zeros(n_features, dtype=bool)
feature_names = self.feature_names_in_.tolist()
for feature_name in categorical_features:
try:
is_categorical[feature_names.index(feature_name)] = True
except ValueError as e:
raise ValueError(f"categorical_features has a item value '{feature_name}' which is not a valid feature name of the training data. Observed feature names: {feature_names}") from e
elif categorical_features.dtype.kind == 'i':
if np.max(categorical_features) >= n_features or np.min(categorical_features) < 0:
raise ValueError('categorical_features set as integer indices must be in [0, n_features - 1]')
is_categorical = np.zeros(n_features, dtype=bool)
is_categorical[categorical_features] = True
else:
if categorical_features.shape[0] != n_features:
raise ValueError(f'categorical_features set as a boolean mask must have shape (n_features,), got: {categorical_features.shape}')
is_categorical = categorical_features
if not np.any(is_categorical):
(self.is_categorical_, known_categories) = (None, None)
known_categories = []
for f_idx in range(n_features):
if is_categorical[f_idx]:
categories = np.unique(X[:, f_idx])
missing = np.isnan(categories)
if missing.any():
categories = categories[~missing]
if hasattr(self, 'feature_names_in_'):
feature_name = f"'{self.feature_names_in_[f_idx]}'"
else:
feature_name = f'at index {f_idx}'
if categories.size > self.max_bins:
raise ValueError(f'Categorical feature {feature_name} is expected to have a cardinality <= {self.max_bins} but actually has a cardinality of {categories.size}.')
if (categories >= self.max_bins).any():
raise ValueError(f'Categorical feature {feature_name} is expected to be encoded with values < {self.max_bins} but the largest value for the encoded categories is {categories.max()}.')
else:
categories = None
known_categories.append(categories)
(self.is_categorical_, known_categories) = (is_categorical, known_categories)
if self.interaction_cst is None:
interaction_cst = None
if self.interaction_cst == 'no_interactions':
interaction_cst = [[i] for i in range(self._n_features)]
elif self.interaction_cst == 'pairwise':
interaction_cst = itertools.combinations(range(self._n_features), 2)
else:
interaction_cst = self.interaction_cst
try:
constraints = [set(group) for group in interaction_cst]
except TypeError:
raise ValueError(f'Interaction constraints must be a sequence of tuples or lists, got: {self.interaction_cst!r}.')
for group in constraints:
for x in group:
if not (isinstance(x, Integral) and 0 <= x < self._n_features):
raise ValueError(f'Interaction constraints must consist of integer indices in [0, n_features - 1] = [0, {self._n_features - 1}], specifying the position of features, got invalid indices: {group!r}')
rest = set(range(self._n_features)) - set().union(*constraints)
if len(rest) > 0:
constraints.append(rest)
interaction_cst = constraints
self._in_fit = True
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
pass
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == 'auto':
self.do_early_stopping_ = n_samples > 10000
else:
self.do_early_stopping_ = self.early_stopping
self._use_validation_data = self.validation_fraction is not None
if self.do_early_stopping_ and self._use_validation_data:
stratify = y if hasattr(self._loss, 'predict_proba') else None
if sample_weight is None:
(X_train, X_val, y_train, y_val) = train_test_split(X, y, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
sample_weight_train = sample_weight_val = None
else:
(X_train, X_val, y_train, y_val, sample_weight_train, sample_weight_val) = train_test_split(X, y, sample_weight, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
else:
(X_train, y_train, sample_weight_train) = (X, y, sample_weight)
X_val = y_val = sample_weight_val = None
n_bins = self.max_bins + 1
self._bin_mapper = _BinMapper(n_bins=n_bins, is_categorical=self.is_categorical_, known_categories=known_categories, random_state=self._random_seed, n_threads=n_threads)
description = 'training' if True else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_train.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if True:
X_binned = self._bin_mapper.fit_transform(X_train)
else:
X_binned = self._bin_mapper.transform(X_train)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_train = X_binned
if X_val is not None:
description = 'training' if False else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_val.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if False:
X_binned = self._bin_mapper.fit_transform(X_val)
else:
X_binned = self._bin_mapper.transform(X_val)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_val = X_binned
else:
X_binned_val = None
has_missing_values = (X_binned_train == self._bin_mapper.missing_values_bin_idx_).any(axis=0).astype(np.uint8)
if self.verbose:
print('Fitting gradient boosted rounds:')
n_samples = X_binned_train.shape[0]
if not (self._is_fitted() and self.warm_start):
for var in ('train_score_', 'validation_score_'):
if hasattr(self, var):
delattr(self, var)
self._baseline_prediction = self._loss.fit_intercept_only(y_true=y_train, sample_weight=sample_weight_train).reshape((1, -1))
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
self._predictors = predictors = []
self._scorer = None
raw_predictions_val = None
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
raw_predictions_val = np.zeros(shape=(X_binned_val.shape[0], self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions_val += self._baseline_prediction
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
else:
self._scorer = check_scoring(self, self.scoring)
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
begin_at_stage = 0
else:
if self.max_iter < self.n_iter_:
raise ValueError('max_iter=%d must be larger than or equal to n_iter_=%d when warm_start==True' % (self.max_iter, self.n_iter_))
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_train = self._validate_data(X_binned_train, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_train.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_train.shape[1], self._n_features))
n_samples = X_binned_train.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_train, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions = raw_predictions
if self.do_early_stopping_ and self._use_validation_data:
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_val = self._validate_data(X_binned_val, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_val.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_val.shape[1], self._n_features))
n_samples = X_binned_val.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_val, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions_val = raw_predictions
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != 'loss':
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
predictors = self._predictors
begin_at_stage = self.n_iter_
(gradient, hessian) = self._loss.init_gradient_and_hessian(n_samples=n_samples, dtype=G_H_DTYPE, order='F')
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose:
iteration_start_time = time()
print('[{}/{}] '.format(iteration + 1, self.max_iter), end='', flush=True)
if self._loss.constant_hessian:
self._loss.gradient(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, n_threads=n_threads)
else:
self._loss.gradient_hessian(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, hessian_out=hessian, n_threads=n_threads)
predictors.append([])
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(X_binned=X_binned_train, gradients=g_view[:, k], hessians=h_view[:, k], n_bins=n_bins, n_bins_non_missing=self._bin_mapper.n_bins_non_missing_, has_missing_values=has_missing_values, is_categorical=self.is_categorical_, monotonic_cst=monotonic_cst, interaction_cst=interaction_cst, max_leaf_nodes=self.max_leaf_nodes, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, l2_regularization=self.l2_regularization, shrinkage=self.learning_rate, n_threads=n_threads)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if self._loss.need_update_leaves_values:
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight_train is None:
sw = None
else:
sw = sample_weight_train[indices]
update = self._loss.fit_intercept_only(y_true=y_train[indices] - raw_predictions[:, k][indices], sample_weight=sw)
leaf.value = grower.shrinkage * update
predictor = grower.make_predictor(binning_thresholds=self._bin_mapper.bin_thresholds_)
predictors[-1].append(predictor)
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
for (k, pred) in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(X_binned_val, self._bin_mapper.missing_values_bin_idx_, n_threads)
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
else:
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
if self.verbose:
log_msg = ''
predictors_of_ith_iteration = [predictors_list for predictors_list in self._predictors[-1] if predictors_list]
n_trees = len(predictors_of_ith_iteration)
max_depth = max((predictor.get_max_depth() for predictor in predictors_of_ith_iteration))
n_leaves = sum((predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration))
if n_trees == 1:
log_msg += '{} tree, {} leaves, '.format(n_trees, n_leaves)
else:
log_msg += '{} trees, {} leaves '.format(n_trees, n_leaves)
log_msg += '({} on avg), '.format(int(n_leaves / n_trees))
log_msg += 'max depth = {}, '.format(max_depth)
if self.do_early_stopping_:
if self.scoring == 'loss':
factor = -1
name = 'loss'
else:
factor = 1
name = 'score'
log_msg += 'train {}: {:.5f}, '.format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += 'val {}: {:.5f}, '.format(name, factor * self.validation_score_[-1])
iteration_time = time() - iteration_start_time
log_msg += 'in {:0.3f}s'.format(iteration_time)
print(log_msg)
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum((predictor.get_n_leaf_nodes() for predictors_at_ith_iteration in self._predictors for predictor in predictors_at_ith_iteration))
n_predictors = sum((len(predictors_at_ith_iteration) for predictors_at_ith_iteration in self._predictors))
print('Fit {} trees in {:.3f} s, ({} total leaves)'.format(n_predictors, duration, n_total_leaves))
print('{:<32} {:.3f}s'.format('Time spent computing histograms:', acc_compute_hist_time))
print('{:<32} {:.3f}s'.format('Time spent finding best splits:', acc_find_split_time))
print('{:<32} {:.3f}s'.format('Time spent applying splits:', acc_apply_split_time))
print('{:<32} {:.3f}s'.format('Time spent predicting:', acc_prediction_time))
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
    sample_weight : array-like of shape (n_samples,), default=None
Weights of training data.
.. versionadded:: 0.23
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
fit_start_time = time()
acc_find_split_time = 0.0
acc_apply_split_time = 0.0
acc_compute_hist_time = 0.0
acc_prediction_time = 0.0
(X, y) = self._validate_data(X, y, dtype=[X_DTYPE], force_all_finite=False)
<DeepExtract>
pass
</DeepExtract>
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
self._fitted_with_sw = True
<DeepExtract>
sample_weight = sample_weight
</DeepExtract>
rng = check_random_state(self.random_state)
if not (self.warm_start and self._is_fitted()):
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype='u8')
<DeepExtract>
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError('monotonic constraints are not supported for multiclass classification.')
</DeepExtract>
monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
(n_samples, self._n_features) = X.shape
<DeepExtract>
if self.categorical_features is None:
(self.is_categorical_, known_categories) = (None, None)
categorical_features = np.asarray(self.categorical_features)
if categorical_features.size == 0:
(self.is_categorical_, known_categories) = (None, None)
if categorical_features.dtype.kind not in ('i', 'b', 'U', 'O'):
raise ValueError(f'categorical_features must be an array-like of bool, int or str, got: {categorical_features.dtype.name}.')
if categorical_features.dtype.kind == 'O':
types = set((type(f) for f in categorical_features))
if types != {str}:
raise ValueError(f"categorical_features must be an array-like of bool, int or str, got: {', '.join(sorted((t.__name__ for t in types)))}.")
n_features = X.shape[1]
if categorical_features.dtype.kind in ('U', 'O'):
if not hasattr(self, 'feature_names_in_'):
raise ValueError('categorical_features should be passed as an array of integers or as a boolean mask when the model is fitted on data without feature names.')
is_categorical = np.zeros(n_features, dtype=bool)
feature_names = self.feature_names_in_.tolist()
for feature_name in categorical_features:
try:
is_categorical[feature_names.index(feature_name)] = True
except ValueError as e:
raise ValueError(f"categorical_features has a item value '{feature_name}' which is not a valid feature name of the training data. Observed feature names: {feature_names}") from e
elif categorical_features.dtype.kind == 'i':
if np.max(categorical_features) >= n_features or np.min(categorical_features) < 0:
raise ValueError('categorical_features set as integer indices must be in [0, n_features - 1]')
is_categorical = np.zeros(n_features, dtype=bool)
is_categorical[categorical_features] = True
else:
if categorical_features.shape[0] != n_features:
raise ValueError(f'categorical_features set as a boolean mask must have shape (n_features,), got: {categorical_features.shape}')
is_categorical = categorical_features
if not np.any(is_categorical):
(self.is_categorical_, known_categories) = (None, None)
known_categories = []
for f_idx in range(n_features):
if is_categorical[f_idx]:
categories = np.unique(X[:, f_idx])
missing = np.isnan(categories)
if missing.any():
categories = categories[~missing]
if hasattr(self, 'feature_names_in_'):
feature_name = f"'{self.feature_names_in_[f_idx]}'"
else:
feature_name = f'at index {f_idx}'
if categories.size > self.max_bins:
raise ValueError(f'Categorical feature {feature_name} is expected to have a cardinality <= {self.max_bins} but actually has a cardinality of {categories.size}.')
if (categories >= self.max_bins).any():
raise ValueError(f'Categorical feature {feature_name} is expected to be encoded with values < {self.max_bins} but the largest value for the encoded categories is {categories.max()}.')
else:
categories = None
known_categories.append(categories)
(self.is_categorical_, known_categories) = (is_categorical, known_categories)
</DeepExtract>
<DeepExtract>
if self.interaction_cst is None:
interaction_cst = None
if self.interaction_cst == 'no_interactions':
interaction_cst = [[i] for i in range(self._n_features)]
elif self.interaction_cst == 'pairwise':
interaction_cst = itertools.combinations(range(self._n_features), 2)
else:
interaction_cst = self.interaction_cst
try:
constraints = [set(group) for group in interaction_cst]
except TypeError:
raise ValueError(f'Interaction constraints must be a sequence of tuples or lists, got: {self.interaction_cst!r}.')
for group in constraints:
for x in group:
if not (isinstance(x, Integral) and 0 <= x < self._n_features):
raise ValueError(f'Interaction constraints must consist of integer indices in [0, n_features - 1] = [0, {self._n_features - 1}], specifying the position of features, got invalid indices: {group!r}')
rest = set(range(self._n_features)) - set().union(*constraints)
if len(rest) > 0:
constraints.append(rest)
interaction_cst = constraints
</DeepExtract>
self._in_fit = True
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
<DeepExtract>
pass
</DeepExtract>
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == 'auto':
self.do_early_stopping_ = n_samples > 10000
else:
self.do_early_stopping_ = self.early_stopping
self._use_validation_data = self.validation_fraction is not None
if self.do_early_stopping_ and self._use_validation_data:
stratify = y if hasattr(self._loss, 'predict_proba') else None
if sample_weight is None:
(X_train, X_val, y_train, y_val) = train_test_split(X, y, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
sample_weight_train = sample_weight_val = None
else:
(X_train, X_val, y_train, y_val, sample_weight_train, sample_weight_val) = train_test_split(X, y, sample_weight, test_size=self.validation_fraction, stratify=stratify, random_state=self._random_seed)
else:
(X_train, y_train, sample_weight_train) = (X, y, sample_weight)
X_val = y_val = sample_weight_val = None
n_bins = self.max_bins + 1
self._bin_mapper = _BinMapper(n_bins=n_bins, is_categorical=self.is_categorical_, known_categories=known_categories, random_state=self._random_seed, n_threads=n_threads)
<DeepExtract>
description = 'training' if True else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_train.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if True:
X_binned = self._bin_mapper.fit_transform(X_train)
else:
X_binned = self._bin_mapper.transform(X_train)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_train = X_binned
</DeepExtract>
if X_val is not None:
<DeepExtract>
description = 'training' if False else 'validation'
if self.verbose:
print('Binning {:.3f} GB of {} data: '.format(X_val.nbytes / 1000000000.0, description), end='', flush=True)
tic = time()
if False:
X_binned = self._bin_mapper.fit_transform(X_val)
else:
X_binned = self._bin_mapper.transform(X_val)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print('{:.3f} s'.format(duration))
X_binned_val = X_binned
</DeepExtract>
else:
X_binned_val = None
has_missing_values = (X_binned_train == self._bin_mapper.missing_values_bin_idx_).any(axis=0).astype(np.uint8)
if self.verbose:
print('Fitting gradient boosted rounds:')
n_samples = X_binned_train.shape[0]
if not (self._is_fitted() and self.warm_start):
<DeepExtract>
for var in ('train_score_', 'validation_score_'):
if hasattr(self, var):
delattr(self, var)
</DeepExtract>
self._baseline_prediction = self._loss.fit_intercept_only(y_true=y_train, sample_weight=sample_weight_train).reshape((1, -1))
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
self._predictors = predictors = []
self._scorer = None
raw_predictions_val = None
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
raw_predictions_val = np.zeros(shape=(X_binned_val.shape[0], self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions_val += self._baseline_prediction
<DeepExtract>
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
</DeepExtract>
else:
self._scorer = check_scoring(self, self.scoring)
<DeepExtract>
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
</DeepExtract>
<DeepExtract>
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
</DeepExtract>
begin_at_stage = 0
else:
if self.max_iter < self.n_iter_:
raise ValueError('max_iter=%d must be larger than or equal to n_iter_=%d when warm_start==True' % (self.max_iter, self.n_iter_))
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
<DeepExtract>
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_train = self._validate_data(X_binned_train, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_train.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_train.shape[1], self._n_features))
n_samples = X_binned_train.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_train, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions = raw_predictions
</DeepExtract>
if self.do_early_stopping_ and self._use_validation_data:
<DeepExtract>
is_binned = getattr(self, '_in_fit', False)
if not is_binned:
X_binned_val = self._validate_data(X_binned_val, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X_binned_val.shape[1] != self._n_features:
raise ValueError('X has {} features but this estimator was trained with {} features.'.format(X_binned_val.shape[1], self._n_features))
n_samples = X_binned_val.shape[0]
raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')
raw_predictions += self._baseline_prediction
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(X_binned_val, self._predictors, raw_predictions, is_binned, n_threads)
raw_predictions_val = raw_predictions
</DeepExtract>
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != 'loss':
<DeepExtract>
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(indices, n_samples=subsample_size, replace=False, random_state=self._random_seed, stratify=stratify)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
(X_binned_small_train, y_small_train, sample_weight_small_train) = (X_binned_train, y_train, sample_weight_train)
</DeepExtract>
predictors = self._predictors
begin_at_stage = self.n_iter_
(gradient, hessian) = self._loss.init_gradient_and_hessian(n_samples=n_samples, dtype=G_H_DTYPE, order='F')
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose:
iteration_start_time = time()
print('[{}/{}] '.format(iteration + 1, self.max_iter), end='', flush=True)
if self._loss.constant_hessian:
self._loss.gradient(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, n_threads=n_threads)
else:
self._loss.gradient_hessian(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, gradient_out=gradient, hessian_out=hessian, n_threads=n_threads)
predictors.append([])
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(X_binned=X_binned_train, gradients=g_view[:, k], hessians=h_view[:, k], n_bins=n_bins, n_bins_non_missing=self._bin_mapper.n_bins_non_missing_, has_missing_values=has_missing_values, is_categorical=self.is_categorical_, monotonic_cst=monotonic_cst, interaction_cst=interaction_cst, max_leaf_nodes=self.max_leaf_nodes, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, l2_regularization=self.l2_regularization, shrinkage=self.learning_rate, n_threads=n_threads)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if self._loss.need_update_leaves_values:
<DeepExtract>
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight_train is None:
sw = None
else:
sw = sample_weight_train[indices]
update = self._loss.fit_intercept_only(y_true=y_train[indices] - raw_predictions[:, k][indices], sample_weight=sw)
leaf.value = grower.shrinkage * update
</DeepExtract>
predictor = grower.make_predictor(binning_thresholds=self._bin_mapper.bin_thresholds_)
predictors[-1].append(predictor)
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
if self.scoring == 'loss':
if self._use_validation_data:
for (k, pred) in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(X_binned_val, self._bin_mapper.missing_values_bin_idx_, n_threads)
<DeepExtract>
self.train_score_.append(-self._loss(y_true=y_train, raw_prediction=raw_predictions, sample_weight=sample_weight_train, n_threads=n_threads))
if self._use_validation_data:
self.validation_score_.append(-self._loss(y_true=y_val, raw_prediction=raw_predictions_val, sample_weight=sample_weight_val, n_threads=n_threads))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
</DeepExtract>
else:
<DeepExtract>
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train))
else:
self.train_score_.append(self._scorer(self, X_binned_small_train, y_small_train, sample_weight=sample_weight_small_train))
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val, sample_weight=sample_weight_val))
should_early_stop = self._should_stop(self.validation_score_)
else:
should_early_stop = self._should_stop(self.train_score_)
</DeepExtract>
if self.verbose:
<DeepExtract>
log_msg = ''
predictors_of_ith_iteration = [predictors_list for predictors_list in self._predictors[-1] if predictors_list]
n_trees = len(predictors_of_ith_iteration)
max_depth = max((predictor.get_max_depth() for predictor in predictors_of_ith_iteration))
n_leaves = sum((predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration))
if n_trees == 1:
log_msg += '{} tree, {} leaves, '.format(n_trees, n_leaves)
else:
log_msg += '{} trees, {} leaves '.format(n_trees, n_leaves)
log_msg += '({} on avg), '.format(int(n_leaves / n_trees))
log_msg += 'max depth = {}, '.format(max_depth)
if self.do_early_stopping_:
if self.scoring == 'loss':
factor = -1
name = 'loss'
else:
factor = 1
name = 'score'
log_msg += 'train {}: {:.5f}, '.format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += 'val {}: {:.5f}, '.format(name, factor * self.validation_score_[-1])
iteration_time = time() - iteration_start_time
log_msg += 'in {:0.3f}s'.format(iteration_time)
print(log_msg)
</DeepExtract>
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum((predictor.get_n_leaf_nodes() for predictors_at_ith_iteration in self._predictors for predictor in predictors_at_ith_iteration))
n_predictors = sum((len(predictors_at_ith_iteration) for predictors_at_ith_iteration in self._predictors))
print('Fit {} trees in {:.3f} s, ({} total leaves)'.format(n_predictors, duration, n_total_leaves))
print('{:<32} {:.3f}s'.format('Time spent computing histograms:', acc_compute_hist_time))
print('{:<32} {:.3f}s'.format('Time spent finding best splits:', acc_find_split_time))
print('{:<32} {:.3f}s'.format('Time spent applying splits:', acc_apply_split_time))
print('{:<32} {:.3f}s'.format('Time spent predicting:', acc_prediction_time))
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit
return self
|
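# Hedged usage sketch for the fit loop above (dataset and parameters are
# assumptions): early_stopping='auto' only activates beyond 10000 samples, so
# it is forced on here, and categorical_features feeds the is_categorical_
# handling shown above.
import numpy as np
from sklearn.ensemble import HistGradientBoostingClassifier

rng = np.random.RandomState(0)
X_demo = np.column_stack([rng.normal(size=500), rng.randint(0, 3, size=500)])
y_demo = (X_demo[:, 0] + (X_demo[:, 1] == 2) > 0.5).astype(int)
clf_demo = HistGradientBoostingClassifier(
    categorical_features=[1],  # second column treated as categorical
    early_stopping=True,
    validation_fraction=0.2,
    random_state=0,
).fit(X_demo, y_demo)
print(clf_demo.n_iter_)                 # boosting iterations actually run
print(len(clf_demo.validation_score_))  # one entry per iteration plus the baseline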
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
y_proba = y_proba
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
|
def predict(self, X):
<DeepExtract>
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
y_proba = y_proba
</DeepExtract>
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
|
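# Hedged sketch of the most-frequent baseline behaviour above via the public
# DummyClassifier (the mapping to _most_frequent_class_idx is an assumption):
# predict_proba puts all mass on the majority class and predict returns its label.
import numpy as np
from sklearn.dummy import DummyClassifier

X_demo = np.zeros((6, 1))
y_demo = np.array(["a", "b", "b", "b", "a", "b"])
dummy_demo = DummyClassifier(strategy="most_frequent").fit(X_demo, y_demo)
print(dummy_demo.predict(X_demo[:2]))        # ['b' 'b']
print(dummy_demo.predict_proba(X_demo[:2]))  # [[0. 1.] [0. 1.]]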
def fit(self, X, y=None):
"""Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if self.verbose != 'deprecated':
warnings.warn("The 'verbose' parameter was deprecated in version 1.1 and will be removed in 1.3. A warning will always be raised upon the removal of empty columns in the future version.", FutureWarning)
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not True and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=True, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if True:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
if self.fill_value is None:
if X.dtype.kind in ('i', 'u', 'f'):
fill_value = 0
else:
fill_value = 'missing_value'
else:
fill_value = self.fill_value
if self.strategy == 'constant' and X.dtype.kind in ('i', 'u', 'f') and (not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a numerical value when imputing numerical data".format(fill_value))
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError('Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.')
else:
missing_mask = _get_mask(X, self.missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if self.strategy == 'constant':
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if len(column) == 0 and self.keep_empty_features:
statistics[i] = 0
elif self.strategy == 'mean':
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif self.strategy == 'median':
statistics[i] = _get_median(column, n_zeros)
elif self.strategy == 'most_frequent':
statistics[i] = _most_frequent(column, 0, n_zeros)
super()._fit_indicator(missing_mask)
self.statistics_ = statistics
else:
missing_mask = _get_mask(X, self.missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
if self.strategy == 'mean':
mean_masked = np.ma.mean(masked_X, axis=0)
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = mean
elif self.strategy == 'median':
median_masked = np.ma.median(masked_X, axis=0)
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = median
elif self.strategy == 'most_frequent':
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == 'O':
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
if len(row) == 0 and self.keep_empty_features:
most_frequent[i] = 0
else:
most_frequent[i] = _most_frequent(row, np.nan, 0)
self.statistics_ = most_frequent
elif self.strategy == 'constant':
self.statistics_ = np.full(X.shape[1], fill_value, dtype=X.dtype)
return self
|
def fit(self, X, y=None):
"""Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if self.verbose != 'deprecated':
warnings.warn("The 'verbose' parameter was deprecated in version 1.1 and will be removed in 1.3. A warning will always be raised upon the removal of empty columns in the future version.", FutureWarning)
<DeepExtract>
if self.strategy in ('most_frequent', 'constant'):
if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not True and self._fit_dtype.kind == 'O':
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = 'allow-nan'
else:
force_all_finite = True
try:
X = self._validate_data(X, reset=True, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if 'could not convert' in str(ve):
new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve))
raise new_ve from None
else:
raise ve
if True:
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ('i', 'u', 'f', 'O'):
raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
X = X
</DeepExtract>
if self.fill_value is None:
if X.dtype.kind in ('i', 'u', 'f'):
fill_value = 0
else:
fill_value = 'missing_value'
else:
fill_value = self.fill_value
if self.strategy == 'constant' and X.dtype.kind in ('i', 'u', 'f') and (not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a numerical value when imputing numerical data".format(fill_value))
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError('Imputation not possible when missing_values == 0 and input is sparse. Provide a dense array instead.')
else:
<DeepExtract>
missing_mask = _get_mask(X, self.missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if self.strategy == 'constant':
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if len(column) == 0 and self.keep_empty_features:
statistics[i] = 0
elif self.strategy == 'mean':
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif self.strategy == 'median':
statistics[i] = _get_median(column, n_zeros)
elif self.strategy == 'most_frequent':
statistics[i] = _most_frequent(column, 0, n_zeros)
super()._fit_indicator(missing_mask)
self.statistics_ = statistics
</DeepExtract>
else:
<DeepExtract>
missing_mask = _get_mask(X, self.missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
if self.strategy == 'mean':
mean_masked = np.ma.mean(masked_X, axis=0)
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = mean
elif self.strategy == 'median':
median_masked = np.ma.median(masked_X, axis=0)
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = 0 if self.keep_empty_features else np.nan
self.statistics_ = median
elif self.strategy == 'most_frequent':
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == 'O':
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
if len(row) == 0 and self.keep_empty_features:
most_frequent[i] = 0
else:
most_frequent[i] = _most_frequent(row, np.nan, 0)
self.statistics_ = most_frequent
elif self.strategy == 'constant':
self.statistics_ = np.full(X.shape[1], fill_value, dtype=X.dtype)
</DeepExtract>
return self
|
def orthogonal_mp_gram(Gram, Xy, *, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP).
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : ndarray of shape (n_features, n_features)
Gram matrix of the input data: X.T * X.
Xy : ndarray of shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y.
n_nonzero_coefs : int, default=None
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, default=None
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like of shape (n_targets,), default=None
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, default=True
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, default=True
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, default=False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
coef : ndarray of shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See Also
--------
OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
lars_path : Compute Least Angle Regression or Lasso path using
LARS algorithm.
sklearn.decomposition.sparse_encode : Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if copy_Xy or not Xy.flags.writeable:
Xy = Xy.copy()
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError('Epsilon cannot be negative')
if tol is None and n_nonzero_coefs <= 0:
raise ValueError('The number of atoms must be positive')
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError('The number of atoms cannot be more than the number of features')
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
else:
coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
n_iters = []
for k in range(Xy.shape[1]):
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if False or not Xy[:, k].flags.writeable:
Xy[:, k] = Xy[:, k].copy()
min_float = np.finfo(Gram.dtype).eps
(nrm2, swap) = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
(potrs,) = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram))
alpha = Xy[:, k]
tol_curr = norms_squared[k] if tol is not None else None
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.0
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False)
v = nrm2(L[n_active, :n_active]) ** 2
Lkk = Gram[lam, lam] - v
if Lkk <= min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = sqrt(Lkk)
else:
L[0, 0] = sqrt(Gram[lam, lam])
(Gram[n_active], Gram[lam]) = swap(Gram[n_active], Gram[lam])
(Gram.T[n_active], Gram.T[lam]) = swap(Gram.T[n_active], Gram.T[lam])
(indices[n_active], indices[lam]) = (indices[lam], indices[n_active])
(Xy[:, k][n_active], Xy[:, k][lam]) = (Xy[:, k][lam], Xy[:, k][n_active])
n_active += 1
(gamma, _) = potrs(L[:n_active, :n_active], Xy[:, k][:n_active], lower=True, overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy[:, k] - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
out = (gamma, indices[:n_active], coefs[:, :n_active], n_active)
else:
out = (gamma, indices[:n_active], n_active)
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return (np.squeeze(coef), n_iters)
else:
return np.squeeze(coef)
|
def orthogonal_mp_gram(Gram, Xy, *, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP).
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : ndarray of shape (n_features, n_features)
Gram matrix of the input data: X.T * X.
Xy : ndarray of shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y.
n_nonzero_coefs : int, default=None
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, default=None
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like of shape (n_targets,), default=None
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, default=True
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, default=True
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, default=False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
coef : ndarray of shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See Also
--------
OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
lars_path : Compute Least Angle Regression or Lasso path using
LARS algorithm.
sklearn.decomposition.sparse_encode : Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if copy_Xy or not Xy.flags.writeable:
Xy = Xy.copy()
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError('Epsilon cannot be negative')
if tol is None and n_nonzero_coefs <= 0:
raise ValueError('The number of atoms must be positive')
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError('The number of atoms cannot be more than the number of features')
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
else:
coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)
n_iters = []
for k in range(Xy.shape[1]):
<DeepExtract>
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if False or not Xy[:, k].flags.writeable:
Xy[:, k] = Xy[:, k].copy()
min_float = np.finfo(Gram.dtype).eps
(nrm2, swap) = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
(potrs,) = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram))
alpha = Xy[:, k]
tol_curr = norms_squared[k] if tol is not None else None
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.0
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False)
v = nrm2(L[n_active, :n_active]) ** 2
Lkk = Gram[lam, lam] - v
if Lkk <= min_float:
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = sqrt(Lkk)
else:
L[0, 0] = sqrt(Gram[lam, lam])
(Gram[n_active], Gram[lam]) = swap(Gram[n_active], Gram[lam])
(Gram.T[n_active], Gram.T[lam]) = swap(Gram.T[n_active], Gram.T[lam])
(indices[n_active], indices[lam]) = (indices[lam], indices[n_active])
(Xy[:, k][n_active], Xy[:, k][lam]) = (Xy[:, k][lam], Xy[:, k][n_active])
n_active += 1
(gamma, _) = potrs(L[:n_active, :n_active], Xy[:, k][:n_active], lower=True, overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy[:, k] - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
out = (gamma, indices[:n_active], coefs[:, :n_active], n_active)
else:
out = (gamma, indices[:n_active], n_active)
</DeepExtract>
if return_path:
(_, idx, coefs, n_iter) = out
coef = coef[:, :, :len(idx)]
for (n_active, x) in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
(x, idx, n_iter) = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return (np.squeeze(coef), n_iters)
else:
return np.squeeze(coef)
|
def __lshift__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.skipWhitespace = True
self.whiteChars = self.expr.whiteChars
self.copyDefaultWhiteChars = False
return self
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
|
def __lshift__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
<DeepExtract>
self.skipWhitespace = True
self.whiteChars = self.expr.whiteChars
self.copyDefaultWhiteChars = False
return self
</DeepExtract>
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
|
def check_transformer_get_feature_names_out(name, transformer_orig):
tags = transformer_orig._get_tags()
if '2darray' not in tags['X_types'] or tags['no_validation']:
return
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
transformer = clone(transformer_orig)
if '1darray' in _safe_tags(transformer, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer, key='pairwise'):
X = kernel(X, X)
X = X
n_features = X.shape[1]
set_random_state(transformer)
y_ = y
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
X_transform = transformer.fit_transform(X, y=y_)
input_features = [f'feature{i}' for i in range(n_features)]
with raises(ValueError, match='input_features should have length equal'):
transformer.get_feature_names_out(input_features[::2])
feature_names_out = transformer.get_feature_names_out(input_features)
assert feature_names_out is not None
assert isinstance(feature_names_out, np.ndarray)
assert feature_names_out.dtype == object
assert all((isinstance(name, str) for name in feature_names_out))
if isinstance(X_transform, tuple):
n_features_out = X_transform[0].shape[1]
else:
n_features_out = X_transform.shape[1]
assert len(feature_names_out) == n_features_out, f'Expected {n_features_out} feature names, got {len(feature_names_out)}'
|
def check_transformer_get_feature_names_out(name, transformer_orig):
tags = transformer_orig._get_tags()
if '2darray' not in tags['X_types'] or tags['no_validation']:
return
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
transformer = clone(transformer_orig)
<DeepExtract>
if '1darray' in _safe_tags(transformer, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
n_features = X.shape[1]
set_random_state(transformer)
y_ = y
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
X_transform = transformer.fit_transform(X, y=y_)
input_features = [f'feature{i}' for i in range(n_features)]
with raises(ValueError, match='input_features should have length equal'):
transformer.get_feature_names_out(input_features[::2])
feature_names_out = transformer.get_feature_names_out(input_features)
assert feature_names_out is not None
assert isinstance(feature_names_out, np.ndarray)
assert feature_names_out.dtype == object
assert all((isinstance(name, str) for name in feature_names_out))
if isinstance(X_transform, tuple):
n_features_out = X_transform[0].shape[1]
else:
n_features_out = X_transform.shape[1]
assert len(feature_names_out) == n_features_out, f'Expected {n_features_out} feature names, got {len(feature_names_out)}'
|
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
random_state = np.random.RandomState(seed)
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
(X, y) = (X, y)
(X_train, X_test) = (X[n_samples // 2:], X[:n_samples // 2])
(y_train, y_test) = (y[n_samples // 2:], y[:n_samples // 2])
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
assert np.sum(s_clf.coef_ != 0.0) == n_informative
|
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
<DeepExtract>
random_state = np.random.RandomState(seed)
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
(X, y) = (X, y)
</DeepExtract>
(X_train, X_test) = (X[n_samples // 2:], X[:n_samples // 2])
(y_train, y_test) = (y[n_samples // 2:], y[:n_samples // 2])
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-07)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
assert np.sum(s_clf.coef_ != 0.0) == n_informative
|
def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
if random_state is not None:
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == 'random_state' or key.endswith('__random_state'):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
if append:
self.estimators_.append(estimator)
return estimator
|
def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
if random_state is not None:
<DeepExtract>
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == 'random_state' or key.endswith('__random_state'):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
</DeepExtract>
if append:
self.estimators_.append(estimator)
return estimator
|
def check_transformer_preserve_dtypes(name, transformer_orig):
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
if '1darray' in _safe_tags(transformer_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer_orig, key='pairwise'):
X = kernel(X, X)
X = X
for dtype in _safe_tags(transformer_orig, key='preserves_dtype'):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans1 = transformer.fit_transform(X_cast, y)
X_trans2 = transformer.fit(X_cast, y).transform(X_cast)
for (Xt, method) in zip([X_trans1, X_trans2], ['fit_transform', 'transform']):
if isinstance(Xt, tuple):
Xt = Xt[0]
assert Xt.dtype == dtype, f'{name} (method={method}) does not preserve dtype. Original/Expected dtype={dtype.__name__}, got dtype={Xt.dtype}.'
|
def check_transformer_preserve_dtypes(name, transformer_orig):
(X, y) = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
<DeepExtract>
if '1darray' in _safe_tags(transformer_orig, key='X_types'):
X = X[:, 0]
if _safe_tags(transformer_orig, key='requires_positive_X'):
X = X - X.min()
if 'categorical' in _safe_tags(transformer_orig, key='X_types'):
X = (X - X.min()).astype(np.int32)
if transformer_orig.__class__.__name__ == 'SkewedChi2Sampler':
X = X - X.min()
if _is_pairwise_metric(transformer_orig):
X = pairwise_distances(X, metric='euclidean')
elif _safe_tags(transformer_orig, key='pairwise'):
X = kernel(X, X)
X = X
</DeepExtract>
for dtype in _safe_tags(transformer_orig, key='preserves_dtype'):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans1 = transformer.fit_transform(X_cast, y)
X_trans2 = transformer.fit(X_cast, y).transform(X_cast)
for (Xt, method) in zip([X_trans1, X_trans2], ['fit_transform', 'transform']):
if isinstance(Xt, tuple):
Xt = Xt[0]
assert Xt.dtype == dtype, f'{name} (method={method}) does not preserve dtype. Original/Expected dtype={dtype.__name__}, got dtype={Xt.dtype}.'
|
def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(X, y, X_offset_, y_offset_, X_scale_) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
(X, y, _) = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
(n_samples, n_features) = X.shape
eps = np.finfo(np.float64).eps
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1.0 / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.0
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
(U, S, Vh) = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
for iter_ in range(self.n_iter):
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(coef_, rmse_) = (coef_, rmse_)
if self.compute_score:
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
self.scores_.append(s)
gamma_ = np.sum(alpha_ * eigen_vals_ / (lambda_ + alpha_ * eigen_vals_))
lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)
alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print('Convergence after ', str(iter_), ' iterations')
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
self.alpha_ = alpha_
self.lambda_ = lambda_
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(self.coef_, rmse_) = (coef_, rmse_)
if self.compute_score:
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
scaled_sigma_ = np.dot(Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = 1.0 / alpha_ * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
|
def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
(X, y) = self._validate_data(X, y, dtype=[np.float64, np.float32], y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
(X, y, X_offset_, y_offset_, X_scale_) = _preprocess_data(X, y, self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
(X, y, _) = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
(n_samples, n_features) = X.shape
eps = np.finfo(np.float64).eps
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1.0 / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.0
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
(U, S, Vh) = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
for iter_ in range(self.n_iter):
<DeepExtract>
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(coef_, rmse_) = (coef_, rmse_)
</DeepExtract>
if self.compute_score:
<DeepExtract>
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
</DeepExtract>
self.scores_.append(s)
gamma_ = np.sum(alpha_ * eigen_vals_ / (lambda_ + alpha_ * eigen_vals_))
lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)
alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print('Convergence after ', str(iter_), ' iterations')
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
self.alpha_ = alpha_
self.lambda_ = lambda_
<DeepExtract>
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y])
else:
coef_ = np.linalg.multi_dot([X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
(self.coef_, rmse_) = (coef_, rmse_)
</DeepExtract>
if self.compute_score:
<DeepExtract>
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals_))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals_
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - lambda_ * np.sum(coef_ ** 2) + logdet_sigma - n_samples * log(2 * np.pi))
s = score
</DeepExtract>
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
scaled_sigma_ = np.dot(Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = 1.0 / alpha_ * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
|
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
if self.validate:
self.transform(X[idx_selected]) = check_array(self.transform(X[idx_selected]), accept_sparse=self.accept_sparse)
X_round_trip = self._transform(self.transform(X[idx_selected]), func=self.inverse_func, kw_args=self.inv_kw_args)
if hasattr(X, 'dtype'):
dtypes = [X.dtype]
elif hasattr(X, 'dtypes'):
dtypes = X.dtypes
if not all((np.issubdtype(d, np.number) for d in dtypes)):
raise ValueError("'check_inverse' is only supported when all the elements in `X` is numerical.")
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn("The provided functions are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'.", UserWarning)
|
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
<DeepExtract>
if self.validate:
self.transform(X[idx_selected]) = check_array(self.transform(X[idx_selected]), accept_sparse=self.accept_sparse)
X_round_trip = self._transform(self.transform(X[idx_selected]), func=self.inverse_func, kw_args=self.inv_kw_args)
</DeepExtract>
if hasattr(X, 'dtype'):
dtypes = [X.dtype]
elif hasattr(X, 'dtypes'):
dtypes = X.dtypes
if not all((np.issubdtype(d, np.number) for d in dtypes)):
raise ValueError("'check_inverse' is only supported when all the elements in `X` is numerical.")
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn("The provided functions are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'.", UserWarning)
|
def buchheim(tree):
if len(DrawTree(tree).children) == 0:
if DrawTree(tree).lmost_sibling:
DrawTree(tree).x = DrawTree(tree).lbrother().x + distance
else:
DrawTree(tree).x = 0.0
else:
default_ancestor = DrawTree(tree).children[0]
for w in DrawTree(tree).children:
first_walk(w)
default_ancestor = apportion(w, default_ancestor, distance)
execute_shifts(DrawTree(tree))
midpoint = (DrawTree(tree).children[0].x + DrawTree(tree).children[-1].x) / 2
w = DrawTree(tree).lbrother()
if w:
DrawTree(tree).x = w.x + distance
DrawTree(tree).mod = DrawTree(tree).x - midpoint
else:
DrawTree(tree).x = midpoint
dt = DrawTree(tree)
dt.x += m
dt.y = depth
if min is None or dt.x < min:
min = dt.x
for w in dt.children:
min = second_walk(w, m + dt.mod, depth + 1, min)
min = min
if min < 0:
dt.x += -min
for c in dt.children:
third_walk(c, -min)
return dt
|
def buchheim(tree):
<DeepExtract>
if len(DrawTree(tree).children) == 0:
if DrawTree(tree).lmost_sibling:
DrawTree(tree).x = DrawTree(tree).lbrother().x + distance
else:
DrawTree(tree).x = 0.0
else:
default_ancestor = DrawTree(tree).children[0]
for w in DrawTree(tree).children:
first_walk(w)
default_ancestor = apportion(w, default_ancestor, distance)
execute_shifts(DrawTree(tree))
midpoint = (DrawTree(tree).children[0].x + DrawTree(tree).children[-1].x) / 2
w = DrawTree(tree).lbrother()
if w:
DrawTree(tree).x = w.x + distance
DrawTree(tree).mod = DrawTree(tree).x - midpoint
else:
DrawTree(tree).x = midpoint
dt = DrawTree(tree)
</DeepExtract>
<DeepExtract>
dt.x += m
dt.y = depth
if min is None or dt.x < min:
min = dt.x
for w in dt.children:
min = second_walk(w, m + dt.mod, depth + 1, min)
min = min
</DeepExtract>
if min < 0:
<DeepExtract>
dt.x += -min
for c in dt.children:
third_walk(c, -min)
</DeepExtract>
return dt
|