before: string, lengths 87 to 36.6k
after: string, lengths 116 to 37.2k
@pytest.mark.filterwarnings('ignore:WMinkowskiDistance:FutureWarning:sklearn') @pytest.mark.parametrize('metric', sorted(set(neighbors.VALID_METRICS['ball_tree']).intersection(neighbors.VALID_METRICS['brute']) - set(['pyfunc', *BOOL_METRICS]))) def test_neighbors_metrics(global_dtype, metric, n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5): algorithms = ['brute', 'ball_tree', 'kd_tree'] X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False) X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) rng = np.random.RandomState(1) weights = rng.random_sample(n_features) if metric == 'minkowski': minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)] if sp_version >= parse_version('1.8.0.dev0'): minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features))) metric_params_list = minkowski_kwargs if metric == 'wminkowski': weights /= weights.sum() wminkowski_kwargs = [dict(p=1.5, w=weights)] if sp_version < parse_version('1.8.0.dev0'): wminkowski_kwargs.append(dict(p=3, w=rng.rand(n_features))) metric_params_list = wminkowski_kwargs if metric == 'seuclidean': metric_params_list = [dict(V=rng.rand(n_features))] if metric == 'mahalanobis': A = rng.rand(n_features, n_features) VI = A + A.T + 3 * np.eye(n_features) metric_params_list = [dict(VI=VI)] metric_params_list = [{}] for metric_params in metric_params_list: exclude_kd_tree = metric not in neighbors.VALID_METRICS['kd_tree'] or ('minkowski' in metric and 'w' in metric_params) results = {} p = metric_params.pop('p', 2) for algorithm in algorithms: neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, metric=metric, p=p, metric_params=metric_params) if exclude_kd_tree and algorithm == 'kd_tree': with pytest.raises(ValueError): neigh.fit(X_train) continue if metric == 'haversine': feature_sl = slice(None, 2) X_train = np.ascontiguousarray(X_train[:, feature_sl]) X_test = np.ascontiguousarray(X_test[:, feature_sl]) neigh.fit(X_train) if metric == 'wminkowski' and algorithm == 'brute' and (sp_version >= parse_version('1.6.0')): with pytest.warns((FutureWarning, DeprecationWarning)): results[algorithm] = neigh.kneighbors(X_test, return_distance=True) else: results[algorithm] = neigh.kneighbors(X_test, return_distance=True) (brute_dst, brute_idx) = results['brute'] (ball_tree_dst, ball_tree_idx) = results['ball_tree'] assert_allclose(brute_dst, ball_tree_dst) assert_array_equal(brute_idx, ball_tree_idx) if not exclude_kd_tree: (kd_tree_dst, kd_tree_idx) = results['kd_tree'] assert_allclose(brute_dst, kd_tree_dst) assert_array_equal(brute_idx, kd_tree_idx) assert_allclose(ball_tree_dst, kd_tree_dst) assert_array_equal(ball_tree_idx, kd_tree_idx)
@pytest.mark.filterwarnings('ignore:WMinkowskiDistance:FutureWarning:sklearn') @pytest.mark.parametrize('metric', sorted(set(neighbors.VALID_METRICS['ball_tree']).intersection(neighbors.VALID_METRICS['brute']) - set(['pyfunc', *BOOL_METRICS]))) def test_neighbors_metrics(global_dtype, metric, n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5): algorithms = ['brute', 'ball_tree', 'kd_tree'] X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False) X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) <DeepExtract> rng = np.random.RandomState(1) weights = rng.random_sample(n_features) if metric == 'minkowski': minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)] if sp_version >= parse_version('1.8.0.dev0'): minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features))) metric_params_list = minkowski_kwargs if metric == 'wminkowski': weights /= weights.sum() wminkowski_kwargs = [dict(p=1.5, w=weights)] if sp_version < parse_version('1.8.0.dev0'): wminkowski_kwargs.append(dict(p=3, w=rng.rand(n_features))) metric_params_list = wminkowski_kwargs if metric == 'seuclidean': metric_params_list = [dict(V=rng.rand(n_features))] if metric == 'mahalanobis': A = rng.rand(n_features, n_features) VI = A + A.T + 3 * np.eye(n_features) metric_params_list = [dict(VI=VI)] metric_params_list = [{}] </DeepExtract> for metric_params in metric_params_list: exclude_kd_tree = metric not in neighbors.VALID_METRICS['kd_tree'] or ('minkowski' in metric and 'w' in metric_params) results = {} p = metric_params.pop('p', 2) for algorithm in algorithms: neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, metric=metric, p=p, metric_params=metric_params) if exclude_kd_tree and algorithm == 'kd_tree': with pytest.raises(ValueError): neigh.fit(X_train) continue if metric == 'haversine': feature_sl = slice(None, 2) X_train = np.ascontiguousarray(X_train[:, feature_sl]) X_test = np.ascontiguousarray(X_test[:, feature_sl]) neigh.fit(X_train) if metric == 'wminkowski' and algorithm == 'brute' and (sp_version >= parse_version('1.6.0')): with pytest.warns((FutureWarning, DeprecationWarning)): results[algorithm] = neigh.kneighbors(X_test, return_distance=True) else: results[algorithm] = neigh.kneighbors(X_test, return_distance=True) (brute_dst, brute_idx) = results['brute'] (ball_tree_dst, ball_tree_idx) = results['ball_tree'] assert_allclose(brute_dst, ball_tree_dst) assert_array_equal(brute_idx, ball_tree_idx) if not exclude_kd_tree: (kd_tree_dst, kd_tree_idx) = results['kd_tree'] assert_allclose(brute_dst, kd_tree_dst) assert_array_equal(brute_idx, kd_tree_idx) assert_allclose(ball_tree_dst, kd_tree_dst) assert_array_equal(ball_tree_idx, kd_tree_idx)
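# Minimal illustrative sketch (not part of the row above; public scikit-learn API only):
# the property the test exercises is that the 'brute' and 'ball_tree' backends return
# identical neighbors for a metric both support. Data sizes are arbitrary.
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X_train = rng.rand(20, 3)
X_test = rng.rand(2, 3)

results = {}
for algorithm in ("brute", "ball_tree"):
    nn = NearestNeighbors(n_neighbors=5, algorithm=algorithm, metric="euclidean")
    nn.fit(X_train)
    results[algorithm] = nn.kneighbors(X_test, return_distance=True)

brute_dst, brute_idx = results["brute"]
ball_dst, ball_idx = results["ball_tree"]
assert np.allclose(brute_dst, ball_dst)
assert np.array_equal(brute_idx, ball_idx)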
@pytest.mark.parametrize('n_points', [100, 10000]) def test_matthews_corrcoef_overflow(n_points): rng = np.random.RandomState(20170906) def mcc_safe(y_true, y_pred): conf_matrix = confusion_matrix(y_true, y_pred) true_pos = conf_matrix[1, 1] false_pos = conf_matrix[1, 0] false_neg = conf_matrix[0, 1] n_points = len(y_true) pos_rate = (true_pos + false_neg) / n_points activity = (true_pos + false_pos) / n_points mcc_numerator = true_pos / n_points - pos_rate * activity mcc_denominator = activity * pos_rate * (1 - activity) * (1 - pos_rate) return mcc_numerator / np.sqrt(mcc_denominator) def random_ys(n_points): x_true = rng.random_sample(n_points) x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5) y_true = x_true > 0.5 y_pred = x_pred > 0.5 return (y_true, y_pred) arr = np.repeat([0.0, 1.0], n_points) assert_almost_equal(matthews_corrcoef(arr, arr), 1.0) arr = np.repeat([0.0, 1.0, 2.0], n_points) assert_almost_equal(matthews_corrcoef(arr, arr), 1.0) x_true = rng.random_sample(n_points) x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5) y_true = x_true > 0.5 y_pred = x_pred > 0.5 (y_true, y_pred) = (y_true, y_pred) assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0) assert_almost_equal(matthews_corrcoef(y_true, y_pred), mcc_safe(y_true, y_pred))
@pytest.mark.parametrize('n_points', [100, 10000]) def test_matthews_corrcoef_overflow(n_points): rng = np.random.RandomState(20170906) def mcc_safe(y_true, y_pred): conf_matrix = confusion_matrix(y_true, y_pred) true_pos = conf_matrix[1, 1] false_pos = conf_matrix[1, 0] false_neg = conf_matrix[0, 1] n_points = len(y_true) pos_rate = (true_pos + false_neg) / n_points activity = (true_pos + false_pos) / n_points mcc_numerator = true_pos / n_points - pos_rate * activity mcc_denominator = activity * pos_rate * (1 - activity) * (1 - pos_rate) return mcc_numerator / np.sqrt(mcc_denominator) def random_ys(n_points): x_true = rng.random_sample(n_points) x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5) y_true = x_true > 0.5 y_pred = x_pred > 0.5 return (y_true, y_pred) arr = np.repeat([0.0, 1.0], n_points) assert_almost_equal(matthews_corrcoef(arr, arr), 1.0) arr = np.repeat([0.0, 1.0, 2.0], n_points) assert_almost_equal(matthews_corrcoef(arr, arr), 1.0) <DeepExtract> x_true = rng.random_sample(n_points) x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5) y_true = x_true > 0.5 y_pred = x_pred > 0.5 (y_true, y_pred) = (y_true, y_pred) </DeepExtract> assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0) assert_almost_equal(matthews_corrcoef(y_true, y_pred), mcc_safe(y_true, y_pred))
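# Illustrative sketch of the behaviour the test above guards: matthews_corrcoef stays
# numerically stable (no integer overflow) on large, perfectly matching label arrays.
# Public API only; the array size is chosen arbitrarily.
import numpy as np
from sklearn.metrics import matthews_corrcoef

arr = np.repeat([0.0, 1.0], 10_000)
print(matthews_corrcoef(arr, arr))  # expected: 1.0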
def _pairwise_similarity(a, b, similarity): """Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j. """ check_consistent_length(*a) check_consistent_length(*b) checks = lambda x: check_array(x, ensure_2d=False) (a_rows, a_cols) = map(checks, a) (b_rows, b_cols) = map(checks, b) (a_rows, a_cols, b_rows, b_cols) = (a_rows, a_cols, b_rows, b_cols) n_a = a_rows.shape[0] n_b = b_rows.shape[0] result = np.array([[similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)] for i in range(n_a)]) return result
def _pairwise_similarity(a, b, similarity): """Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j. """ <DeepExtract> check_consistent_length(*a) check_consistent_length(*b) checks = lambda x: check_array(x, ensure_2d=False) (a_rows, a_cols) = map(checks, a) (b_rows, b_cols) = map(checks, b) (a_rows, a_cols, b_rows, b_cols) = (a_rows, a_cols, b_rows, b_cols) </DeepExtract> n_a = a_rows.shape[0] n_b = b_rows.shape[0] result = np.array([[similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)] for i in range(n_a)]) return result
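# Hedged usage sketch: consensus_score is the public bicluster metric that relies on this
# pairwise Jaccard helper. The bicluster indicator arrays below are made up for illustration.
import numpy as np
from sklearn.metrics import consensus_score

a_rows = np.array([[True, True, False, False],
                   [False, False, True, True]])
a_cols = np.array([[True, False, True, False],
                   [False, True, False, True]])
biclusters = (a_rows, a_cols)
print(consensus_score(biclusters, biclusters))  # identical biclusterings -> 1.0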
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor]) def test_average_sparse(klass): eta = 0.001 alpha = 0.01 clf = klass(loss='squared_error', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, max_iter=1, average=True, shuffle=False) n_samples = Y3.shape[0] clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)]) clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):]) if weight_init is None: weights = np.zeros(X3.shape[1]) else: weights = weight_init average_weights = np.zeros(X3.shape[1]) intercept = intercept_init average_intercept = 0.0 decay = 1.0 if klass in (SparseSGDClassifier, SparseSGDRegressor): decay = 0.01 for (i, entry) in enumerate(X3): p = np.dot(entry, weights) p += intercept gradient = p - Y3[i] weights *= 1.0 - eta * alpha weights += -(eta * gradient * entry) intercept += -(eta * gradient) * decay average_weights *= i average_weights += weights average_weights /= i + 1.0 average_intercept *= i average_intercept += intercept average_intercept /= i + 1.0 (average_weights, average_intercept) = (average_weights, average_intercept) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor]) def test_average_sparse(klass): eta = 0.001 alpha = 0.01 clf = klass(loss='squared_error', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, max_iter=1, average=True, shuffle=False) n_samples = Y3.shape[0] clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)]) clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):]) <DeepExtract> if weight_init is None: weights = np.zeros(X3.shape[1]) else: weights = weight_init average_weights = np.zeros(X3.shape[1]) intercept = intercept_init average_intercept = 0.0 decay = 1.0 if klass in (SparseSGDClassifier, SparseSGDRegressor): decay = 0.01 for (i, entry) in enumerate(X3): p = np.dot(entry, weights) p += intercept gradient = p - Y3[i] weights *= 1.0 - eta * alpha weights += -(eta * gradient * entry) intercept += -(eta * gradient) * decay average_weights *= i average_weights += weights average_weights /= i + 1.0 average_intercept *= i average_intercept += intercept average_intercept /= i + 1.0 (average_weights, average_intercept) = (average_weights, average_intercept) </DeepExtract> assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
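# Hedged sketch of the estimator behaviour being tested: with average=True, SGDRegressor
# exposes the averaged weights in coef_ / intercept_, which the test compares against a
# hand-rolled ASGD loop. The data below is synthetic and the hyperparameters only loosely
# mirror the test.
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
X = rng.rand(50, 4)
y = X @ np.array([1.0, -2.0, 0.5, 0.0]) + 0.1 * rng.randn(50)

reg = SGDRegressor(loss="squared_error", learning_rate="constant", eta0=0.001,
                   alpha=0.01, average=True, shuffle=False)
reg.partial_fit(X, y)  # a single pass, as in the test
print(reg.coef_, reg.intercept_)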
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None, color=False, resize=None): """Perform the actual data loading for the LFW pairs dataset This operation is meant to be cached by a joblib wrapper. """ with open(index_file_path, 'rb') as index_file: split_lines = [ln.decode().strip().split('\t') for ln in index_file] pair_specs = [sl for sl in split_lines if len(sl) > 2] n_pairs = len(pair_specs) target = np.zeros(n_pairs, dtype=int) file_paths = list() for (i, components) in enumerate(pair_specs): if len(components) == 3: target[i] = 1 pair = ((components[0], int(components[1]) - 1), (components[0], int(components[2]) - 1)) elif len(components) == 4: target[i] = 0 pair = ((components[0], int(components[1]) - 1), (components[2], int(components[3]) - 1)) else: raise ValueError('invalid line %d: %r' % (i + 1, components)) for (j, (name, idx)) in enumerate(pair): try: person_folder = join(data_folder_path, name) except TypeError: person_folder = join(data_folder_path, str(name, 'UTF-8')) filenames = list(sorted(listdir(person_folder))) file_path = join(person_folder, filenames[idx]) file_paths.append(file_path) try: from PIL import Image except ImportError: raise ImportError('The Python Imaging Library (PIL) is required to load data from jpeg files. Please refer to https://pillow.readthedocs.io/en/stable/installation.html for installing PIL.') default_slice = (slice(0, 250), slice(0, 250)) if slice_ is None: slice_ = default_slice else: slice_ = tuple((s or ds for (s, ds) in zip(slice_, default_slice))) (h_slice, w_slice) = slice_ h = (h_slice.stop - h_slice.start) // (h_slice.step or 1) w = (w_slice.stop - w_slice.start) // (w_slice.step or 1) if resize is not None: resize = float(resize) h = int(resize * h) w = int(resize * w) n_faces = len(file_paths) if not color: faces = np.zeros((n_faces, h, w), dtype=np.float32) else: faces = np.zeros((n_faces, h, w, 3), dtype=np.float32) for (i, file_path) in enumerate(file_paths): if i % 1000 == 0: logger.debug('Loading face #%05d / %05d', i + 1, n_faces) pil_img = Image.open(file_path) pil_img = pil_img.crop((w_slice.start, h_slice.start, w_slice.stop, h_slice.stop)) if resize is not None: pil_img = pil_img.resize((w, h)) face = np.asarray(pil_img, dtype=np.float32) if face.ndim == 0: raise RuntimeError('Failed to read the image file %s, Please make sure that libjpeg is installed' % file_path) face /= 255.0 if not color: face = face.mean(axis=2) faces[i, ...] = face pairs = faces shape = list(pairs.shape) n_faces = shape.pop(0) shape.insert(0, 2) shape.insert(0, n_faces // 2) pairs.shape = shape return (pairs, target, np.array(['Different persons', 'Same person']))
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None, color=False, resize=None): """Perform the actual data loading for the LFW pairs dataset This operation is meant to be cached by a joblib wrapper. """ with open(index_file_path, 'rb') as index_file: split_lines = [ln.decode().strip().split('\t') for ln in index_file] pair_specs = [sl for sl in split_lines if len(sl) > 2] n_pairs = len(pair_specs) target = np.zeros(n_pairs, dtype=int) file_paths = list() for (i, components) in enumerate(pair_specs): if len(components) == 3: target[i] = 1 pair = ((components[0], int(components[1]) - 1), (components[0], int(components[2]) - 1)) elif len(components) == 4: target[i] = 0 pair = ((components[0], int(components[1]) - 1), (components[2], int(components[3]) - 1)) else: raise ValueError('invalid line %d: %r' % (i + 1, components)) for (j, (name, idx)) in enumerate(pair): try: person_folder = join(data_folder_path, name) except TypeError: person_folder = join(data_folder_path, str(name, 'UTF-8')) filenames = list(sorted(listdir(person_folder))) file_path = join(person_folder, filenames[idx]) file_paths.append(file_path) <DeepExtract> try: from PIL import Image except ImportError: raise ImportError('The Python Imaging Library (PIL) is required to load data from jpeg files. Please refer to https://pillow.readthedocs.io/en/stable/installation.html for installing PIL.') default_slice = (slice(0, 250), slice(0, 250)) if slice_ is None: slice_ = default_slice else: slice_ = tuple((s or ds for (s, ds) in zip(slice_, default_slice))) (h_slice, w_slice) = slice_ h = (h_slice.stop - h_slice.start) // (h_slice.step or 1) w = (w_slice.stop - w_slice.start) // (w_slice.step or 1) if resize is not None: resize = float(resize) h = int(resize * h) w = int(resize * w) n_faces = len(file_paths) if not color: faces = np.zeros((n_faces, h, w), dtype=np.float32) else: faces = np.zeros((n_faces, h, w, 3), dtype=np.float32) for (i, file_path) in enumerate(file_paths): if i % 1000 == 0: logger.debug('Loading face #%05d / %05d', i + 1, n_faces) pil_img = Image.open(file_path) pil_img = pil_img.crop((w_slice.start, h_slice.start, w_slice.stop, h_slice.stop)) if resize is not None: pil_img = pil_img.resize((w, h)) face = np.asarray(pil_img, dtype=np.float32) if face.ndim == 0: raise RuntimeError('Failed to read the image file %s, Please make sure that libjpeg is installed' % file_path) face /= 255.0 if not color: face = face.mean(axis=2) faces[i, ...] = face pairs = faces </DeepExtract> shape = list(pairs.shape) n_faces = shape.pop(0) shape.insert(0, 2) shape.insert(0, n_faces // 2) pairs.shape = shape return (pairs, target, np.array(['Different persons', 'Same person']))
def predict(self, X): """Predict using the kernel ridge model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. If kernel == "precomputed" this is instead a precomputed kernel matrix, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for this estimator. Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. """ check_is_fitted(self) X = self._validate_data(X, accept_sparse=('csr', 'csc'), reset=False) if callable(self.kernel): params = self.kernel_params or {} else: params = {'gamma': self.gamma, 'degree': self.degree, 'coef0': self.coef0} K = pairwise_kernels(X, self.X_fit_, metric=self.kernel, filter_params=True, **params) return np.dot(K, self.dual_coef_)
def predict(self, X): """Predict using the kernel ridge model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. If kernel == "precomputed" this is instead a precomputed kernel matrix, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for this estimator. Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. """ check_is_fitted(self) X = self._validate_data(X, accept_sparse=('csr', 'csc'), reset=False) <DeepExtract> if callable(self.kernel): params = self.kernel_params or {} else: params = {'gamma': self.gamma, 'degree': self.degree, 'coef0': self.coef0} K = pairwise_kernels(X, self.X_fit_, metric=self.kernel, filter_params=True, **params) </DeepExtract> return np.dot(K, self.dual_coef_)
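# Minimal usage sketch of the predict path documented above: KernelRidge computes the
# kernel between X and the training samples and applies the dual coefficients. The
# hyperparameters are illustrative only.
import numpy as np
from sklearn.kernel_ridge import KernelRidge

rng = np.random.RandomState(0)
X = rng.rand(40, 3)
y = np.sin(X[:, 0]) + 0.1 * rng.randn(40)

model = KernelRidge(kernel="rbf", alpha=1.0, gamma=1.0).fit(X, y)
print(model.predict(X[:5]))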
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like']}) def mean_absolute_error(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average'): """Mean absolute error regression loss. Read more in the :ref:`User Guide <mean_absolute_error>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. MAE output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> mean_absolute_error(y_true, y_pred) 0.75 >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values') array([0.5, 1. ]) >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.85... """ check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1])) n_outputs = y_true.shape[1] allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted') if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput)) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError('Custom weights are useful only in multi-output cases.') elif n_outputs != len(multioutput): raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(multioutput), n_outputs)) y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput' (y_type, y_true, y_pred, multioutput) = (y_type, y_true, y_pred, multioutput) check_consistent_length(y_true, y_pred, sample_weight) output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0) if isinstance(multioutput, str): if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': multioutput = None return np.average(output_errors, weights=multioutput)
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like']}) def mean_absolute_error(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average'): """Mean absolute error regression loss. Read more in the :ref:`User Guide <mean_absolute_error>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. MAE output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> mean_absolute_error(y_true, y_pred) 0.75 >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values') array([0.5, 1. ]) >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.85... """ <DeepExtract> check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1])) n_outputs = y_true.shape[1] allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted') if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput)) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError('Custom weights are useful only in multi-output cases.') elif n_outputs != len(multioutput): raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' % (len(multioutput), n_outputs)) y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput' (y_type, y_true, y_pred, multioutput) = (y_type, y_true, y_pred, multioutput) </DeepExtract> check_consistent_length(y_true, y_pred, sample_weight) output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0) if isinstance(multioutput, str): if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': multioutput = None return np.average(output_errors, weights=multioutput)
@ignore_warnings(category=FutureWarning) def check_classifier_data_not_an_array(name, estimator_orig): X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1], [0, 3], [1, 0], [2, 0], [4, 4], [2, 3], [3, 2]]) if '1darray' in _safe_tags(estimator_orig, key='X_types'): X = X[:, 0] if _safe_tags(estimator_orig, key='requires_positive_X'): X = X - X.min() if 'categorical' in _safe_tags(estimator_orig, key='X_types'): X = (X - X.min()).astype(np.int32) if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler': X = X - X.min() if _is_pairwise_metric(estimator_orig): X = pairwise_distances(X, metric='euclidean') elif _safe_tags(estimator_orig, key='pairwise'): X = kernel(X, X) X = X y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2]) if _safe_tags(estimator_orig, key='requires_positive_y'): y += 1 + abs(y.min()) if _safe_tags(estimator_orig, key='binary_only') and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) if _safe_tags(estimator_orig, key='multioutput_only'): y = np.reshape(y, (-1, 1)) y = y for obj_type in ['NotAnArray', 'PandasDataframe']: if name in CROSS_DECOMPOSITION: raise SkipTest('Skipping check_estimators_data_not_an_array for cross decomposition module as estimators are not deterministic.') estimator_1 = clone(estimator_orig) estimator_2 = clone(estimator_orig) set_random_state(estimator_1) set_random_state(estimator_2) if obj_type not in ['NotAnArray', 'PandasDataframe']: raise ValueError('Data type {0} not supported'.format(obj_type)) if obj_type == 'NotAnArray': y_ = _NotAnArray(np.asarray(y)) X_ = _NotAnArray(np.asarray(X)) else: try: import pandas as pd y_ = np.asarray(y) if y_.ndim == 1: y_ = pd.Series(y_) else: y_ = pd.DataFrame(y_) X_ = pd.DataFrame(np.asarray(X)) except ImportError: raise SkipTest('pandas is not installed: not checking estimators for pandas objects.') estimator_1.fit(X_, y_) pred1 = estimator_1.predict(X_) estimator_2.fit(X, y) pred2 = estimator_2.predict(X) assert_allclose(pred1, pred2, atol=0.01, err_msg=name)
@ignore_warnings(category=FutureWarning) def check_classifier_data_not_an_array(name, estimator_orig): X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1], [0, 3], [1, 0], [2, 0], [4, 4], [2, 3], [3, 2]]) <DeepExtract> if '1darray' in _safe_tags(estimator_orig, key='X_types'): X = X[:, 0] if _safe_tags(estimator_orig, key='requires_positive_X'): X = X - X.min() if 'categorical' in _safe_tags(estimator_orig, key='X_types'): X = (X - X.min()).astype(np.int32) if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler': X = X - X.min() if _is_pairwise_metric(estimator_orig): X = pairwise_distances(X, metric='euclidean') elif _safe_tags(estimator_orig, key='pairwise'): X = kernel(X, X) X = X </DeepExtract> y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2]) <DeepExtract> if _safe_tags(estimator_orig, key='requires_positive_y'): y += 1 + abs(y.min()) if _safe_tags(estimator_orig, key='binary_only') and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) if _safe_tags(estimator_orig, key='multioutput_only'): y = np.reshape(y, (-1, 1)) y = y </DeepExtract> for obj_type in ['NotAnArray', 'PandasDataframe']: <DeepExtract> if name in CROSS_DECOMPOSITION: raise SkipTest('Skipping check_estimators_data_not_an_array for cross decomposition module as estimators are not deterministic.') estimator_1 = clone(estimator_orig) estimator_2 = clone(estimator_orig) set_random_state(estimator_1) set_random_state(estimator_2) if obj_type not in ['NotAnArray', 'PandasDataframe']: raise ValueError('Data type {0} not supported'.format(obj_type)) if obj_type == 'NotAnArray': y_ = _NotAnArray(np.asarray(y)) X_ = _NotAnArray(np.asarray(X)) else: try: import pandas as pd y_ = np.asarray(y) if y_.ndim == 1: y_ = pd.Series(y_) else: y_ = pd.DataFrame(y_) X_ = pd.DataFrame(np.asarray(X)) except ImportError: raise SkipTest('pandas is not installed: not checking estimators for pandas objects.') estimator_1.fit(X_, y_) pred1 = estimator_1.predict(X_) estimator_2.fit(X, y) pred2 = estimator_2.predict(X) assert_allclose(pred1, pred2, atol=0.01, err_msg=name) </DeepExtract>
def smacof(dissimilarities, *, metric=True, n_components=2, init=None, n_init=8, n_jobs=None, max_iter=300, verbose=0, eps=0.001, random_state=None, return_n_iter=False, normalized_stress='warn'): """Compute multidimensional scaling using the SMACOF algorithm. The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a multidimensional scaling algorithm which minimizes an objective function (the *stress*) using a majorization technique. Stress majorization, also known as the Guttman Transform, guarantees a monotone convergence of stress, and is more powerful than traditional techniques such as gradient descent. The SMACOF algorithm for metric MDS can be summarized by the following steps: 1. Set an initial start configuration, randomly or not. 2. Compute the stress 3. Compute the Guttman Transform 4. Iterate 2 and 3 until convergence. The nonmetric algorithm adds a monotonic regression step before computing the stress. Parameters ---------- dissimilarities : ndarray of shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : bool, default=True Compute metric or nonmetric SMACOF algorithm. When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as missing values. n_components : int, default=2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. n_init : int, default=8 Number of times the SMACOF algorithm will be run with different initializations. The final results will be the best output of the runs, determined by the run with the smallest final stress. If ``init`` is provided, this option is overridden and a single run is performed. n_jobs : int, default=None The number of jobs to use for the computation. If multiple initializations are used (``n_init``), each run of the algorithm is computed in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. max_iter : int, default=300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, default=0 Level of verbosity. eps : float, default=1e-3 Relative tolerance with respect to stress at which to declare convergence. The value of `eps` should be tuned separately depending on whether or not `normalized_stress` is being used. random_state : int, RandomState instance or None, default=None Determines the random number generator used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. return_n_iter : bool, default=False Whether or not to return the number of iterations. normalized_stress : bool or "auto" default=False Whether use and return normed stress value (Stress-1) instead of raw stress calculated by default. Only supported in non-metric MDS. .. versionadded:: 1.2 Returns ------- X : ndarray of shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). If `normalized_stress=True`, and `metric=False` returns Stress-1. 
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good, 0.1 fair, and 0.2 poor [1]_. n_iter : int The number of iterations corresponding to the best stress. Returned only if ``return_n_iter`` is set to ``True``. References ---------- .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) """ dissimilarities = check_array(dissimilarities) random_state = check_random_state(random_state) if normalized_stress == 'warn': warnings.warn("The default value of `normalized_stress` will change to `'auto'` in version 1.4. To suppress this warning, manually set the value of `normalized_stress`.", FutureWarning) normalized_stress = False if normalized_stress == 'auto': normalized_stress = not metric if normalized_stress and metric: raise ValueError('Normalized stress is not supported for metric MDS. Either set `normalized_stress=False` or use `metric=False`.') if hasattr(init, '__array__'): init = np.asarray(init).copy() if not n_init == 1: warnings.warn('Explicit initial positions passed: performing only one init of the MDS instead of %d' % n_init) n_init = 1 (best_pos, best_stress) = (None, None) if effective_n_jobs(n_jobs) == 1: for it in range(n_init): dissimilarities = check_symmetric(dissimilarities, raise_exception=True) n_samples = dissimilarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: X = random_state.uniform(size=n_samples * n_components) X = X.reshape((n_samples, n_components)) else: n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError('init matrix should be of shape (%d, %d)' % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): dis = euclidean_distances(X) if metric: disparities = dissimilarities else: dis_flat = dis.ravel() dis_flat_w = dis_flat[sim_flat != 0] disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt(n_samples * (n_samples - 1) / 2 / (disparities ** 2).sum()) stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 if normalized_stress: stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2)) dis[dis == 0] = 1e-05 ratio = disparities / dis B = -ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1.0 / n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if old_stress - stress / dis < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis (pos, stress, n_iter_) = (X, stress, it + 1) if best_stress is None or stress < best_stress: best_stress = stress best_pos = pos.copy() best_iter = n_iter_ else: seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))((delayed(_smacof_single)(dissimilarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=seed, normalized_stress=normalized_stress) 
for seed in seeds)) (positions, stress, n_iters) = zip(*results) best = np.argmin(stress) best_stress = stress[best] best_pos = positions[best] best_iter = n_iters[best] if return_n_iter: return (best_pos, best_stress, best_iter) else: return (best_pos, best_stress)
def smacof(dissimilarities, *, metric=True, n_components=2, init=None, n_init=8, n_jobs=None, max_iter=300, verbose=0, eps=0.001, random_state=None, return_n_iter=False, normalized_stress='warn'): """Compute multidimensional scaling using the SMACOF algorithm. The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a multidimensional scaling algorithm which minimizes an objective function (the *stress*) using a majorization technique. Stress majorization, also known as the Guttman Transform, guarantees a monotone convergence of stress, and is more powerful than traditional techniques such as gradient descent. The SMACOF algorithm for metric MDS can be summarized by the following steps: 1. Set an initial start configuration, randomly or not. 2. Compute the stress 3. Compute the Guttman Transform 4. Iterate 2 and 3 until convergence. The nonmetric algorithm adds a monotonic regression step before computing the stress. Parameters ---------- dissimilarities : ndarray of shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : bool, default=True Compute metric or nonmetric SMACOF algorithm. When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as missing values. n_components : int, default=2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. n_init : int, default=8 Number of times the SMACOF algorithm will be run with different initializations. The final results will be the best output of the runs, determined by the run with the smallest final stress. If ``init`` is provided, this option is overridden and a single run is performed. n_jobs : int, default=None The number of jobs to use for the computation. If multiple initializations are used (``n_init``), each run of the algorithm is computed in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. max_iter : int, default=300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, default=0 Level of verbosity. eps : float, default=1e-3 Relative tolerance with respect to stress at which to declare convergence. The value of `eps` should be tuned separately depending on whether or not `normalized_stress` is being used. random_state : int, RandomState instance or None, default=None Determines the random number generator used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. return_n_iter : bool, default=False Whether or not to return the number of iterations. normalized_stress : bool or "auto" default=False Whether use and return normed stress value (Stress-1) instead of raw stress calculated by default. Only supported in non-metric MDS. .. versionadded:: 1.2 Returns ------- X : ndarray of shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). If `normalized_stress=True`, and `metric=False` returns Stress-1. 
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good, 0.1 fair, and 0.2 poor [1]_. n_iter : int The number of iterations corresponding to the best stress. Returned only if ``return_n_iter`` is set to ``True``. References ---------- .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) """ dissimilarities = check_array(dissimilarities) random_state = check_random_state(random_state) if normalized_stress == 'warn': warnings.warn("The default value of `normalized_stress` will change to `'auto'` in version 1.4. To suppress this warning, manually set the value of `normalized_stress`.", FutureWarning) normalized_stress = False if normalized_stress == 'auto': normalized_stress = not metric if normalized_stress and metric: raise ValueError('Normalized stress is not supported for metric MDS. Either set `normalized_stress=False` or use `metric=False`.') if hasattr(init, '__array__'): init = np.asarray(init).copy() if not n_init == 1: warnings.warn('Explicit initial positions passed: performing only one init of the MDS instead of %d' % n_init) n_init = 1 (best_pos, best_stress) = (None, None) if effective_n_jobs(n_jobs) == 1: for it in range(n_init): <DeepExtract> dissimilarities = check_symmetric(dissimilarities, raise_exception=True) n_samples = dissimilarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: X = random_state.uniform(size=n_samples * n_components) X = X.reshape((n_samples, n_components)) else: n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError('init matrix should be of shape (%d, %d)' % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): dis = euclidean_distances(X) if metric: disparities = dissimilarities else: dis_flat = dis.ravel() dis_flat_w = dis_flat[sim_flat != 0] disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt(n_samples * (n_samples - 1) / 2 / (disparities ** 2).sum()) stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 if normalized_stress: stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2)) dis[dis == 0] = 1e-05 ratio = disparities / dis B = -ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1.0 / n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if old_stress - stress / dis < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis (pos, stress, n_iter_) = (X, stress, it + 1) </DeepExtract> if best_stress is None or stress < best_stress: best_stress = stress best_pos = pos.copy() best_iter = n_iter_ else: seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))((delayed(_smacof_single)(dissimilarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=seed, 
normalized_stress=normalized_stress) for seed in seeds)) (positions, stress, n_iters) = zip(*results) best = np.argmin(stress) best_stress = stress[best] best_pos = positions[best] best_iter = n_iters[best] if return_n_iter: return (best_pos, best_stress, best_iter) else: return (best_pos, best_stress)
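# Hedged sketch of calling the public function documented above on a small symmetric
# dissimilarity matrix; normalized_stress is set explicitly to avoid the FutureWarning
# mentioned in the docstring. Sizes and seeds are arbitrary.
import numpy as np
from sklearn.manifold import smacof
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(0)
D = euclidean_distances(rng.rand(10, 3))  # symmetric dissimilarities

embedding, stress = smacof(D, n_components=2, n_init=4, random_state=0,
                           normalized_stress=False)
print(embedding.shape, stress)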
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM]) def test_average_sparse_oneclass(klass): eta = 0.001 nu = 0.01 clf = klass(learning_rate='constant', eta0=eta, nu=nu, fit_intercept=True, max_iter=1, average=True, shuffle=False) n_samples = X3.shape[0] clf.partial_fit(X3[:int(n_samples / 2)]) clf.partial_fit(X3[int(n_samples / 2):]) if coef_init is None: coef = np.zeros(X3.shape[1]) else: coef = coef_init average_coef = np.zeros(X3.shape[1]) offset = offset_init intercept = 1 - offset average_intercept = 0.0 decay = 1.0 if klass == SparseSGDOneClassSVM: decay = 0.01 for (i, entry) in enumerate(X3): p = np.dot(entry, coef) p += intercept if p <= 1.0: gradient = -1 else: gradient = 0 coef *= max(0, 1.0 - eta * nu / 2) coef += -(eta * gradient * entry) intercept += -(eta * (nu + gradient)) * decay average_coef *= i average_coef += coef average_coef /= i + 1.0 average_intercept *= i average_intercept += intercept average_intercept /= i + 1.0 (average_coef, average_offset) = (average_coef, 1 - average_intercept) assert_allclose(clf.coef_, average_coef) assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM]) def test_average_sparse_oneclass(klass): eta = 0.001 nu = 0.01 clf = klass(learning_rate='constant', eta0=eta, nu=nu, fit_intercept=True, max_iter=1, average=True, shuffle=False) n_samples = X3.shape[0] clf.partial_fit(X3[:int(n_samples / 2)]) clf.partial_fit(X3[int(n_samples / 2):]) <DeepExtract> if coef_init is None: coef = np.zeros(X3.shape[1]) else: coef = coef_init average_coef = np.zeros(X3.shape[1]) offset = offset_init intercept = 1 - offset average_intercept = 0.0 decay = 1.0 if klass == SparseSGDOneClassSVM: decay = 0.01 for (i, entry) in enumerate(X3): p = np.dot(entry, coef) p += intercept if p <= 1.0: gradient = -1 else: gradient = 0 coef *= max(0, 1.0 - eta * nu / 2) coef += -(eta * gradient * entry) intercept += -(eta * (nu + gradient)) * decay average_coef *= i average_coef += coef average_coef /= i + 1.0 average_intercept *= i average_intercept += intercept average_intercept /= i + 1.0 (average_coef, average_offset) = (average_coef, 1 - average_intercept) </DeepExtract> assert_allclose(clf.coef_, average_coef) assert_allclose(clf.offset_, average_offset)
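# Hedged sketch of the averaged one-class estimator exercised above; the data is synthetic
# and the hyperparameters only loosely mirror the test.
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(0)
X = rng.rand(50, 4)

clf = SGDOneClassSVM(nu=0.01, learning_rate="constant", eta0=0.001,
                     average=True, shuffle=False)
clf.partial_fit(X)
print(clf.coef_, clf.offset_)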
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False): ax1.set_title(title) ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8) ax1.set_xlim(-2.0, 2.0) ax1.set_ylim(-3.0, 3.0) ax1.set_xticks(()) ax1.set_yticks(()) for n in range(estimator.means_.shape[0]): (eig_vals, eig_vecs) = np.linalg.eigh(estimator.covariances_[n]) unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0]) angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0]) angle = 180 * angle / np.pi eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals) ell = mpl.patches.Ellipse(estimator.means_[n], eig_vals[0], eig_vals[1], angle=180 + angle, edgecolor='black') ell.set_clip_box(ax1.bbox) ell.set_alpha(estimator.weights_[n]) ell.set_facecolor('#56B4E9') ax1.add_artist(ell) ax2.get_xaxis().set_tick_params(direction='out') ax2.yaxis.grid(True, alpha=0.7) for (k, w) in enumerate(estimator.weights_): ax2.bar(k, w, width=0.9, color='#56B4E9', zorder=3, align='center', edgecolor='black') ax2.text(k, w + 0.007, '%.1f%%' % (w * 100.0), horizontalalignment='center') ax2.set_xlim(-0.6, 2 * n_components - 0.4) ax2.set_ylim(0.0, 1.1) ax2.tick_params(axis='y', which='both', left=False, right=False, labelleft=False) ax2.tick_params(axis='x', which='both', top=False) if plot_title: ax1.set_ylabel('Estimated Mixtures') ax2.set_ylabel('Weight of each component')
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False): ax1.set_title(title) ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8) ax1.set_xlim(-2.0, 2.0) ax1.set_ylim(-3.0, 3.0) ax1.set_xticks(()) ax1.set_yticks(()) <DeepExtract> for n in range(estimator.means_.shape[0]): (eig_vals, eig_vecs) = np.linalg.eigh(estimator.covariances_[n]) unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0]) angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0]) angle = 180 * angle / np.pi eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals) ell = mpl.patches.Ellipse(estimator.means_[n], eig_vals[0], eig_vals[1], angle=180 + angle, edgecolor='black') ell.set_clip_box(ax1.bbox) ell.set_alpha(estimator.weights_[n]) ell.set_facecolor('#56B4E9') ax1.add_artist(ell) </DeepExtract> ax2.get_xaxis().set_tick_params(direction='out') ax2.yaxis.grid(True, alpha=0.7) for (k, w) in enumerate(estimator.weights_): ax2.bar(k, w, width=0.9, color='#56B4E9', zorder=3, align='center', edgecolor='black') ax2.text(k, w + 0.007, '%.1f%%' % (w * 100.0), horizontalalignment='center') ax2.set_xlim(-0.6, 2 * n_components - 0.4) ax2.set_ylim(0.0, 1.1) ax2.tick_params(axis='y', which='both', left=False, right=False, labelleft=False) ax2.tick_params(axis='x', which='both', top=False) if plot_title: ax1.set_ylabel('Estimated Mixtures') ax2.set_ylabel('Weight of each component')
@pytest.mark.parametrize('missing_value', [None, np.nan, float('nan')]) @pytest.mark.parametrize('pickle_uniques', [True, False]) def test_check_unknown_missing_values(missing_value, pickle_uniques): values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object) uniques = np.array(['c', 'a', 'b', missing_value], dtype=object) if pickle_uniques: uniques = pickle.loads(pickle.dumps(uniques)) expected_diff = ['d'] expected_mask = [False, True, True, True, True] diff = _check_unknown(values, uniques) assert_array_equal(diff, expected_diff) (diff, valid_mask) = _check_unknown(values, uniques, return_mask=True) assert_array_equal(diff, expected_diff) assert_array_equal(valid_mask, expected_mask) values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object) uniques = np.array(['c', 'a', 'b'], dtype=object) if pickle_uniques: uniques = pickle.loads(pickle.dumps(uniques)) expected_diff = ['d', missing_value] expected_mask = [False, True, True, True, False] diff = _check_unknown(values, uniques) assert_array_equal(diff, expected_diff) (diff, valid_mask) = _check_unknown(values, uniques, return_mask=True) assert_array_equal(diff, expected_diff) assert_array_equal(valid_mask, expected_mask) values = np.array(['a', missing_value], dtype=object) uniques = np.array(['a', 'b', 'z'], dtype=object) if pickle_uniques: uniques = pickle.loads(pickle.dumps(uniques)) expected_diff = [missing_value] expected_mask = [True, False] diff = _check_unknown(values, uniques) assert_array_equal(diff, expected_diff) (diff, valid_mask) = _check_unknown(values, uniques, return_mask=True) assert_array_equal(diff, expected_diff) assert_array_equal(valid_mask, expected_mask)
@pytest.mark.parametrize('missing_value', [None, np.nan, float('nan')]) @pytest.mark.parametrize('pickle_uniques', [True, False]) def test_check_unknown_missing_values(missing_value, pickle_uniques): values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object) uniques = np.array(['c', 'a', 'b', missing_value], dtype=object) if pickle_uniques: uniques = pickle.loads(pickle.dumps(uniques)) expected_diff = ['d'] expected_mask = [False, True, True, True, True] <DeepExtract> diff = _check_unknown(values, uniques) assert_array_equal(diff, expected_diff) (diff, valid_mask) = _check_unknown(values, uniques, return_mask=True) assert_array_equal(diff, expected_diff) assert_array_equal(valid_mask, expected_mask) </DeepExtract> values = np.array(['d', 'c', 'a', 'b', missing_value], dtype=object) uniques = np.array(['c', 'a', 'b'], dtype=object) if pickle_uniques: uniques = pickle.loads(pickle.dumps(uniques)) expected_diff = ['d', missing_value] expected_mask = [False, True, True, True, False] <DeepExtract> diff = _check_unknown(values, uniques) assert_array_equal(diff, expected_diff) (diff, valid_mask) = _check_unknown(values, uniques, return_mask=True) assert_array_equal(diff, expected_diff) assert_array_equal(valid_mask, expected_mask) </DeepExtract> values = np.array(['a', missing_value], dtype=object) uniques = np.array(['a', 'b', 'z'], dtype=object) if pickle_uniques: uniques = pickle.loads(pickle.dumps(uniques)) expected_diff = [missing_value] expected_mask = [True, False] <DeepExtract> diff = _check_unknown(values, uniques) assert_array_equal(diff, expected_diff) (diff, valid_mask) = _check_unknown(values, uniques, return_mask=True) assert_array_equal(diff, expected_diff) assert_array_equal(valid_mask, expected_mask) </DeepExtract>
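# Hedged sketch of the private helper under test (import path assumed to be
# sklearn.utils._encode; it is not public API and may move between releases).
import numpy as np
from sklearn.utils._encode import _check_unknown

values = np.array(["d", "c", "a"], dtype=object)
uniques = np.array(["c", "a", "b"], dtype=object)
diff, valid_mask = _check_unknown(values, uniques, return_mask=True)
print(diff, valid_mask)  # expected: ['d'] and [False, True, True]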
def __init__(self): super(LineEnd, self).__init__() self.skipWhitespace = True self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS.replace('\n', '') self.copyDefaultWhiteChars = False return self self.errmsg = 'Expected end of line'
def __init__(self): super(LineEnd, self).__init__() <DeepExtract> self.skipWhitespace = True self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS.replace('\n', '') self.copyDefaultWhiteChars = False return self </DeepExtract> self.errmsg = 'Expected end of line'
def update(self, event, model): if event == 'examples_loaded': for i in range(len(model.data)): (x, y, l) = model.data[i] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], '%so' % color, scalex=0.0, scaley=0.0) if event == 'example_added': (x, y, l) = model.data[-1] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], '%so' % color, scalex=0.0, scaley=0.0) if event == 'clear': self.ax.clear() self.ax.set_xticks([]) self.ax.set_yticks([]) self.contours = [] self.c_labels = None self.ax.text(-50, -60, 'Linear: $u^T v$') self.ax.text(-20, -60, 'RBF: $\\exp (-\\gamma \\| u-v \\|^2)$') self.ax.text(10, -60, 'Poly: $(\\gamma \\, u^T v + r)^d$') if event == 'surface': if len(self.contours) > 0: for contour in self.contours: if isinstance(contour, ContourSet): for lineset in contour.collections: lineset.remove() else: contour.remove() self.contours = [] cs = self.ax.scatter(model.clf.support_vectors_[:, 0], model.clf.support_vectors_[:, 1], s=80, edgecolors='k', facecolors='none') self.contours.append(cs) (X1, X2, Z) = model.surface if model.surface_type == 0: levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' self.contours.append(self.ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)) elif model.surface_type == 1: self.contours.append(self.ax.contourf(X1, X2, Z, 10, cmap=matplotlib.cm.bone, origin='lower', alpha=0.85)) self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k', linestyles=['solid'])) else: raise ValueError('surface type unknown') self.canvas.draw()
def update(self, event, model): if event == 'examples_loaded': for i in range(len(model.data)): <DeepExtract> (x, y, l) = model.data[i] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], '%so' % color, scalex=0.0, scaley=0.0) </DeepExtract> if event == 'example_added': <DeepExtract> (x, y, l) = model.data[-1] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], '%so' % color, scalex=0.0, scaley=0.0) </DeepExtract> if event == 'clear': self.ax.clear() self.ax.set_xticks([]) self.ax.set_yticks([]) self.contours = [] self.c_labels = None <DeepExtract> self.ax.text(-50, -60, 'Linear: $u^T v$') self.ax.text(-20, -60, 'RBF: $\\exp (-\\gamma \\| u-v \\|^2)$') self.ax.text(10, -60, 'Poly: $(\\gamma \\, u^T v + r)^d$') </DeepExtract> if event == 'surface': <DeepExtract> if len(self.contours) > 0: for contour in self.contours: if isinstance(contour, ContourSet): for lineset in contour.collections: lineset.remove() else: contour.remove() self.contours = [] </DeepExtract> <DeepExtract> cs = self.ax.scatter(model.clf.support_vectors_[:, 0], model.clf.support_vectors_[:, 1], s=80, edgecolors='k', facecolors='none') self.contours.append(cs) </DeepExtract> <DeepExtract> (X1, X2, Z) = model.surface if model.surface_type == 0: levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' self.contours.append(self.ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)) elif model.surface_type == 1: self.contours.append(self.ax.contourf(X1, X2, Z, 10, cmap=matplotlib.cm.bone, origin='lower', alpha=0.85)) self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k', linestyles=['solid'])) else: raise ValueError('surface type unknown') </DeepExtract> self.canvas.draw()
def predict_log_proba(self, X): """Predict class log-probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. Raises ------ AttributeError If the ``loss`` does not support probabilities. """ raw_predictions = self.decision_function(X) try: proba = self._loss._raw_prediction_to_proba(raw_predictions) except NotFittedError: raise except AttributeError as e: raise AttributeError('loss=%r does not support predict_proba' % self.loss) from e return np.log(proba)
def predict_log_proba(self, X): """Predict class log-probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. Raises ------ AttributeError If the ``loss`` does not support probabilities. """ <DeepExtract> raw_predictions = self.decision_function(X) try: proba = self._loss._raw_prediction_to_proba(raw_predictions) except NotFittedError: raise except AttributeError as e: raise AttributeError('loss=%r does not support predict_proba' % self.loss) from e </DeepExtract> return np.log(proba)
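Because the method above only applies np.log to the probabilities recovered from decision_function, its output should agree with logging predict_proba directly. A small sanity-check sketch on toy data, assuming a GradientBoostingClassifier (which exposes both methods):

# Sketch: predict_log_proba should equal log(predict_proba) elementwise.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = GradientBoostingClassifier(n_estimators=20, random_state=0).fit(X, y)

np.testing.assert_allclose(clf.predict_log_proba(X), np.log(clf.predict_proba(X)))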
def adjusted_rand_score(labels_true, labels_pred): """Rand index adjusted for chance. The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings. The raw RI score is then "adjusted for chance" into the ARI score using the following scheme:: ARI = (RI - Expected_RI) / (max(RI) - Expected_RI) The adjusted Rand index is thus ensured to have a value close to 0.0 for random labeling independently of the number of clusters and samples and exactly 1.0 when the clusterings are identical (up to a permutation). The adjusted Rand index is bounded below by -0.5 for especially discordant clusterings. ARI is a symmetric measure:: adjusted_rand_score(a, b) == adjusted_rand_score(b, a) Read more in the :ref:`User Guide <adjusted_rand_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. Returns ------- ARI : float Similarity score between -0.5 and 1.0. Random labelings have an ARI close to 0.0. 1.0 stands for perfect match. See Also -------- adjusted_mutual_info_score : Adjusted Mutual Information. References ---------- .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions, Journal of Classification 1985 https://link.springer.com/article/10.1007%2FBF01908075 .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie adjusted Rand index, Psychological Methods 2004 .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index .. [Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size, 2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>` Examples -------- Perfectly matching labelings have a score of 1 even >>> from sklearn.metrics.cluster import adjusted_rand_score >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1]) 1.0 >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but may not always be pure, hence penalized:: >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) 0.57... ARI is symmetric, so labelings that have pure clusters with members coming from the same classes but unnecessary splits are penalized:: >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) 0.57... 
If classes members are completely split across different clusters, the assignment is totally incomplete, hence the ARI is very low:: >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3]) 0.0 ARI may take a negative value for especially discordant labelings that are a worse choice than the expected value of random labels:: >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]) -0.5 """ (labels_true, labels_pred) = check_clusterings(labels_true, labels_pred) n_samples = np.int64(labels_true.shape[0]) contingency = contingency_matrix(labels_true, labels_pred, sparse=True, dtype=np.int64) n_c = np.ravel(contingency.sum(axis=1)) n_k = np.ravel(contingency.sum(axis=0)) sum_squares = (contingency.data ** 2).sum() C = np.empty((2, 2), dtype=np.int64) C[1, 1] = sum_squares - n_samples C[0, 1] = contingency.dot(n_k).sum() - sum_squares C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares C[0, 0] = n_samples ** 2 - C[0, 1] - C[1, 0] - sum_squares ((tn, fp), (fn, tp)) = C (tn, fp, fn, tp) = (int(tn), int(fp), int(fn), int(tp)) if fn == 0 and fp == 0: return 1.0 return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
def adjusted_rand_score(labels_true, labels_pred): """Rand index adjusted for chance. The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings. The raw RI score is then "adjusted for chance" into the ARI score using the following scheme:: ARI = (RI - Expected_RI) / (max(RI) - Expected_RI) The adjusted Rand index is thus ensured to have a value close to 0.0 for random labeling independently of the number of clusters and samples and exactly 1.0 when the clusterings are identical (up to a permutation). The adjusted Rand index is bounded below by -0.5 for especially discordant clusterings. ARI is a symmetric measure:: adjusted_rand_score(a, b) == adjusted_rand_score(b, a) Read more in the :ref:`User Guide <adjusted_rand_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. Returns ------- ARI : float Similarity score between -0.5 and 1.0. Random labelings have an ARI close to 0.0. 1.0 stands for perfect match. See Also -------- adjusted_mutual_info_score : Adjusted Mutual Information. References ---------- .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions, Journal of Classification 1985 https://link.springer.com/article/10.1007%2FBF01908075 .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie adjusted Rand index, Psychological Methods 2004 .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index .. [Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size, 2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>` Examples -------- Perfectly matching labelings have a score of 1 even >>> from sklearn.metrics.cluster import adjusted_rand_score >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1]) 1.0 >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but may not always be pure, hence penalized:: >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) 0.57... ARI is symmetric, so labelings that have pure clusters with members coming from the same classes but unnecessary splits are penalized:: >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) 0.57... 
If classes members are completely split across different clusters, the assignment is totally incomplete, hence the ARI is very low:: >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3]) 0.0 ARI may take a negative value for especially discordant labelings that are a worse choice than the expected value of random labels:: >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]) -0.5 """ <DeepExtract> (labels_true, labels_pred) = check_clusterings(labels_true, labels_pred) n_samples = np.int64(labels_true.shape[0]) contingency = contingency_matrix(labels_true, labels_pred, sparse=True, dtype=np.int64) n_c = np.ravel(contingency.sum(axis=1)) n_k = np.ravel(contingency.sum(axis=0)) sum_squares = (contingency.data ** 2).sum() C = np.empty((2, 2), dtype=np.int64) C[1, 1] = sum_squares - n_samples C[0, 1] = contingency.dot(n_k).sum() - sum_squares C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares C[0, 0] = n_samples ** 2 - C[0, 1] - C[1, 0] - sum_squares ((tn, fp), (fn, tp)) = C </DeepExtract> (tn, fp, fn, tp) = (int(tn), int(fp), int(fn), int(tp)) if fn == 0 and fp == 0: return 1.0 return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
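The final return above is the pair-counting form of ARI evaluated on the 2x2 pair confusion matrix (tn, fp, fn, tp). A worked sketch on toy labelings, assuming pair_confusion_matrix from sklearn.metrics.cluster, which exposes the same pair counts; the formula is scale-invariant, so the doubled counts it returns give the same value:

# Sketch: recompute ARI from the pair confusion matrix and compare with
# adjusted_rand_score on the same labelings.
from sklearn.metrics.cluster import adjusted_rand_score, pair_confusion_matrix

labels_true = [0, 0, 1, 2]
labels_pred = [0, 0, 1, 1]

(tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred)
tn, fp, fn, tp = int(tn), int(fp), int(fn), int(tp)

ari = 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
assert abs(ari - adjusted_rand_score(labels_true, labels_pred)) < 1e-12

For these labelings the value is the 0.57... quoted in the docstring example.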
def transform(self, X): """ Transform X using one-hot encoding. If there are infrequent categories for a feature, the infrequent categories will be grouped into a single category. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : {ndarray, sparse matrix} of shape (n_samples, n_encoded_features) Transformed input. If `sparse_output=True`, a sparse matrix will be returned. """ check_is_fitted(self) warn_on_unknown = self.drop is not None and self.handle_unknown in {'ignore', 'infrequent_if_exist'} self._check_feature_names(X, reset=False) self._check_n_features(X, reset=False) (X_list, n_samples, n_features) = self._check_X(X, force_all_finite='allow-nan') X_int = np.zeros((n_samples, n_features), dtype=int) X_mask = np.ones((n_samples, n_features), dtype=bool) columns_with_unknown = [] for i in range(n_features): Xi = X_list[i] (diff, valid_mask) = _check_unknown(Xi, self.categories_[i], return_mask=True) if not np.all(valid_mask): if self.handle_unknown == 'error': msg = 'Found unknown categories {0} in column {1} during transform'.format(diff, i) raise ValueError(msg) else: if warn_on_unknown: columns_with_unknown.append(i) X_mask[:, i] = valid_mask if self.categories_[i].dtype.kind in ('U', 'S') and self.categories_[i].itemsize > Xi.itemsize: Xi = Xi.astype(self.categories_[i].dtype) elif self.categories_[i].dtype.kind == 'O' and Xi.dtype.kind == 'U': Xi = Xi.astype('O') else: Xi = Xi.copy() Xi[~valid_mask] = self.categories_[i][0] X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) if columns_with_unknown: warnings.warn(f'Found unknown categories in columns {columns_with_unknown} during transform. These unknown categories will be encoded as all zeros', UserWarning) self._map_infrequent_categories(X_int, X_mask, ignore_category_indices) (X_int, X_mask) = (X_int, X_mask) (n_samples, n_features) = X_int.shape if self._drop_idx_after_grouping is not None: to_drop = self._drop_idx_after_grouping.copy() keep_cells = X_int != to_drop for (i, cats) in enumerate(self.categories_): if to_drop[i] is None: to_drop[i] = len(cats) to_drop = to_drop.reshape(1, -1) X_int[X_int > to_drop] -= 1 X_mask &= keep_cells mask = X_mask.ravel() feature_indices = np.cumsum([0] + self._n_features_outs) indices = (X_int + feature_indices[:-1]).ravel()[mask] indptr = np.empty(n_samples + 1, dtype=int) indptr[0] = 0 np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype) np.cumsum(indptr[1:], out=indptr[1:]) data = np.ones(indptr[-1]) out = sparse.csr_matrix((data, indices, indptr), shape=(n_samples, feature_indices[-1]), dtype=self.dtype) if not self.sparse_output: return out.toarray() else: return out
def transform(self, X): """ Transform X using one-hot encoding. If there are infrequent categories for a feature, the infrequent categories will be grouped into a single category. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : {ndarray, sparse matrix} of shape (n_samples, n_encoded_features) Transformed input. If `sparse_output=True`, a sparse matrix will be returned. """ check_is_fitted(self) warn_on_unknown = self.drop is not None and self.handle_unknown in {'ignore', 'infrequent_if_exist'} <DeepExtract> self._check_feature_names(X, reset=False) self._check_n_features(X, reset=False) (X_list, n_samples, n_features) = self._check_X(X, force_all_finite='allow-nan') X_int = np.zeros((n_samples, n_features), dtype=int) X_mask = np.ones((n_samples, n_features), dtype=bool) columns_with_unknown = [] for i in range(n_features): Xi = X_list[i] (diff, valid_mask) = _check_unknown(Xi, self.categories_[i], return_mask=True) if not np.all(valid_mask): if self.handle_unknown == 'error': msg = 'Found unknown categories {0} in column {1} during transform'.format(diff, i) raise ValueError(msg) else: if warn_on_unknown: columns_with_unknown.append(i) X_mask[:, i] = valid_mask if self.categories_[i].dtype.kind in ('U', 'S') and self.categories_[i].itemsize > Xi.itemsize: Xi = Xi.astype(self.categories_[i].dtype) elif self.categories_[i].dtype.kind == 'O' and Xi.dtype.kind == 'U': Xi = Xi.astype('O') else: Xi = Xi.copy() Xi[~valid_mask] = self.categories_[i][0] X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) if columns_with_unknown: warnings.warn(f'Found unknown categories in columns {columns_with_unknown} during transform. These unknown categories will be encoded as all zeros', UserWarning) self._map_infrequent_categories(X_int, X_mask, ignore_category_indices) (X_int, X_mask) = (X_int, X_mask) </DeepExtract> (n_samples, n_features) = X_int.shape if self._drop_idx_after_grouping is not None: to_drop = self._drop_idx_after_grouping.copy() keep_cells = X_int != to_drop for (i, cats) in enumerate(self.categories_): if to_drop[i] is None: to_drop[i] = len(cats) to_drop = to_drop.reshape(1, -1) X_int[X_int > to_drop] -= 1 X_mask &= keep_cells mask = X_mask.ravel() feature_indices = np.cumsum([0] + self._n_features_outs) indices = (X_int + feature_indices[:-1]).ravel()[mask] indptr = np.empty(n_samples + 1, dtype=int) indptr[0] = 0 np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype) np.cumsum(indptr[1:], out=indptr[1:]) data = np.ones(indptr[-1]) out = sparse.csr_matrix((data, indices, indptr), shape=(n_samples, feature_indices[-1]), dtype=self.dtype) if not self.sparse_output: return out.toarray() else: return out
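The unknown-category handling above masks invalid entries so that, with handle_unknown='ignore' and no drop, they encode as all-zero rows. A small usage sketch with toy categories (sparse_output=False is assumed to be available, i.e. scikit-learn >= 1.2):

# Sketch: unknown categories at transform time become all-zero rows
# when handle_unknown='ignore'.
from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(handle_unknown="ignore", sparse_output=False)
enc.fit([["cat"], ["dog"]])

out = enc.transform([["dog"], ["fish"]])  # "fish" was never seen during fit
print(out)
# [[0. 1.]
#  [0. 0.]]   <- the unknown row encodes as all zeros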
def fit(self, X, y=None, groups=None, **fit_params): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_output), optional Target relative to X for classification or regression; None for unsupervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). **fit_params : dict of string -> object Parameters passed to the ``fit`` method of the estimator. Returns ------- self : object Instance of fitted estimator. """ self._validate_params() self._checked_cv_orig = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) if not _yields_constant_splits(self._checked_cv_orig): raise ValueError('The cv parameter must yield consistent folds across calls to split(). Set its random_state to an int, or set shuffle=False.') if self.resource != 'n_samples' and self.resource not in self.estimator.get_params(): raise ValueError(f'Cannot use resource={self.resource} which is not supported by estimator {self.estimator.__class__.__name__}') if isinstance(self, HalvingRandomSearchCV): if self.min_resources == self.n_candidates == 'exhaust': raise ValueError("n_candidates and min_resources cannot be both set to 'exhaust'.") self.min_resources_ = self.min_resources if self.min_resources_ in ('smallest', 'exhaust'): if self.resource == 'n_samples': n_splits = self._checked_cv_orig.get_n_splits(X, y, groups) magic_factor = 2 self.min_resources_ = n_splits * magic_factor if is_classifier(self.estimator): y = self._validate_data(X='no_validation', y=y) check_classification_targets(y) n_classes = np.unique(y).shape[0] self.min_resources_ *= n_classes else: self.min_resources_ = 1 self.max_resources_ = self.max_resources if self.max_resources_ == 'auto': if not self.resource == 'n_samples': raise ValueError("resource can only be 'n_samples' when max_resources='auto'") self.max_resources_ = _num_samples(X) if self.min_resources_ > self.max_resources_: raise ValueError(f'min_resources_={self.min_resources_} is greater than max_resources_={self.max_resources_}.') if self.min_resources_ == 0: raise ValueError(f'min_resources_={self.min_resources_}: you might have passed an empty dataset X.') self._n_samples_orig = _num_samples(X) super().fit(X, y=y, groups=groups, **fit_params) self.best_score_ = self.cv_results_['mean_test_score'][self.best_index_] return self
def fit(self, X, y=None, groups=None, **fit_params): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_output), optional Target relative to X for classification or regression; None for unsupervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). **fit_params : dict of string -> object Parameters passed to the ``fit`` method of the estimator. Returns ------- self : object Instance of fitted estimator. """ self._validate_params() self._checked_cv_orig = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) <DeepExtract> if not _yields_constant_splits(self._checked_cv_orig): raise ValueError('The cv parameter must yield consistent folds across calls to split(). Set its random_state to an int, or set shuffle=False.') if self.resource != 'n_samples' and self.resource not in self.estimator.get_params(): raise ValueError(f'Cannot use resource={self.resource} which is not supported by estimator {self.estimator.__class__.__name__}') if isinstance(self, HalvingRandomSearchCV): if self.min_resources == self.n_candidates == 'exhaust': raise ValueError("n_candidates and min_resources cannot be both set to 'exhaust'.") self.min_resources_ = self.min_resources if self.min_resources_ in ('smallest', 'exhaust'): if self.resource == 'n_samples': n_splits = self._checked_cv_orig.get_n_splits(X, y, groups) magic_factor = 2 self.min_resources_ = n_splits * magic_factor if is_classifier(self.estimator): y = self._validate_data(X='no_validation', y=y) check_classification_targets(y) n_classes = np.unique(y).shape[0] self.min_resources_ *= n_classes else: self.min_resources_ = 1 self.max_resources_ = self.max_resources if self.max_resources_ == 'auto': if not self.resource == 'n_samples': raise ValueError("resource can only be 'n_samples' when max_resources='auto'") self.max_resources_ = _num_samples(X) if self.min_resources_ > self.max_resources_: raise ValueError(f'min_resources_={self.min_resources_} is greater than max_resources_={self.max_resources_}.') if self.min_resources_ == 0: raise ValueError(f'min_resources_={self.min_resources_}: you might have passed an empty dataset X.') </DeepExtract> self._n_samples_orig = _num_samples(X) super().fit(X, y=y, groups=groups, **fit_params) self.best_score_ = self.cv_results_['mean_test_score'][self.best_index_] return self
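The inlined block above is what fills in min_resources_ and max_resources_ before successive halving starts. A minimal usage sketch on toy data (the experimental enable import is assumed to still be required for the halving estimators; parameter values are arbitrary):

# Sketch: fit a HalvingGridSearchCV and inspect the resource attributes
# validated above.
from sklearn.experimental import enable_halving_search_cv  # noqa: F401
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=400, random_state=0)
search = HalvingGridSearchCV(
    SVC(random_state=0),
    param_grid={"C": [0.1, 1, 10], "gamma": ["scale", "auto"]},
    resource="n_samples",   # the default resource checked above
    random_state=0,
).fit(X, y)

print(search.min_resources_, search.max_resources_, search.best_params_)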
def test_fetch_openml_deprecation_parser(monkeypatch): """Check that we raise a deprecation warning for parser parameter.""" pytest.importorskip('pandas') data_id = 61 url_prefix_data_description = 'https://openml.org/api/v1/json/data/' url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/' url_prefix_download_data = 'https://openml.org/data/v1/' url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/' path_suffix = '.gz' read_fn = gzip.open data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}' def _file_name(url, suffix): output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act') def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix): assert url.startswith(expected_prefix) data_file_name = _file_name(url, suffix) with _open_binary(data_module, data_file_name) as f: if has_gzip_header and False: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen_data_description(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json') def _mock_urlopen_data_features(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json') def _mock_urlopen_download_data(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff') def _mock_urlopen_data_list(url, has_gzip_header): assert url.startswith(url_prefix_data_list) data_file_name = _file_name(url, '.json') with _open_binary(data_module, data_file_name) as f: decompressed_f = read_fn(f, 'rb') decoded_s = decompressed_f.read().decode('utf-8') json_data = json.loads(decoded_s) if 'error' in json_data: raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None) with _open_binary(data_module, data_file_name) as f: if has_gzip_header: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen(request, *args, **kwargs): url = request.get_full_url() has_gzip_header = request.get_header('Accept-encoding') == 'gzip' if url.startswith(url_prefix_data_list): return _mock_urlopen_data_list(url, has_gzip_header) elif url.startswith(url_prefix_data_features): return _mock_urlopen_data_features(url, has_gzip_header) elif url.startswith(url_prefix_download_data): return _mock_urlopen_download_data(url, has_gzip_header) elif url.startswith(url_prefix_data_description): return _mock_urlopen_data_description(url, has_gzip_header) else: raise ValueError('Unknown mocking URL pattern: %s' % url) if test_offline: monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen) with pytest.warns(FutureWarning, match='The default value of `parser` will change'): sklearn.datasets.fetch_openml(data_id=data_id)
def test_fetch_openml_deprecation_parser(monkeypatch): """Check that we raise a deprecation warning for parser parameter.""" pytest.importorskip('pandas') data_id = 61 <DeepExtract> url_prefix_data_description = 'https://openml.org/api/v1/json/data/' url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/' url_prefix_download_data = 'https://openml.org/data/v1/' url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/' path_suffix = '.gz' read_fn = gzip.open data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}' def _file_name(url, suffix): output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act') def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix): assert url.startswith(expected_prefix) data_file_name = _file_name(url, suffix) with _open_binary(data_module, data_file_name) as f: if has_gzip_header and False: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen_data_description(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json') def _mock_urlopen_data_features(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json') def _mock_urlopen_download_data(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff') def _mock_urlopen_data_list(url, has_gzip_header): assert url.startswith(url_prefix_data_list) data_file_name = _file_name(url, '.json') with _open_binary(data_module, data_file_name) as f: decompressed_f = read_fn(f, 'rb') decoded_s = decompressed_f.read().decode('utf-8') json_data = json.loads(decoded_s) if 'error' in json_data: raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None) with _open_binary(data_module, data_file_name) as f: if has_gzip_header: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen(request, *args, **kwargs): url = request.get_full_url() has_gzip_header = request.get_header('Accept-encoding') == 'gzip' if url.startswith(url_prefix_data_list): return _mock_urlopen_data_list(url, has_gzip_header) elif url.startswith(url_prefix_data_features): return _mock_urlopen_data_features(url, has_gzip_header) elif url.startswith(url_prefix_download_data): return _mock_urlopen_download_data(url, has_gzip_header) elif url.startswith(url_prefix_data_description): return _mock_urlopen_data_description(url, has_gzip_header) else: raise ValueError('Unknown mocking URL pattern: %s' % url) if test_offline: monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen) </DeepExtract> with pytest.warns(FutureWarning, match='The default value of `parser` will change'): sklearn.datasets.fetch_openml(data_id=data_id)
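The test above only asserts that calling fetch_openml without parser emits the FutureWarning; the mocking scaffolding exists so that no real network request is made. A stripped-down sketch of the same pytest.warns pattern, using a hypothetical my_deprecated stand-in instead of the real fetcher:

# Sketch: asserting that a call raises a FutureWarning whose message
# matches a pattern, as the test above does for fetch_openml.
import warnings
import pytest


def my_deprecated(parser=None):  # hypothetical stand-in for fetch_openml
    if parser is None:
        warnings.warn("The default value of `parser` will change", FutureWarning)


def test_warns_on_default_parser():
    with pytest.warns(FutureWarning, match="The default value of `parser` will change"):
        my_deprecated()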
def plot_feature_errors(all_errors, batch_size, all_components, data): plt.figure() plt.plot(all_components, all_errors['pca'], label='PCA', marker='o') plt.plot(all_components, all_errors['ipca'], label='IncrementalPCA, bsize=%i' % batch_size, marker='o') plt.legend(loc='lower left') plt.suptitle('Algorithm error vs. n_components\nLFW, size %i x %i' % data.shape) plt.xlabel('Number of components (out of max %i)' % data.shape[1]) plt.ylabel('Mean absolute error')
def plot_feature_errors(all_errors, batch_size, all_components, data): plt.figure() <DeepExtract> plt.plot(all_components, all_errors['pca'], label='PCA', marker='o') </DeepExtract> <DeepExtract> plt.plot(all_components, all_errors['ipca'], label='IncrementalPCA, bsize=%i' % batch_size, marker='o') </DeepExtract> plt.legend(loc='lower left') plt.suptitle('Algorithm error vs. n_components\nLFW, size %i x %i' % data.shape) plt.xlabel('Number of components (out of max %i)' % data.shape[1]) plt.ylabel('Mean absolute error')
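The helper above expects an all_errors dict with one error curve per algorithm. A sketch of how such a dict could be built on toy data instead of LFW (load_digits stands in for the faces; using reconstruction mean absolute error is an assumption matching the y-axis label):

# Sketch: reconstruction mean absolute error of PCA vs IncrementalPCA
# for several n_components values, shaped like the inputs of the
# plotting helper above.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA, IncrementalPCA

data = load_digits().data
batch_size = 100
all_components = [2, 8, 16, 32]
all_errors = {"pca": [], "ipca": []}

for n in all_components:
    for key, est in (("pca", PCA(n_components=n)),
                     ("ipca", IncrementalPCA(n_components=n, batch_size=batch_size))):
        recon = est.inverse_transform(est.fit_transform(data))
        all_errors[key].append(np.mean(np.abs(data - recon)))

print(all_errors)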
def get_shape(self, i): """Shape of the `i`'th bicluster. Parameters ---------- i : int The index of the cluster. Returns ------- n_rows : int Number of rows in the bicluster. n_cols : int Number of columns in the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] indices = (np.nonzero(rows)[0], np.nonzero(columns)[0]) return tuple((len(i) for i in indices))
def get_shape(self, i): """Shape of the `i`'th bicluster. Parameters ---------- i : int The index of the cluster. Returns ------- n_rows : int Number of rows in the bicluster. n_cols : int Number of columns in the bicluster. """ <DeepExtract> rows = self.rows_[i] columns = self.columns_[i] indices = (np.nonzero(rows)[0], np.nonzero(columns)[0]) </DeepExtract> return tuple((len(i) for i in indices))
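get_shape above simply counts the nonzero entries of the i-th row and column indicator vectors. A usage sketch on a fitted co-clustering model (toy checkerboard data; SpectralCoclustering exposes the same rows_ / columns_ attributes this mixin reads):

# Sketch: the shape of bicluster 0 equals the number of True entries in
# its row and column indicator vectors.
from sklearn.datasets import make_biclusters
from sklearn.cluster import SpectralCoclustering

data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, random_state=0)
model = SpectralCoclustering(n_clusters=3, random_state=0).fit(data)

n_rows, n_cols = model.get_shape(0)
assert n_rows == model.rows_[0].sum()
assert n_cols == model.columns_[0].sum()
print(n_rows, n_cols)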
def _more_validate_params(self, for_partial_fit=False): """Validate input params.""" if self.early_stopping and for_partial_fit: raise ValueError('early_stopping should be False with partial_fit') if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0: raise ValueError('eta0 must be > 0') if self.learning_rate == 'optimal' and self.alpha == 0: raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.") self.penalty = str(self.penalty).lower() return PENALTY_TYPES[self.penalty] return LEARNING_RATE_TYPES[self.learning_rate]
def _more_validate_params(self, for_partial_fit=False): """Validate input params.""" if self.early_stopping and for_partial_fit: raise ValueError('early_stopping should be False with partial_fit') if self.learning_rate in ('constant', 'invscaling', 'adaptive') and self.eta0 <= 0.0: raise ValueError('eta0 must be > 0') if self.learning_rate == 'optimal' and self.alpha == 0: raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.") <DeepExtract> self.penalty = str(self.penalty).lower() return PENALTY_TYPES[self.penalty] </DeepExtract> <DeepExtract> return LEARNING_RATE_TYPES[self.learning_rate] </DeepExtract>
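The checks above surface as ValueError at fit time when an SGD estimator is configured inconsistently. A small sketch on toy data showing the eta0 check firing (assumes pytest for the assertion and that SGDClassifier reaches this validation during fit, as in current scikit-learn):

# Sketch: eta0 must be strictly positive for a constant learning rate;
# the check above raises at fit time.
import pytest
from sklearn.linear_model import SGDClassifier

X = [[0.0, 0.0], [1.0, 1.0]]
y = [0, 1]

clf = SGDClassifier(learning_rate="constant", eta0=0.0)
with pytest.raises(ValueError, match="eta0 must be > 0"):
    clf.fit(X, y)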
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'sample_weight': ['array-like', None], 'labels': ['array-like', None], 'samplewise': ['boolean']}) def multilabel_confusion_matrix(y_true, y_pred, *, sample_weight=None, labels=None, samplewise=False): """Compute a confusion matrix for each class or sample. .. versionadded:: 0.21 Compute class-wise (default) or sample-wise (samplewise=True) multilabel confusion matrix to evaluate the accuracy of a classification, and output confusion matrices for each class or sample. In multilabel confusion matrix :math:`MCM`, the count of true negatives is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`, true positives is :math:`MCM_{:,1,1}` and false positives is :math:`MCM_{:,0,1}`. Multiclass data will be treated as if binarized under a one-vs-rest transformation. Returned confusion matrices will be in the order of sorted unique labels in the union of (y_true, y_pred). Read more in the :ref:`User Guide <multilabel_confusion_matrix>`. Parameters ---------- y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or (n_samples,) Ground truth (correct) target values. y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or (n_samples,) Estimated targets as returned by a classifier. sample_weight : array-like of shape (n_samples,), default=None Sample weights. labels : array-like of shape (n_classes,), default=None A list of classes or column indices to select some (or to force inclusion of classes absent from the data). samplewise : bool, default=False In the multilabel case, this calculates a confusion matrix per sample. Returns ------- multi_confusion : ndarray of shape (n_outputs, 2, 2) A 2x2 confusion matrix corresponding to each output in the input. When calculating class-wise multi_confusion (default), then n_outputs = n_labels; when calculating sample-wise multi_confusion (samplewise=True), n_outputs = n_samples. If ``labels`` is defined, the results will be returned in the order specified in ``labels``, otherwise the results will be returned in sorted order by default. See Also -------- confusion_matrix : Compute confusion matrix to evaluate the accuracy of a classifier. Notes ----- The `multilabel_confusion_matrix` calculates class-wise or sample-wise multilabel confusion matrices, and in multiclass tasks, labels are binarized under a one-vs-rest way; while :func:`~sklearn.metrics.confusion_matrix` calculates one confusion matrix for confusion between every two classes. Examples -------- Multilabel-indicator case: >>> import numpy as np >>> from sklearn.metrics import multilabel_confusion_matrix >>> y_true = np.array([[1, 0, 1], ... [0, 1, 0]]) >>> y_pred = np.array([[1, 0, 0], ... [0, 1, 1]]) >>> multilabel_confusion_matrix(y_true, y_pred) array([[[1, 0], [0, 1]], <BLANKLINE> [[1, 0], [0, 1]], <BLANKLINE> [[0, 1], [1, 0]]]) Multiclass case: >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] >>> multilabel_confusion_matrix(y_true, y_pred, ... 
labels=["ant", "bird", "cat"]) array([[[3, 1], [0, 2]], <BLANKLINE> [[5, 0], [1, 0]], <BLANKLINE> [[2, 1], [1, 2]]]) """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true, input_name='y_true') type_pred = type_of_target(y_pred, input_name='y_pred') y_type = {type_true, type_pred} if y_type == {'binary', 'multiclass'}: y_type = {'multiclass'} if len(y_type) > 1: raise ValueError("Classification metrics can't handle a mix of {0} and {1} targets".format(type_true, type_pred)) y_type = y_type.pop() if y_type not in ['binary', 'multiclass', 'multilabel-indicator']: raise ValueError('{0} is not supported'.format(y_type)) if y_type in ['binary', 'multiclass']: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type == 'binary': try: unique_values = np.union1d(y_true, y_pred) except TypeError as e: raise TypeError(f'Labels in y_true and y_pred should be of the same type. Got y_true={np.unique(y_true)} and y_pred={np.unique(y_pred)}. Make sure that the predictions provided by the classifier coincides with the true labels.') from e if len(unique_values) > 2: y_type = 'multiclass' if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' (y_type, y_true, y_pred) = (y_type, y_true, y_pred) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) check_consistent_length(y_true, y_pred, sample_weight) if y_type not in ('binary', 'multiclass', 'multilabel-indicator'): raise ValueError('%s is not supported' % y_type) present_labels = unique_labels(y_true, y_pred) if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) if y_true.ndim == 1: if samplewise: raise ValueError('Samplewise metrics are not available outside of multilabel classification.') le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] else: sum_axis = 1 if samplewise else 0 if not np.array_equal(labels, present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels) for multilabel targets. 
Got %d < 0' % np.min(labels)) if n_labels is not None: y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) fp = pred_sum - tp_sum fn = true_sum - tp_sum tp = tp_sum if sample_weight is not None and samplewise: sample_weight = np.array(sample_weight) tp = np.array(tp) fp = np.array(fp) fn = np.array(fn) tn = sample_weight * y_true.shape[1] - tp - fp - fn elif sample_weight is not None: tn = sum(sample_weight) - tp - fp - fn elif samplewise: tn = y_true.shape[1] - tp - fp - fn else: tn = y_true.shape[0] - tp - fp - fn return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'sample_weight': ['array-like', None], 'labels': ['array-like', None], 'samplewise': ['boolean']}) def multilabel_confusion_matrix(y_true, y_pred, *, sample_weight=None, labels=None, samplewise=False): """Compute a confusion matrix for each class or sample. .. versionadded:: 0.21 Compute class-wise (default) or sample-wise (samplewise=True) multilabel confusion matrix to evaluate the accuracy of a classification, and output confusion matrices for each class or sample. In multilabel confusion matrix :math:`MCM`, the count of true negatives is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`, true positives is :math:`MCM_{:,1,1}` and false positives is :math:`MCM_{:,0,1}`. Multiclass data will be treated as if binarized under a one-vs-rest transformation. Returned confusion matrices will be in the order of sorted unique labels in the union of (y_true, y_pred). Read more in the :ref:`User Guide <multilabel_confusion_matrix>`. Parameters ---------- y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or (n_samples,) Ground truth (correct) target values. y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or (n_samples,) Estimated targets as returned by a classifier. sample_weight : array-like of shape (n_samples,), default=None Sample weights. labels : array-like of shape (n_classes,), default=None A list of classes or column indices to select some (or to force inclusion of classes absent from the data). samplewise : bool, default=False In the multilabel case, this calculates a confusion matrix per sample. Returns ------- multi_confusion : ndarray of shape (n_outputs, 2, 2) A 2x2 confusion matrix corresponding to each output in the input. When calculating class-wise multi_confusion (default), then n_outputs = n_labels; when calculating sample-wise multi_confusion (samplewise=True), n_outputs = n_samples. If ``labels`` is defined, the results will be returned in the order specified in ``labels``, otherwise the results will be returned in sorted order by default. See Also -------- confusion_matrix : Compute confusion matrix to evaluate the accuracy of a classifier. Notes ----- The `multilabel_confusion_matrix` calculates class-wise or sample-wise multilabel confusion matrices, and in multiclass tasks, labels are binarized under a one-vs-rest way; while :func:`~sklearn.metrics.confusion_matrix` calculates one confusion matrix for confusion between every two classes. Examples -------- Multilabel-indicator case: >>> import numpy as np >>> from sklearn.metrics import multilabel_confusion_matrix >>> y_true = np.array([[1, 0, 1], ... [0, 1, 0]]) >>> y_pred = np.array([[1, 0, 0], ... [0, 1, 1]]) >>> multilabel_confusion_matrix(y_true, y_pred) array([[[1, 0], [0, 1]], <BLANKLINE> [[1, 0], [0, 1]], <BLANKLINE> [[0, 1], [1, 0]]]) Multiclass case: >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] >>> multilabel_confusion_matrix(y_true, y_pred, ... 
labels=["ant", "bird", "cat"]) array([[[3, 1], [0, 2]], <BLANKLINE> [[5, 0], [1, 0]], <BLANKLINE> [[2, 1], [1, 2]]]) """ <DeepExtract> check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true, input_name='y_true') type_pred = type_of_target(y_pred, input_name='y_pred') y_type = {type_true, type_pred} if y_type == {'binary', 'multiclass'}: y_type = {'multiclass'} if len(y_type) > 1: raise ValueError("Classification metrics can't handle a mix of {0} and {1} targets".format(type_true, type_pred)) y_type = y_type.pop() if y_type not in ['binary', 'multiclass', 'multilabel-indicator']: raise ValueError('{0} is not supported'.format(y_type)) if y_type in ['binary', 'multiclass']: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type == 'binary': try: unique_values = np.union1d(y_true, y_pred) except TypeError as e: raise TypeError(f'Labels in y_true and y_pred should be of the same type. Got y_true={np.unique(y_true)} and y_pred={np.unique(y_pred)}. Make sure that the predictions provided by the classifier coincides with the true labels.') from e if len(unique_values) > 2: y_type = 'multiclass' if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' (y_type, y_true, y_pred) = (y_type, y_true, y_pred) </DeepExtract> if sample_weight is not None: sample_weight = column_or_1d(sample_weight) check_consistent_length(y_true, y_pred, sample_weight) if y_type not in ('binary', 'multiclass', 'multilabel-indicator'): raise ValueError('%s is not supported' % y_type) present_labels = unique_labels(y_true, y_pred) if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) if y_true.ndim == 1: if samplewise: raise ValueError('Samplewise metrics are not available outside of multilabel classification.') le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] else: sum_axis = 1 if samplewise else 0 if not np.array_equal(labels, present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels) for multilabel targets. 
Got %d < 0' % np.min(labels)) if n_labels is not None: y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) fp = pred_sum - tp_sum fn = true_sum - tp_sum tp = tp_sum if sample_weight is not None and samplewise: sample_weight = np.array(sample_weight) tp = np.array(tp) fp = np.array(fp) fn = np.array(fn) tn = sample_weight * y_true.shape[1] - tp - fp - fn elif sample_weight is not None: tn = sum(sample_weight) - tp - fp - fn elif samplewise: tn = y_true.shape[1] - tp - fp - fn else: tn = y_true.shape[0] - tp - fp - fn return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
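For a plain binary problem, the per-class matrix computed above for the positive label coincides with the usual 2x2 confusion matrix [[tn, fp], [fn, tp]]. A quick sketch on toy labels checking that correspondence:

# Sketch: in the binary case, the per-class matrix for the positive label
# matches the ordinary 2x2 confusion matrix [[tn, fp], [fn, tp]].
import numpy as np
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]

mcm = multilabel_confusion_matrix(y_true, y_pred)   # shape (2, 2, 2)
np.testing.assert_array_equal(mcm[1], confusion_matrix(y_true, y_pred))

tn, fp, fn, tp = mcm[1].ravel()
print(tn, fp, fn, tp)   # 2 0 1 2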
def pairwise_distances_chunked(X, Y=None, *, reduce_func=None, metric='euclidean', n_jobs=None, working_memory=None, **kwds): """Generate a distance matrix chunk by chunk with optional reduction. In cases where not all of a pairwise distance matrix needs to be stored at once, this is used to calculate pairwise distances in ``working_memory``-sized chunks. If ``reduce_func`` is given, it is run on each chunk and its return values are concatenated into lists, arrays or sparse matrices. Parameters ---------- X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features) Array of pairwise distances between samples, or a feature array. The shape the array should be (n_samples_X, n_samples_X) if metric='precomputed' and (n_samples_X, n_features) otherwise. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. Only allowed if metric != "precomputed". reduce_func : callable, default=None The function which is applied on each chunk of the distance matrix, reducing it to needed values. ``reduce_func(D_chunk, start)`` is called repeatedly, where ``D_chunk`` is a contiguous vertical slice of the pairwise distance matrix, starting at row ``start``. It should return one of: None; an array, a list, or a sparse matrix of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning None is useful for in-place operations, rather than reductions. If None, pairwise_distances_chunked returns a generator of vertical chunks of the distance matrix. metric : str or callable, default='euclidean' The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int, default=None The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. working_memory : int, default=None The sought maximum memory for temporary distance matrix chunks. When None (default), the value of ``sklearn.get_config()['working_memory']`` is used. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Yields ------ D_chunk : {ndarray, sparse matrix} A contiguous slice of distance matrix, optionally processed by ``reduce_func``. Examples -------- Without reduce_func: >>> import numpy as np >>> from sklearn.metrics import pairwise_distances_chunked >>> X = np.random.RandomState(0).rand(5, 3) >>> D_chunk = next(pairwise_distances_chunked(X)) >>> D_chunk array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...], [0.29..., 0. ..., 0.57..., 0.41..., 0.76...], [0.41..., 0.57..., 0. ..., 0.44..., 0.90...], [0.19..., 0.41..., 0.44..., 0. ..., 0.51...], [0.57..., 0.76..., 0.90..., 0.51..., 0. 
...]]) Retrieve all neighbors and average distance within radius r: >>> r = .2 >>> def reduce_func(D_chunk, start): ... neigh = [np.flatnonzero(d < r) for d in D_chunk] ... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1) ... return neigh, avg_dist >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func) >>> neigh, avg_dist = next(gen) >>> neigh [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])] >>> avg_dist array([0.039..., 0. , 0. , 0.039..., 0. ]) Where r is defined per sample, we need to make use of ``start``: >>> r = [.2, .4, .4, .3, .1] >>> def reduce_func(D_chunk, start): ... neigh = [np.flatnonzero(d < r[i]) ... for i, d in enumerate(D_chunk, start)] ... return neigh >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func)) >>> neigh [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])] Force row-by-row generation by reducing ``working_memory``: >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func, ... working_memory=0) >>> next(gen) [array([0, 3])] >>> next(gen) [array([0, 1])] """ n_samples_X = _num_samples(X) if metric == 'precomputed': slices = (slice(0, n_samples_X),) else: if Y is None: Y = X chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y), max_n_rows=n_samples_X, working_memory=working_memory) slices = gen_batches(n_samples_X, chunk_n_rows) if metric == 'seuclidean' and 'V' not in kwds: dtype = np.float64 if sp_version < parse_version('1.5') else None if X is Y: V = np.var(X, axis=0, ddof=1, dtype=dtype) else: raise ValueError("The 'V' parameter is required for the seuclidean metric when Y is passed.") params = {'V': V} if metric == 'mahalanobis' and 'VI' not in kwds: if X is Y: VI = np.linalg.inv(np.cov(X.T)).T else: raise ValueError("The 'VI' parameter is required for the mahalanobis metric when Y is passed.") params = {'VI': VI} params = {} kwds.update(**params) for sl in slices: if sl.start == 0 and sl.stop == n_samples_X: X_chunk = X else: X_chunk = X[sl] if metric not in _VALID_METRICS and (not callable(metric)) and (metric != 'precomputed'): raise ValueError("Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable" % (metric, _VALID_METRICS)) if metric == 'precomputed': (X_chunk, _) = check_pairwise_arrays(X_chunk, Y, precomputed=True, force_all_finite=force_all_finite) whom = '`pairwise_distances`. Precomputed distance need to have non-negative values.' 
check_non_negative(X_chunk, whom=whom) D_chunk = X_chunk elif metric in PAIRWISE_DISTANCE_FUNCTIONS: func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds) else: if issparse(X_chunk) or issparse(Y): raise TypeError('scipy distance metrics do not support sparse matrices.') dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None if dtype == bool and (X_chunk.dtype != bool or (Y is not None and Y.dtype != bool)): msg = 'Data was converted to boolean for metric %s' % metric warnings.warn(msg, DataConversionWarning) (X_chunk, Y) = check_pairwise_arrays(X_chunk, Y, dtype=dtype, force_all_finite=force_all_finite) params = _precompute_metric_params(X_chunk, Y, metric=metric, **kwds) kwds.update(**params) if effective_n_jobs(n_jobs) == 1 and X_chunk is Y: D_chunk = distance.squareform(distance.pdist(X_chunk, metric=metric, **kwds)) func = partial(distance.cdist, metric=metric, **kwds) D_chunk = _parallel_pairwise(X_chunk, Y, func, n_jobs, **kwds) if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None) is euclidean_distances: D_chunk.flat[sl.start::_num_samples(X) + 1] = 0 if reduce_func is not None: chunk_size = D_chunk.shape[0] D_chunk = reduce_func(D_chunk, sl.start) if D_chunk is None: return is_tuple = isinstance(D_chunk, tuple) if not is_tuple: D_chunk = (D_chunk,) if any((isinstance(r, tuple) or not hasattr(r, '__iter__') for r in D_chunk)): raise TypeError('reduce_func returned %r. Expected sequence(s) of length %d.' % (D_chunk if is_tuple else D_chunk[0], chunk_size)) if any((_num_samples(r) != chunk_size for r in D_chunk)): actual_size = tuple((_num_samples(r) for r in D_chunk)) raise ValueError('reduce_func returned object of length %s. Expected same length as input: %d.' % (actual_size if is_tuple else actual_size[0], chunk_size)) yield D_chunk
def pairwise_distances_chunked(X, Y=None, *, reduce_func=None, metric='euclidean', n_jobs=None, working_memory=None, **kwds): """Generate a distance matrix chunk by chunk with optional reduction. In cases where not all of a pairwise distance matrix needs to be stored at once, this is used to calculate pairwise distances in ``working_memory``-sized chunks. If ``reduce_func`` is given, it is run on each chunk and its return values are concatenated into lists, arrays or sparse matrices. Parameters ---------- X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features) Array of pairwise distances between samples, or a feature array. The shape the array should be (n_samples_X, n_samples_X) if metric='precomputed' and (n_samples_X, n_features) otherwise. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. Only allowed if metric != "precomputed". reduce_func : callable, default=None The function which is applied on each chunk of the distance matrix, reducing it to needed values. ``reduce_func(D_chunk, start)`` is called repeatedly, where ``D_chunk`` is a contiguous vertical slice of the pairwise distance matrix, starting at row ``start``. It should return one of: None; an array, a list, or a sparse matrix of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning None is useful for in-place operations, rather than reductions. If None, pairwise_distances_chunked returns a generator of vertical chunks of the distance matrix. metric : str or callable, default='euclidean' The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int, default=None The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. working_memory : int, default=None The sought maximum memory for temporary distance matrix chunks. When None (default), the value of ``sklearn.get_config()['working_memory']`` is used. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Yields ------ D_chunk : {ndarray, sparse matrix} A contiguous slice of distance matrix, optionally processed by ``reduce_func``. Examples -------- Without reduce_func: >>> import numpy as np >>> from sklearn.metrics import pairwise_distances_chunked >>> X = np.random.RandomState(0).rand(5, 3) >>> D_chunk = next(pairwise_distances_chunked(X)) >>> D_chunk array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...], [0.29..., 0. ..., 0.57..., 0.41..., 0.76...], [0.41..., 0.57..., 0. ..., 0.44..., 0.90...], [0.19..., 0.41..., 0.44..., 0. ..., 0.51...], [0.57..., 0.76..., 0.90..., 0.51..., 0. 
...]]) Retrieve all neighbors and average distance within radius r: >>> r = .2 >>> def reduce_func(D_chunk, start): ... neigh = [np.flatnonzero(d < r) for d in D_chunk] ... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1) ... return neigh, avg_dist >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func) >>> neigh, avg_dist = next(gen) >>> neigh [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])] >>> avg_dist array([0.039..., 0. , 0. , 0.039..., 0. ]) Where r is defined per sample, we need to make use of ``start``: >>> r = [.2, .4, .4, .3, .1] >>> def reduce_func(D_chunk, start): ... neigh = [np.flatnonzero(d < r[i]) ... for i, d in enumerate(D_chunk, start)] ... return neigh >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func)) >>> neigh [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])] Force row-by-row generation by reducing ``working_memory``: >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func, ... working_memory=0) >>> next(gen) [array([0, 3])] >>> next(gen) [array([0, 1])] """ n_samples_X = _num_samples(X) if metric == 'precomputed': slices = (slice(0, n_samples_X),) else: if Y is None: Y = X chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y), max_n_rows=n_samples_X, working_memory=working_memory) slices = gen_batches(n_samples_X, chunk_n_rows) <DeepExtract> if metric == 'seuclidean' and 'V' not in kwds: dtype = np.float64 if sp_version < parse_version('1.5') else None if X is Y: V = np.var(X, axis=0, ddof=1, dtype=dtype) else: raise ValueError("The 'V' parameter is required for the seuclidean metric when Y is passed.") params = {'V': V} if metric == 'mahalanobis' and 'VI' not in kwds: if X is Y: VI = np.linalg.inv(np.cov(X.T)).T else: raise ValueError("The 'VI' parameter is required for the mahalanobis metric when Y is passed.") params = {'VI': VI} params = {} </DeepExtract> kwds.update(**params) for sl in slices: if sl.start == 0 and sl.stop == n_samples_X: X_chunk = X else: X_chunk = X[sl] <DeepExtract> if metric not in _VALID_METRICS and (not callable(metric)) and (metric != 'precomputed'): raise ValueError("Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable" % (metric, _VALID_METRICS)) if metric == 'precomputed': (X_chunk, _) = check_pairwise_arrays(X_chunk, Y, precomputed=True, force_all_finite=force_all_finite) whom = '`pairwise_distances`. Precomputed distance need to have non-negative values.' 
check_non_negative(X_chunk, whom=whom) D_chunk = X_chunk elif metric in PAIRWISE_DISTANCE_FUNCTIONS: func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds) else: if issparse(X_chunk) or issparse(Y): raise TypeError('scipy distance metrics do not support sparse matrices.') dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None if dtype == bool and (X_chunk.dtype != bool or (Y is not None and Y.dtype != bool)): msg = 'Data was converted to boolean for metric %s' % metric warnings.warn(msg, DataConversionWarning) (X_chunk, Y) = check_pairwise_arrays(X_chunk, Y, dtype=dtype, force_all_finite=force_all_finite) params = _precompute_metric_params(X_chunk, Y, metric=metric, **kwds) kwds.update(**params) if effective_n_jobs(n_jobs) == 1 and X_chunk is Y: D_chunk = distance.squareform(distance.pdist(X_chunk, metric=metric, **kwds)) func = partial(distance.cdist, metric=metric, **kwds) D_chunk = _parallel_pairwise(X_chunk, Y, func, n_jobs, **kwds) </DeepExtract> if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None) is euclidean_distances: D_chunk.flat[sl.start::_num_samples(X) + 1] = 0 if reduce_func is not None: chunk_size = D_chunk.shape[0] D_chunk = reduce_func(D_chunk, sl.start) <DeepExtract> if D_chunk is None: return is_tuple = isinstance(D_chunk, tuple) if not is_tuple: D_chunk = (D_chunk,) if any((isinstance(r, tuple) or not hasattr(r, '__iter__') for r in D_chunk)): raise TypeError('reduce_func returned %r. Expected sequence(s) of length %d.' % (D_chunk if is_tuple else D_chunk[0], chunk_size)) if any((_num_samples(r) != chunk_size for r in D_chunk)): actual_size = tuple((_num_samples(r) for r in D_chunk)) raise ValueError('reduce_func returned object of length %s. Expected same length as input: %d.' % (actual_size if is_tuple else actual_size[0], chunk_size)) </DeepExtract> yield D_chunk
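As a hedged illustration of the chunked reduction described in the docstring above, the sketch below (assuming only NumPy and scikit-learn) finds each sample's nearest other sample without materialising the full distance matrix; the reduce_func name is an illustrative choice, not part of the original code.

import numpy as np
from sklearn.metrics import pairwise_distances_chunked

X = np.random.RandomState(0).rand(100, 5)

def reduce_func(D_chunk, start):
    # Blank out each row's self-distance, then keep only the argmin per row.
    rows = np.arange(D_chunk.shape[0])
    D_chunk[rows, rows + start] = np.inf
    return D_chunk.argmin(axis=1)

# Chunk size is derived from the working_memory config; for this toy input
# the whole computation fits into a single chunk.
nearest = np.concatenate(
    list(pairwise_distances_chunked(X, reduce_func=reduce_func))
)
print(nearest.shape)  # (100,)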
def decision_function(self, X): """Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. `log p(y = k | x)`. In a binary classification setting this instead corresponds to the difference `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is (n_samples,), giving the log likelihood ratio of the positive class. """ check_is_fitted(self) X = self._validate_data(X, reset=False) norm2 = [] for i in range(len(self.classes_)): R = self.rotations_[i] S = self.scalings_[i] Xm = X - self.means_[i] X2 = np.dot(Xm, R * S ** (-0.5)) norm2.append(np.sum(X2 ** 2, axis=1)) norm2 = np.array(norm2).T u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) dec_func = -0.5 * (norm2 + u) + np.log(self.priors_) if len(self.classes_) == 2: return dec_func[:, 1] - dec_func[:, 0] return dec_func
def decision_function(self, X): """Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. `log p(y = k | x)`. In a binary classification setting this instead corresponds to the difference `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is (n_samples,), giving the log likelihood ratio of the positive class. """ <DeepExtract> check_is_fitted(self) X = self._validate_data(X, reset=False) norm2 = [] for i in range(len(self.classes_)): R = self.rotations_[i] S = self.scalings_[i] Xm = X - self.means_[i] X2 = np.dot(Xm, R * S ** (-0.5)) norm2.append(np.sum(X2 ** 2, axis=1)) norm2 = np.array(norm2).T u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) dec_func = -0.5 * (norm2 + u) + np.log(self.priors_) </DeepExtract> if len(self.classes_) == 2: return dec_func[:, 1] - dec_func[:, 0] return dec_func
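For context, a minimal sketch of calling this decision function through QuadraticDiscriminantAnalysis on a toy two-class problem; the data is made up, and in the binary case the output is the per-sample log-likelihood ratio described in the docstring.

import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2) - 2, rng.randn(50, 2) + 2])
y = np.repeat([0, 1], 50)

qda = QuadraticDiscriminantAnalysis().fit(X, y)
scores = qda.decision_function(X)          # shape (n_samples,) for two classes
print(scores.shape, (scores > 0).mean())   # positive scores favour class 1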
def benchmark(configuration): """Run the whole benchmark.""" if verbose: print('generating dataset...') (X, y, coef) = make_regression(n_samples=configuration['n_train'] + configuration['n_test'], n_features=configuration['n_features'], noise=noise, coef=True) random_seed = 13 (X_train, X_test, y_train, y_test) = train_test_split(X, y, train_size=configuration['n_train'], test_size=configuration['n_test'], random_state=random_seed) (X_train, y_train) = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print('ok') (X_train, y_train, X_test, y_test) = (X_train, y_train, X_test, y_test) stats = {} for estimator_conf in configuration['estimators']: print('Benchmarking', estimator_conf['instance']) estimator_conf['instance'].fit(X_train, y_train) gc.collect() atomic_runtimes = atomic_benchmark_estimator(estimator_conf['instance'], X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator_conf['instance'], X_test, n_bulk_repeats, verbose) (a, b) = (atomic_runtimes, bulk_runtimes) stats[estimator_conf['name']] = {'atomic': a, 'bulk': b} cls_names = [estimator_conf['name'] for estimator_conf in configuration['estimators']] runtimes = [1000000.0 * stats[clf_name]['atomic'] for clf_name in cls_names] (fig, ax1) = plt.subplots(figsize=(10, 6)) bp = plt.boxplot(runtimes) cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer'](estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp['boxes'], color='black') plt.setp(bp['whiskers'], color='black') plt.setp(bp['fliers'], color='red', marker='+') ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Prediction Time per Instance - %s, %d feats.' % ('atomic'.capitalize(), configuration['n_features'])) ax1.set_ylabel('Prediction Time (us)') plt.show() runtimes = [1000000.0 * stats[clf_name]['bulk'] for clf_name in cls_names] (fig, ax1) = plt.subplots(figsize=(10, 6)) bp = plt.boxplot(runtimes) cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer'](estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp['boxes'], color='black') plt.setp(bp['whiskers'], color='black') plt.setp(bp['fliers'], color='red', marker='+') ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Prediction Time per Instance - %s, %d feats.' % ('bulk (%d)' % configuration['n_test'].capitalize(), configuration['n_features'])) ax1.set_ylabel('Prediction Time (us)') plt.show() </DeepExtract>
def benchmark(configuration): """Run the whole benchmark.""" <DeepExtract> if verbose: print('generating dataset...') (X, y, coef) = make_regression(n_samples=configuration['n_train'] + configuration['n_test'], n_features=configuration['n_features'], noise=noise, coef=True) random_seed = 13 (X_train, X_test, y_train, y_test) = train_test_split(X, y, train_size=configuration['n_train'], test_size=configuration['n_test'], random_state=random_seed) (X_train, y_train) = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print('ok') (X_train, y_train, X_test, y_test) = (X_train, y_train, X_test, y_test) </DeepExtract> stats = {} for estimator_conf in configuration['estimators']: print('Benchmarking', estimator_conf['instance']) estimator_conf['instance'].fit(X_train, y_train) gc.collect() <DeepExtract> atomic_runtimes = atomic_benchmark_estimator(estimator_conf['instance'], X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator_conf['instance'], X_test, n_bulk_repeats, verbose) (a, b) = (atomic_runtimes, bulk_runtimes) </DeepExtract> stats[estimator_conf['name']] = {'atomic': a, 'bulk': b} cls_names = [estimator_conf['name'] for estimator_conf in configuration['estimators']] runtimes = [1000000.0 * stats[clf_name]['atomic'] for clf_name in cls_names] <DeepExtract> (fig, ax1) = plt.subplots(figsize=(10, 6)) bp = plt.boxplot(runtimes) cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer'](estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp['boxes'], color='black') plt.setp(bp['whiskers'], color='black') plt.setp(bp['fliers'], color='red', marker='+') ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Prediction Time per Instance - %s, %d feats.' % ('atomic'.capitalize(), configuration['n_features'])) ax1.set_ylabel('Prediction Time (us)') plt.show() </DeepExtract> runtimes = [1000000.0 * stats[clf_name]['bulk'] for clf_name in cls_names] <DeepExtract> (fig, ax1) = plt.subplots(figsize=(10, 6)) bp = plt.boxplot(runtimes) cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer'](estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp['boxes'], color='black') plt.setp(bp['whiskers'], color='black') plt.setp(bp['fliers'], color='red', marker='+') ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Prediction Time per Instance - %s, %d feats.' % ('bulk (%d)' % configuration['n_test'].capitalize(), configuration['n_features'])) ax1.set_ylabel('Prediction Time (us)') plt.show() </DeepExtract>
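A stripped-down sketch of the atomic-versus-bulk latency measurement that the benchmark above plots; it only reproduces the timing idea, not the box plots, and the estimator/data choices are placeholders.

import time
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge

X, y = make_regression(n_samples=500, n_features=20, random_state=0)
est = Ridge().fit(X[:400], y[:400])
X_test = X[400:]

# Atomic: one sample per predict call.  Bulk: the whole test set at once.
atomic = []
for i in range(len(X_test)):
    tic = time.perf_counter()
    est.predict(X_test[[i]])
    atomic.append(time.perf_counter() - tic)
tic = time.perf_counter()
est.predict(X_test)
bulk = (time.perf_counter() - tic) / len(X_test)
print(f"atomic: {np.mean(atomic):.2e}s/sample, bulk: {bulk:.2e}s/sample")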
@idf_.setter def idf_(self, value): if not self.use_idf: raise ValueError('`idf_` cannot be set when `user_idf=False`.') if not hasattr(self, '_tfidf'): self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf) vocabulary = self.vocabulary if vocabulary is not None: if isinstance(vocabulary, set): vocabulary = sorted(vocabulary) if not isinstance(vocabulary, Mapping): vocab = {} for (i, t) in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = 'Duplicate term in vocabulary: %r' % t raise ValueError(msg) vocabulary = vocab else: indices = set(vocabulary.values()) if len(indices) != len(vocabulary): raise ValueError('Vocabulary contains repeated indices.') for i in range(len(vocabulary)): if i not in indices: msg = "Vocabulary of size %d doesn't contain index %d." % (len(vocabulary), i) raise ValueError(msg) if not vocabulary: raise ValueError('empty vocabulary passed to fit') self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False if hasattr(self, 'vocabulary_'): if len(self.vocabulary_) != len(value): raise ValueError('idf length = %d must be equal to vocabulary size = %d' % (len(value), len(self.vocabulary))) self._tfidf.idf_ = value
@idf_.setter def idf_(self, value): if not self.use_idf: raise ValueError('`idf_` cannot be set when `user_idf=False`.') if not hasattr(self, '_tfidf'): self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf) <DeepExtract> vocabulary = self.vocabulary if vocabulary is not None: if isinstance(vocabulary, set): vocabulary = sorted(vocabulary) if not isinstance(vocabulary, Mapping): vocab = {} for (i, t) in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = 'Duplicate term in vocabulary: %r' % t raise ValueError(msg) vocabulary = vocab else: indices = set(vocabulary.values()) if len(indices) != len(vocabulary): raise ValueError('Vocabulary contains repeated indices.') for i in range(len(vocabulary)): if i not in indices: msg = "Vocabulary of size %d doesn't contain index %d." % (len(vocabulary), i) raise ValueError(msg) if not vocabulary: raise ValueError('empty vocabulary passed to fit') self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False </DeepExtract> if hasattr(self, 'vocabulary_'): if len(self.vocabulary_) != len(value): raise ValueError('idf length = %d must be equal to vocabulary size = %d' % (len(value), len(self.vocabulary))) self._tfidf.idf_ = value
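A tiny sketch of the first guard in this setter, assuming a scikit-learn version that exposes it: assigning idf_ while use_idf=False raises the ValueError shown above (whose message carries a `user_idf` typo in the original).

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

vec = TfidfVectorizer(use_idf=False)
try:
    vec.idf_ = np.ones(3)   # rejected: idf_ only makes sense with use_idf=True
except ValueError as exc:
    print(exc)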
def setDebug(self, flag=True): """ Enable display of debugging messages while doing pattern matching. Set C{flag} to True to enable, False to disable. Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. """ if flag: self.debugActions = (_defaultStartDebugAction or _defaultStartDebugAction, _defaultSuccessDebugAction or _defaultSuccessDebugAction, _defaultExceptionDebugAction or _defaultExceptionDebugAction) self.debug = True return self else: self.debug = False return self
def setDebug(self, flag=True): """ Enable display of debugging messages while doing pattern matching. Set C{flag} to True to enable, False to disable. Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. """ if flag: <DeepExtract> self.debugActions = (_defaultStartDebugAction or _defaultStartDebugAction, _defaultSuccessDebugAction or _defaultSuccessDebugAction, _defaultExceptionDebugAction or _defaultExceptionDebugAction) self.debug = True return self </DeepExtract> else: self.debug = False return self
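As a complement to the docstring example, a short pyparsing sketch showing that setDebug returns the expression itself, so it can be chained with other setters and later switched off; exact debug output may differ slightly across pyparsing versions.

from pyparsing import Word, alphas

wd = Word(alphas).setName("alphaword").setDebug()   # setDebug returns self
wd.parseString("abc")       # emits the Match/Matched debug messages
wd.setDebug(False)          # silences the expression again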
def variable_batch_size_comparison(data): batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10, data.shape[0], num=10)] for n_components in [i.astype(int) for i in np.linspace(data.shape[1] // 10, data.shape[1], num=4)]: all_times = defaultdict(list) all_errors = defaultdict(list) pca = PCA(n_components=n_components) rpca = PCA(n_components=n_components, svd_solver='randomized', random_state=1999) results_dict = {k: benchmark(est, data) for (k, est) in [('pca', pca), ('rpca', rpca)]} all_times['pca'].extend([results_dict['pca']['time']] * len(batch_sizes)) all_errors['pca'].extend([results_dict['pca']['error']] * len(batch_sizes)) all_times['rpca'].extend([results_dict['rpca']['time']] * len(batch_sizes)) all_errors['rpca'].extend([results_dict['rpca']['error']] * len(batch_sizes)) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size) results_dict = {k: benchmark(est, data) for (k, est) in [('ipca', ipca)]} all_times['ipca'].append(results_dict['ipca']['time']) all_errors['ipca'].append(results_dict['ipca']['error']) plt.figure() plot_results(batch_sizes, all_times['pca'], label='PCA') plot_results(batch_sizes, all_times['ipca'], label='IncrementalPCA') plt.legend(loc='lower left') plt.suptitle('Algorithm runtime vs. batch_size for n_components %i\n LFW, size %i x %i' % (n_components, data.shape[0], data.shape[1])) plt.xlabel('Batch size') plt.ylabel('Time (seconds)') plt.figure() plot_results(batch_sizes, all_errors['pca'], label='PCA') plot_results(batch_sizes, all_errors['ipca'], label='IncrementalPCA') plt.legend(loc='lower left') plt.suptitle('Algorithm error vs. batch_size for n_components %i\n LFW, size %i x %i' % (n_components, data.shape[0], data.shape[1])) plt.xlabel('Batch size') plt.ylabel('Mean absolute error') </DeepExtract>
def variable_batch_size_comparison(data): batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10, data.shape[0], num=10)] for n_components in [i.astype(int) for i in np.linspace(data.shape[1] // 10, data.shape[1], num=4)]: all_times = defaultdict(list) all_errors = defaultdict(list) pca = PCA(n_components=n_components) rpca = PCA(n_components=n_components, svd_solver='randomized', random_state=1999) results_dict = {k: benchmark(est, data) for (k, est) in [('pca', pca), ('rpca', rpca)]} all_times['pca'].extend([results_dict['pca']['time']] * len(batch_sizes)) all_errors['pca'].extend([results_dict['pca']['error']] * len(batch_sizes)) all_times['rpca'].extend([results_dict['rpca']['time']] * len(batch_sizes)) all_errors['rpca'].extend([results_dict['rpca']['error']] * len(batch_sizes)) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size) results_dict = {k: benchmark(est, data) for (k, est) in [('ipca', ipca)]} all_times['ipca'].append(results_dict['ipca']['time']) all_errors['ipca'].append(results_dict['ipca']['error']) <DeepExtract> plt.figure() plot_results(batch_sizes, all_times['pca'], label='PCA') plot_results(batch_sizes, all_times['ipca'], label='IncrementalPCA') plt.legend(loc='lower left') plt.suptitle('Algorithm runtime vs. batch_size for n_components %i\n LFW, size %i x %i' % (n_components, data.shape[0], data.shape[1])) plt.xlabel('Batch size') plt.ylabel('Time (seconds)') </DeepExtract> <DeepExtract> plt.figure() plot_results(batch_sizes, all_errors['pca'], label='PCA') plot_results(batch_sizes, all_errors['ipca'], label='IncrementalPCA') plt.legend(loc='lower left') plt.suptitle('Algorithm error vs. batch_size for n_components %i\n LFW, size %i x %i' % (n_components, data.shape[0], data.shape[1])) plt.xlabel('Batch size') plt.ylabel('Mean absolute error') </DeepExtract>
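A condensed sketch of the comparison driven by the function above, limited to one n_components and one batch_size: both solvers are scored by mean absolute reconstruction error on the same data (the digits dataset stands in for the original faces data here).

import numpy as np
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA, IncrementalPCA

X = load_digits().data
for name, est in [("pca", PCA(n_components=16)),
                  ("ipca", IncrementalPCA(n_components=16, batch_size=200))]:
    X_hat = est.inverse_transform(est.fit_transform(X))
    print(name, np.abs(X - X_hat).mean().round(4))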
def predict_log_proba(self, X): """ Return log probability estimates for the test vectors X. Parameters ---------- X : {array-like, object with finite length or shape} Training data. Returns ------- P : ndarray of shape (n_samples, n_classes) or list of such arrays Returns the log probability of the sample for each class in the model, where classes are ordered arithmetically for each output. """ check_is_fitted(self) n_samples = _num_samples(X) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1: n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] P = [] for k in range(self.n_outputs_): if self._strategy == 'most_frequent': ind = class_prior_[k].argmax() out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 elif self._strategy == 'prior': out = np.ones((n_samples, 1)) * class_prior_[k] elif self._strategy == 'stratified': out = rs.multinomial(1, class_prior_[k], size=n_samples) out = out.astype(np.float64) elif self._strategy == 'uniform': out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) out /= n_classes_[k] elif self._strategy == 'constant': ind = np.where(classes_[k] == constant[k]) out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 P.append(out) if self.n_outputs_ == 1: P = P[0] proba = P if self.n_outputs_ == 1: return np.log(proba) else: return [np.log(p) for p in proba]
def predict_log_proba(self, X): """ Return log probability estimates for the test vectors X. Parameters ---------- X : {array-like, object with finite length or shape} Training data. Returns ------- P : ndarray of shape (n_samples, n_classes) or list of such arrays Returns the log probability of the sample for each class in the model, where classes are ordered arithmetically for each output. """ <DeepExtract> check_is_fitted(self) n_samples = _num_samples(X) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1: n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] P = [] for k in range(self.n_outputs_): if self._strategy == 'most_frequent': ind = class_prior_[k].argmax() out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 elif self._strategy == 'prior': out = np.ones((n_samples, 1)) * class_prior_[k] elif self._strategy == 'stratified': out = rs.multinomial(1, class_prior_[k], size=n_samples) out = out.astype(np.float64) elif self._strategy == 'uniform': out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) out /= n_classes_[k] elif self._strategy == 'constant': ind = np.where(classes_[k] == constant[k]) out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 P.append(out) if self.n_outputs_ == 1: P = P[0] proba = P </DeepExtract> if self.n_outputs_ == 1: return np.log(proba) else: return [np.log(p) for p in proba]
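A quick sketch of the 'prior' branch above via DummyClassifier: every test sample receives the same log class priors, regardless of its features.

import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))
y = np.array([0, 0, 0, 0, 1, 1])
clf = DummyClassifier(strategy="prior").fit(X, y)
print(clf.predict_log_proba(X[:2]))   # each row is log([4/6, 2/6])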
def _get_support_mask(self): check_is_fitted(self) if self.percentile == 100: return np.ones(len(self.scores_), dtype=bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=bool) self.scores_ = as_float_array(self.scores_, copy=True) self.scores_[np.isnan(self.scores_)] = np.finfo(self.scores_.dtype).min self.scores_ = self.scores_ threshold = np.percentile(scores, 100 - self.percentile) mask = scores > threshold ties = np.where(scores == threshold)[0] if len(ties): max_feats = int(len(scores) * self.percentile / 100) kept_ties = ties[:max_feats - mask.sum()] mask[kept_ties] = True return mask
def _get_support_mask(self): check_is_fitted(self) if self.percentile == 100: return np.ones(len(self.scores_), dtype=bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=bool) <DeepExtract> self.scores_ = as_float_array(self.scores_, copy=True) self.scores_[np.isnan(self.scores_)] = np.finfo(self.scores_.dtype).min self.scores_ = self.scores_ </DeepExtract> threshold = np.percentile(scores, 100 - self.percentile) mask = scores > threshold ties = np.where(scores == threshold)[0] if len(ties): max_feats = int(len(scores) * self.percentile / 100) kept_ties = ties[:max_feats - mask.sum()] mask[kept_ties] = True return mask
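The mask above is easiest to exercise through the public SelectPercentile API; a small sketch keeping the top 20% of features scored by f_classif.

from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectPercentile, f_classif

X, y = make_classification(n_samples=200, n_features=20, n_informative=4,
                           random_state=0)
selector = SelectPercentile(f_classif, percentile=20).fit(X, y)
print(selector.get_support().sum(), "of", X.shape[1], "features kept")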
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features. """ if high is None and low is None and (limit is None): return (X, set()) if sp.isspmatrix_csr(X): dfs = np.bincount(X.indices, minlength=X.shape[1]) else: dfs = np.diff(X.indptr) mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: tfs = np.asarray(X.sum(axis=0)).ravel() mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 removed_terms = set() for (term, old_index) in list(vocabulary.items()): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError('After pruning, no terms remain. Try a lower min_df or a higher max_df.') return (X[:, kept_indices], removed_terms)
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features. """ if high is None and low is None and (limit is None): return (X, set()) <DeepExtract> if sp.isspmatrix_csr(X): dfs = np.bincount(X.indices, minlength=X.shape[1]) else: dfs = np.diff(X.indptr) </DeepExtract> mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: tfs = np.asarray(X.sum(axis=0)).ravel() mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 removed_terms = set() for (term, old_index) in list(vocabulary.items()): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError('After pruning, no terms remain. Try a lower min_df or a higher max_df.') return (X[:, kept_indices], removed_terms)
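The pruning above is what the public max_df/min_df/max_features parameters of CountVectorizer trigger; a minimal sketch where terms seen in fewer than two documents are dropped and collected in stop_words_.

from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat", "the dog sat", "the cat ran", "a bird sang"]
vec = CountVectorizer(min_df=2).fit(docs)
print(sorted(vec.vocabulary_))       # ['cat', 'sat', 'the']
print(sorted(vec.stop_words_))       # pruned terms: ['bird', 'dog', 'ran', 'sang']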
@pytest.mark.parametrize('NaiveBayes', ALL_NAIVE_BAYES_CLASSES) def test_NB_partial_fit_no_first_classes(NaiveBayes, global_random_seed): rng = np.random.RandomState(global_random_seed) X2 = rng.randint(5, size=(6, 100)) y2 = np.array([1, 1, 2, 2, 3, 3]) (X2, y2) = (X2, y2) with pytest.raises(ValueError, match='classes must be passed on the first call to partial_fit.'): NaiveBayes().partial_fit(X2, y2) clf = NaiveBayes() clf.partial_fit(X2, y2, classes=np.unique(y2)) with pytest.raises(ValueError, match='is not the same as on last call to partial_fit'): clf.partial_fit(X2, y2, classes=np.arange(42))
@pytest.mark.parametrize('NaiveBayes', ALL_NAIVE_BAYES_CLASSES) def test_NB_partial_fit_no_first_classes(NaiveBayes, global_random_seed): <DeepExtract> rng = np.random.RandomState(global_random_seed) X2 = rng.randint(5, size=(6, 100)) y2 = np.array([1, 1, 2, 2, 3, 3]) (X2, y2) = (X2, y2) </DeepExtract> with pytest.raises(ValueError, match='classes must be passed on the first call to partial_fit.'): NaiveBayes().partial_fit(X2, y2) clf = NaiveBayes() clf.partial_fit(X2, y2, classes=np.unique(y2)) with pytest.raises(ValueError, match='is not the same as on last call to partial_fit'): clf.partial_fit(X2, y2, classes=np.arange(42))
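A direct illustration of what the test above asserts: naive Bayes estimators need `classes` on the first partial_fit call, after which it can be omitted.

import numpy as np
from sklearn.naive_bayes import MultinomialNB

X = np.random.RandomState(0).randint(5, size=(6, 10))
y = np.array([1, 1, 2, 2, 3, 3])

clf = MultinomialNB()
clf.partial_fit(X, y, classes=np.unique(y))   # first call: classes required
clf.partial_fit(X, y)                         # later calls: classes remembered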
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None): """update H in Multiplicative Update NMF.""" if beta_loss == 2: numerator = safe_sparse_dot(W.T, X) denominator = np.linalg.multi_dot([W.T, W, H]) else: if sp.issparse(X): (ii, jj) = X.nonzero() n_vals = ii.shape[0] dot_vals = np.empty(n_vals) n_components = W.shape[1] batch_size = max(n_components, n_vals // n_components) for start in range(0, n_vals, batch_size): batch = slice(start, start + batch_size) dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(axis=1) WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape) WH_safe_X = WH.tocsr() else: WH_safe_X = np.dot(W, H) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X WH = WH_safe_X.copy() if beta_loss - 1.0 < 0: WH[WH < EPSILON] = EPSILON if beta_loss - 2.0 < 0: WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: WH_safe_X_data **= -1 WH_safe_X_data **= 2 WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 WH_safe_X_data *= X_data numerator = safe_sparse_dot(W.T, WH_safe_X) if beta_loss == 1: W_sum = np.sum(W, axis=0) W_sum[W_sum == 0] = 1.0 denominator = W_sum[:, np.newaxis] else: if sp.issparse(X): WtWH = np.empty(H.shape) for i in range(X.shape[1]): WHi = np.dot(W, H[:, i]) if beta_loss - 1 < 0: WHi[WHi < EPSILON] = EPSILON WHi **= beta_loss - 1 WtWH[:, i] = np.dot(W.T, WHi) else: WH **= beta_loss - 1 WtWH = np.dot(W.T, WH) denominator = WtWH if l1_reg_H > 0: denominator += l1_reg_H if l2_reg_H > 0: denominator = denominator + l2_reg_H * H denominator[denominator == 0] = EPSILON if A is not None and B is not None: if gamma != 1: H **= 1 / gamma numerator *= H A *= rho B *= rho A += numerator B += denominator H = A / B if gamma != 1: H **= gamma else: delta_H = numerator delta_H /= denominator if gamma != 1: delta_H **= gamma H *= delta_H return H
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None): """update H in Multiplicative Update NMF.""" if beta_loss == 2: numerator = safe_sparse_dot(W.T, X) denominator = np.linalg.multi_dot([W.T, W, H]) else: <DeepExtract> if sp.issparse(X): (ii, jj) = X.nonzero() n_vals = ii.shape[0] dot_vals = np.empty(n_vals) n_components = W.shape[1] batch_size = max(n_components, n_vals // n_components) for start in range(0, n_vals, batch_size): batch = slice(start, start + batch_size) dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(axis=1) WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape) WH_safe_X = WH.tocsr() else: WH_safe_X = np.dot(W, H) </DeepExtract> if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X WH = WH_safe_X.copy() if beta_loss - 1.0 < 0: WH[WH < EPSILON] = EPSILON if beta_loss - 2.0 < 0: WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: WH_safe_X_data **= -1 WH_safe_X_data **= 2 WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 WH_safe_X_data *= X_data numerator = safe_sparse_dot(W.T, WH_safe_X) if beta_loss == 1: W_sum = np.sum(W, axis=0) W_sum[W_sum == 0] = 1.0 denominator = W_sum[:, np.newaxis] else: if sp.issparse(X): WtWH = np.empty(H.shape) for i in range(X.shape[1]): WHi = np.dot(W, H[:, i]) if beta_loss - 1 < 0: WHi[WHi < EPSILON] = EPSILON WHi **= beta_loss - 1 WtWH[:, i] = np.dot(W.T, WHi) else: WH **= beta_loss - 1 WtWH = np.dot(W.T, WH) denominator = WtWH if l1_reg_H > 0: denominator += l1_reg_H if l2_reg_H > 0: denominator = denominator + l2_reg_H * H denominator[denominator == 0] = EPSILON if A is not None and B is not None: if gamma != 1: H **= 1 / gamma numerator *= H A *= rho B *= rho A += numerator B += denominator H = A / B if gamma != 1: H **= gamma else: delta_H = numerator delta_H /= denominator if gamma != 1: delta_H **= gamma H *= delta_H return H
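A bare-NumPy sketch of the beta_loss=2 branch above: under the Frobenius loss, H is rescaled elementwise by (W^T X) / (W^T W H); the small epsilon guarding against division by zero is an illustrative choice, not taken from the original code.

import numpy as np

rng = np.random.RandomState(0)
X = np.abs(rng.randn(20, 8))
W = np.abs(rng.randn(20, 3))
H = np.abs(rng.randn(3, 8))

for _ in range(100):
    H *= (W.T @ X) / (W.T @ W @ H + 1e-12)   # multiplicative update of H only
print(round(np.linalg.norm(X - W @ H), 3))   # residual after updating H alone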
def plot(self, *, ax=None, n_cols=3, line_kw=None, ice_lines_kw=None, pd_line_kw=None, contour_kw=None, bar_kw=None, heatmap_kw=None, pdp_lim=None, centered=False): """Plot partial dependence plots. Parameters ---------- ax : Matplotlib axes or array-like of Matplotlib axes, default=None - If a single axis is passed in, it is treated as a bounding axes and a grid of partial dependence plots will be drawn within these bounds. The `n_cols` parameter controls the number of columns in the grid. - If an array-like of axes are passed in, the partial dependence plots will be drawn directly into these axes. - If `None`, a figure and a bounding axes is created and treated as the single axes case. n_cols : int, default=3 The maximum number of columns in the grid plot. Only active when `ax` is a single axes or `None`. line_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.plot` call. For one-way partial dependence plots. ice_lines_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For ICE lines in the one-way partial dependence plots. The key value pairs defined in `ice_lines_kw` takes priority over `line_kw`. .. versionadded:: 1.0 pd_line_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For partial dependence in one-way partial dependence plots. The key value pairs defined in `pd_line_kw` takes priority over `line_kw`. .. versionadded:: 1.0 contour_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.contourf` call for two-way partial dependence plots. bar_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.bar` call for one-way categorical partial dependence plots. .. versionadded:: 1.2 heatmap_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.imshow` call for two-way categorical partial dependence plots. .. versionadded:: 1.2 pdp_lim : dict, default=None Global min and max average predictions, such that all plots will have the same scale and y limits. `pdp_lim[1]` is the global min and max for single partial dependence curves. `pdp_lim[2]` is the global min and max for two-way partial dependence curves. If `None` (default), the limit will be inferred from the global minimum and maximum of all predictions. .. versionadded:: 1.1 centered : bool, default=False If `True`, the ICE and PD lines will start at the origin of the y-axis. By default, no centering is done. .. versionadded:: 1.1 Returns ------- display : :class:`~sklearn.inspection.PartialDependenceDisplay` Returns a :class:`~sklearn.inspection.PartialDependenceDisplay` object that contains the partial dependence plots. """ check_matplotlib_support('plot_partial_dependence') import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpecFromSubplotSpec if isinstance(self.kind, str): kind = [self.kind] * len(self.features) else: kind = self.kind if self.is_categorical is None: is_categorical = [(False,) if len(fx) == 1 else (False, False) for fx in self.features] else: is_categorical = self.is_categorical if len(kind) != len(self.features): raise ValueError(f'When `kind` is provided as a list of strings, it should contain as many elements as `features`. `kind` contains {len(kind)} element(s) and `features` contains {len(self.features)} element(s).') valid_kinds = {'average', 'individual', 'both'} if any([k not in valid_kinds for k in kind]): raise ValueError(f'Values provided to `kind` must be one of: {valid_kinds!r} or a list of such values. 
Currently, kind={self.kind!r}') if self.pdp_lim != 'deprecated': warnings.warn('The `pdp_lim` parameter is deprecated in version 1.1 and will be removed in version 1.3. Provide `pdp_lim` to the `plot` method.instead.', FutureWarning) if pdp_lim is not None and self.pdp_lim != pdp_lim: warnings.warn('`pdp_lim` has been passed in both the constructor and the `plot` method. For backward compatibility, the parameter from the constructor will be used.', UserWarning) pdp_lim = self.pdp_lim if not centered: pd_results_ = self.pd_results else: pd_results_ = [] for (kind_plot, pd_result) in zip(kind, self.pd_results): current_results = {'grid_values': pd_result['grid_values']} if kind_plot in ('individual', 'both'): preds = pd_result.individual preds = preds - preds[self.target_idx, :, 0, None] current_results['individual'] = preds if kind_plot in ('average', 'both'): avg_preds = pd_result.average avg_preds = avg_preds - avg_preds[self.target_idx, 0, None] current_results['average'] = avg_preds pd_results_.append(Bunch(**current_results)) if pdp_lim is None: pdp_lim = {} for (kind_plot, pdp) in zip(kind, pd_results_): values = pdp['grid_values'] preds = pdp.average if kind_plot == 'average' else pdp.individual min_pd = preds[self.target_idx].min() max_pd = preds[self.target_idx].max() span = max_pd - min_pd min_pd -= 0.05 * span max_pd += 0.05 * span n_fx = len(values) (old_min_pd, old_max_pd) = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) if line_kw is None: line_kw = {} if ice_lines_kw is None: ice_lines_kw = {} if pd_line_kw is None: pd_line_kw = {} if bar_kw is None: bar_kw = {} if heatmap_kw is None: heatmap_kw = {} if ax is None: (_, ax) = plt.subplots() if contour_kw is None: contour_kw = {} default_contour_kws = {'alpha': 0.75} contour_kw = {**default_contour_kws, **contour_kw} n_features = len(self.features) is_average_plot = [kind_plot == 'average' for kind_plot in kind] if all(is_average_plot): n_ice_lines = 0 n_lines = 1 else: ice_plot_idx = is_average_plot.index(False) if isinstance(self.subsample, numbers.Integral): if self.subsample < len(pd_results_[ice_plot_idx].individual[0]): n_ice_lines = self.subsample n_ice_lines = len(pd_results_[ice_plot_idx].individual[0]) elif isinstance(self.subsample, numbers.Real): n_ice_lines = ceil(len(pd_results_[ice_plot_idx].individual[0]) * self.subsample) n_ice_lines = len(pd_results_[ice_plot_idx].individual[0]) if any([kind_plot == 'both' for kind_plot in kind]): n_lines = n_ice_lines + 1 else: n_lines = n_ice_lines if isinstance(ax, plt.Axes): if not ax.axison: raise ValueError('The ax was already used in another plot function, please set ax=display.axes_ instead') ax.set_axis_off() self.bounding_ax_ = ax self.figure_ = ax.figure n_cols = min(n_cols, n_features) n_rows = int(np.ceil(n_features / float(n_cols))) self.axes_ = np.empty((n_rows, n_cols), dtype=object) if all(is_average_plot): self.lines_ = np.empty((n_rows, n_cols), dtype=object) else: self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object) self.contours_ = np.empty((n_rows, n_cols), dtype=object) self.bars_ = np.empty((n_rows, n_cols), dtype=object) self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object) axes_ravel = self.axes_.ravel() gs = GridSpecFromSubplotSpec(n_rows, n_cols, subplot_spec=ax.get_subplotspec()) for (i, spec) in zip(range(n_features), gs): axes_ravel[i] = self.figure_.add_subplot(spec) else: ax = np.asarray(ax, dtype=object) if ax.size != n_features: raise 
ValueError('Expected ax to have {} axes, got {}'.format(n_features, ax.size)) if ax.ndim == 2: n_cols = ax.shape[1] else: n_cols = None self.bounding_ax_ = None self.figure_ = ax.ravel()[0].figure self.axes_ = ax if all(is_average_plot): self.lines_ = np.empty_like(ax, dtype=object) else: self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object) self.contours_ = np.empty_like(ax, dtype=object) self.bars_ = np.empty_like(ax, dtype=object) self.heatmaps_ = np.empty_like(ax, dtype=object) if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object) self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object) for (pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot)) in enumerate(zip(self.axes_.ravel(), self.features, is_categorical, pd_results_, kind)): avg_preds = None preds = None feature_values = pd_result['grid_values'] if kind_plot == 'individual': preds = pd_result.individual elif kind_plot == 'average': avg_preds = pd_result.average else: avg_preds = pd_result.average preds = pd_result.individual if len(feature_values) == 1: default_line_kws = {'color': 'C0', 'label': 'average' if kind_plot == 'both' else None} if kind_plot == 'individual': default_ice_lines_kws = {'alpha': 0.3, 'linewidth': 0.5} default_pd_lines_kws = {} elif kind_plot == 'both': default_ice_lines_kws = {'alpha': 0.3, 'linewidth': 0.5, 'color': 'tab:blue'} default_pd_lines_kws = {'color': 'tab:orange', 'linestyle': '--'} else: default_ice_lines_kws = {} default_pd_lines_kws = {} ice_lines_kw = {**default_line_kws, **default_ice_lines_kws, **line_kw, **ice_lines_kw} del ice_lines_kw['label'] pd_line_kw = {**default_line_kws, **default_pd_lines_kws, **line_kw, **pd_line_kw} default_bar_kws = {'color': 'C0'} bar_kw = {**default_bar_kws, **bar_kw} default_heatmap_kw = {} heatmap_kw = {**default_heatmap_kw, **heatmap_kw} from matplotlib import transforms if kind_plot in ('individual', 'both'): self._plot_ice_lines(preds[self.target_idx], feature_values[0], n_ice_lines, axi, pd_plot_idx, n_lines, ice_lines_kw) if kind_plot in ('average', 'both'): if kind_plot == 'average': pd_line_idx = pd_plot_idx else: pd_line_idx = pd_plot_idx * n_lines + n_ice_lines self._plot_average_dependence(avg_preds[self.target_idx].ravel(), feature_values[0], axi, pd_line_idx, pd_line_kw, cat[0], bar_kw) trans = transforms.blended_transform_factory(axi.transData, axi.transAxes) vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) if self.deciles.get(feature_idx[0], None) is not None: self.deciles_vlines_[vlines_idx] = axi.vlines(self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color='k') min_val = min((val[0] for val in pdp_lim.values())) max_val = max((val[1] for val in pdp_lim.values())) axi.set_ylim([min_val, max_val]) if not axi.get_xlabel(): axi.set_xlabel(self.feature_names[feature_idx[0]]) if n_cols is None or pd_plot_idx % n_cols == 0: if not axi.get_ylabel(): axi.set_ylabel('Partial dependence') else: axi.set_yticklabels([]) if pd_line_kw.get('label', None) and kind_plot != 'individual' and (not cat[0]): axi.legend() else: if cat[0] and cat[1]: import matplotlib.pyplot as plt default_im_kw = dict(interpolation='nearest', cmap='viridis') im_kw = {**default_im_kw, **heatmap_kw} data = avg_preds[self.target_idx] im = axi.imshow(data, **im_kw) text = None (cmap_min, cmap_max) = (im.cmap(0), im.cmap(1.0)) text = np.empty_like(data, dtype=object) thresh = (data.max() + data.min()) / 2.0 for flat_index in range(data.size): (row, col) = 
np.unravel_index(flat_index, data.shape) color = cmap_max if data[row, col] < thresh else cmap_min values_format = '.2f' text_data = format(data[row, col], values_format) text_kwargs = dict(ha='center', va='center', color=color) text[row, col] = axi.text(col, row, text_data, **text_kwargs) fig = axi.figure fig.colorbar(im, ax=axi) axi.set(xticks=np.arange(len(feature_values[1])), yticks=np.arange(len(feature_values[0])), xticklabels=feature_values[1], yticklabels=feature_values[0], xlabel=self.feature_names[feature_idx[1]], ylabel=self.feature_names[feature_idx[0]]) plt.setp(axi.get_xticklabels(), rotation='vertical') heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape) self.heatmaps_[heatmap_idx] = im else: from matplotlib import transforms (XX, YY) = np.meshgrid(feature_values[0], feature_values[1]) Z = avg_preds[self.target_idx].T CS = axi.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape) self.contours_[contour_idx] = axi.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], **contour_kw) axi.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) trans = transforms.blended_transform_factory(axi.transData, axi.transAxes) (xlim, ylim) = (axi.get_xlim(), axi.get_ylim()) vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) self.deciles_vlines_[vlines_idx] = axi.vlines(self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color='k') hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape) self.deciles_hlines_[hlines_idx] = axi.hlines(self.deciles[feature_idx[1]], 0, 0.05, transform=trans, color='k') axi.set_xlim(xlim) axi.set_ylim(ylim) if not axi.get_xlabel(): axi.set_xlabel(self.feature_names[feature_idx[0]]) axi.set_ylabel(self.feature_names[feature_idx[1]]) return self
def plot(self, *, ax=None, n_cols=3, line_kw=None, ice_lines_kw=None, pd_line_kw=None, contour_kw=None, bar_kw=None, heatmap_kw=None, pdp_lim=None, centered=False): """Plot partial dependence plots. Parameters ---------- ax : Matplotlib axes or array-like of Matplotlib axes, default=None - If a single axis is passed in, it is treated as a bounding axes and a grid of partial dependence plots will be drawn within these bounds. The `n_cols` parameter controls the number of columns in the grid. - If an array-like of axes are passed in, the partial dependence plots will be drawn directly into these axes. - If `None`, a figure and a bounding axes is created and treated as the single axes case. n_cols : int, default=3 The maximum number of columns in the grid plot. Only active when `ax` is a single axes or `None`. line_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.plot` call. For one-way partial dependence plots. ice_lines_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For ICE lines in the one-way partial dependence plots. The key value pairs defined in `ice_lines_kw` takes priority over `line_kw`. .. versionadded:: 1.0 pd_line_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For partial dependence in one-way partial dependence plots. The key value pairs defined in `pd_line_kw` takes priority over `line_kw`. .. versionadded:: 1.0 contour_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.contourf` call for two-way partial dependence plots. bar_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.bar` call for one-way categorical partial dependence plots. .. versionadded:: 1.2 heatmap_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.imshow` call for two-way categorical partial dependence plots. .. versionadded:: 1.2 pdp_lim : dict, default=None Global min and max average predictions, such that all plots will have the same scale and y limits. `pdp_lim[1]` is the global min and max for single partial dependence curves. `pdp_lim[2]` is the global min and max for two-way partial dependence curves. If `None` (default), the limit will be inferred from the global minimum and maximum of all predictions. .. versionadded:: 1.1 centered : bool, default=False If `True`, the ICE and PD lines will start at the origin of the y-axis. By default, no centering is done. .. versionadded:: 1.1 Returns ------- display : :class:`~sklearn.inspection.PartialDependenceDisplay` Returns a :class:`~sklearn.inspection.PartialDependenceDisplay` object that contains the partial dependence plots. """ check_matplotlib_support('plot_partial_dependence') import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpecFromSubplotSpec if isinstance(self.kind, str): kind = [self.kind] * len(self.features) else: kind = self.kind if self.is_categorical is None: is_categorical = [(False,) if len(fx) == 1 else (False, False) for fx in self.features] else: is_categorical = self.is_categorical if len(kind) != len(self.features): raise ValueError(f'When `kind` is provided as a list of strings, it should contain as many elements as `features`. `kind` contains {len(kind)} element(s) and `features` contains {len(self.features)} element(s).') valid_kinds = {'average', 'individual', 'both'} if any([k not in valid_kinds for k in kind]): raise ValueError(f'Values provided to `kind` must be one of: {valid_kinds!r} or a list of such values. 
Currently, kind={self.kind!r}') if self.pdp_lim != 'deprecated': warnings.warn('The `pdp_lim` parameter is deprecated in version 1.1 and will be removed in version 1.3. Provide `pdp_lim` to the `plot` method.instead.', FutureWarning) if pdp_lim is not None and self.pdp_lim != pdp_lim: warnings.warn('`pdp_lim` has been passed in both the constructor and the `plot` method. For backward compatibility, the parameter from the constructor will be used.', UserWarning) pdp_lim = self.pdp_lim if not centered: pd_results_ = self.pd_results else: pd_results_ = [] for (kind_plot, pd_result) in zip(kind, self.pd_results): current_results = {'grid_values': pd_result['grid_values']} if kind_plot in ('individual', 'both'): preds = pd_result.individual preds = preds - preds[self.target_idx, :, 0, None] current_results['individual'] = preds if kind_plot in ('average', 'both'): avg_preds = pd_result.average avg_preds = avg_preds - avg_preds[self.target_idx, 0, None] current_results['average'] = avg_preds pd_results_.append(Bunch(**current_results)) if pdp_lim is None: pdp_lim = {} for (kind_plot, pdp) in zip(kind, pd_results_): values = pdp['grid_values'] preds = pdp.average if kind_plot == 'average' else pdp.individual min_pd = preds[self.target_idx].min() max_pd = preds[self.target_idx].max() span = max_pd - min_pd min_pd -= 0.05 * span max_pd += 0.05 * span n_fx = len(values) (old_min_pd, old_max_pd) = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) if line_kw is None: line_kw = {} if ice_lines_kw is None: ice_lines_kw = {} if pd_line_kw is None: pd_line_kw = {} if bar_kw is None: bar_kw = {} if heatmap_kw is None: heatmap_kw = {} if ax is None: (_, ax) = plt.subplots() if contour_kw is None: contour_kw = {} default_contour_kws = {'alpha': 0.75} contour_kw = {**default_contour_kws, **contour_kw} n_features = len(self.features) is_average_plot = [kind_plot == 'average' for kind_plot in kind] if all(is_average_plot): n_ice_lines = 0 n_lines = 1 else: ice_plot_idx = is_average_plot.index(False) <DeepExtract> if isinstance(self.subsample, numbers.Integral): if self.subsample < len(pd_results_[ice_plot_idx].individual[0]): n_ice_lines = self.subsample n_ice_lines = len(pd_results_[ice_plot_idx].individual[0]) elif isinstance(self.subsample, numbers.Real): n_ice_lines = ceil(len(pd_results_[ice_plot_idx].individual[0]) * self.subsample) n_ice_lines = len(pd_results_[ice_plot_idx].individual[0]) </DeepExtract> if any([kind_plot == 'both' for kind_plot in kind]): n_lines = n_ice_lines + 1 else: n_lines = n_ice_lines if isinstance(ax, plt.Axes): if not ax.axison: raise ValueError('The ax was already used in another plot function, please set ax=display.axes_ instead') ax.set_axis_off() self.bounding_ax_ = ax self.figure_ = ax.figure n_cols = min(n_cols, n_features) n_rows = int(np.ceil(n_features / float(n_cols))) self.axes_ = np.empty((n_rows, n_cols), dtype=object) if all(is_average_plot): self.lines_ = np.empty((n_rows, n_cols), dtype=object) else: self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object) self.contours_ = np.empty((n_rows, n_cols), dtype=object) self.bars_ = np.empty((n_rows, n_cols), dtype=object) self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object) axes_ravel = self.axes_.ravel() gs = GridSpecFromSubplotSpec(n_rows, n_cols, subplot_spec=ax.get_subplotspec()) for (i, spec) in zip(range(n_features), gs): axes_ravel[i] = self.figure_.add_subplot(spec) else: ax = np.asarray(ax, dtype=object) if 
ax.size != n_features: raise ValueError('Expected ax to have {} axes, got {}'.format(n_features, ax.size)) if ax.ndim == 2: n_cols = ax.shape[1] else: n_cols = None self.bounding_ax_ = None self.figure_ = ax.ravel()[0].figure self.axes_ = ax if all(is_average_plot): self.lines_ = np.empty_like(ax, dtype=object) else: self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object) self.contours_ = np.empty_like(ax, dtype=object) self.bars_ = np.empty_like(ax, dtype=object) self.heatmaps_ = np.empty_like(ax, dtype=object) if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object) self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object) for (pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot)) in enumerate(zip(self.axes_.ravel(), self.features, is_categorical, pd_results_, kind)): avg_preds = None preds = None feature_values = pd_result['grid_values'] if kind_plot == 'individual': preds = pd_result.individual elif kind_plot == 'average': avg_preds = pd_result.average else: avg_preds = pd_result.average preds = pd_result.individual if len(feature_values) == 1: default_line_kws = {'color': 'C0', 'label': 'average' if kind_plot == 'both' else None} if kind_plot == 'individual': default_ice_lines_kws = {'alpha': 0.3, 'linewidth': 0.5} default_pd_lines_kws = {} elif kind_plot == 'both': default_ice_lines_kws = {'alpha': 0.3, 'linewidth': 0.5, 'color': 'tab:blue'} default_pd_lines_kws = {'color': 'tab:orange', 'linestyle': '--'} else: default_ice_lines_kws = {} default_pd_lines_kws = {} ice_lines_kw = {**default_line_kws, **default_ice_lines_kws, **line_kw, **ice_lines_kw} del ice_lines_kw['label'] pd_line_kw = {**default_line_kws, **default_pd_lines_kws, **line_kw, **pd_line_kw} default_bar_kws = {'color': 'C0'} bar_kw = {**default_bar_kws, **bar_kw} default_heatmap_kw = {} heatmap_kw = {**default_heatmap_kw, **heatmap_kw} <DeepExtract> from matplotlib import transforms if kind_plot in ('individual', 'both'): self._plot_ice_lines(preds[self.target_idx], feature_values[0], n_ice_lines, axi, pd_plot_idx, n_lines, ice_lines_kw) if kind_plot in ('average', 'both'): if kind_plot == 'average': pd_line_idx = pd_plot_idx else: pd_line_idx = pd_plot_idx * n_lines + n_ice_lines self._plot_average_dependence(avg_preds[self.target_idx].ravel(), feature_values[0], axi, pd_line_idx, pd_line_kw, cat[0], bar_kw) trans = transforms.blended_transform_factory(axi.transData, axi.transAxes) vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) if self.deciles.get(feature_idx[0], None) is not None: self.deciles_vlines_[vlines_idx] = axi.vlines(self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color='k') min_val = min((val[0] for val in pdp_lim.values())) max_val = max((val[1] for val in pdp_lim.values())) axi.set_ylim([min_val, max_val]) if not axi.get_xlabel(): axi.set_xlabel(self.feature_names[feature_idx[0]]) if n_cols is None or pd_plot_idx % n_cols == 0: if not axi.get_ylabel(): axi.set_ylabel('Partial dependence') else: axi.set_yticklabels([]) if pd_line_kw.get('label', None) and kind_plot != 'individual' and (not cat[0]): axi.legend() </DeepExtract> else: <DeepExtract> if cat[0] and cat[1]: import matplotlib.pyplot as plt default_im_kw = dict(interpolation='nearest', cmap='viridis') im_kw = {**default_im_kw, **heatmap_kw} data = avg_preds[self.target_idx] im = axi.imshow(data, **im_kw) text = None (cmap_min, cmap_max) = (im.cmap(0), im.cmap(1.0)) text = np.empty_like(data, dtype=object) thresh = (data.max() + 
data.min()) / 2.0 for flat_index in range(data.size): (row, col) = np.unravel_index(flat_index, data.shape) color = cmap_max if data[row, col] < thresh else cmap_min values_format = '.2f' text_data = format(data[row, col], values_format) text_kwargs = dict(ha='center', va='center', color=color) text[row, col] = axi.text(col, row, text_data, **text_kwargs) fig = axi.figure fig.colorbar(im, ax=axi) axi.set(xticks=np.arange(len(feature_values[1])), yticks=np.arange(len(feature_values[0])), xticklabels=feature_values[1], yticklabels=feature_values[0], xlabel=self.feature_names[feature_idx[1]], ylabel=self.feature_names[feature_idx[0]]) plt.setp(axi.get_xticklabels(), rotation='vertical') heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape) self.heatmaps_[heatmap_idx] = im else: from matplotlib import transforms (XX, YY) = np.meshgrid(feature_values[0], feature_values[1]) Z = avg_preds[self.target_idx].T CS = axi.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape) self.contours_[contour_idx] = axi.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], **contour_kw) axi.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) trans = transforms.blended_transform_factory(axi.transData, axi.transAxes) (xlim, ylim) = (axi.get_xlim(), axi.get_ylim()) vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) self.deciles_vlines_[vlines_idx] = axi.vlines(self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color='k') hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape) self.deciles_hlines_[hlines_idx] = axi.hlines(self.deciles[feature_idx[1]], 0, 0.05, transform=trans, color='k') axi.set_xlim(xlim) axi.set_ylim(ylim) if not axi.get_xlabel(): axi.set_xlabel(self.feature_names[feature_idx[0]]) axi.set_ylabel(self.feature_names[feature_idx[1]]) </DeepExtract> return self
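An end-to-end sketch of reaching the plot method above through the public API (scikit-learn >= 1.1 assumed for `centered`); the estimator and data are placeholders, and the non-interactive backend is only there so the sketch runs headless.

import matplotlib
matplotlib.use("Agg")                       # headless backend for this sketch
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import PartialDependenceDisplay

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
est = RandomForestRegressor(n_estimators=20, random_state=0).fit(X, y)

disp = PartialDependenceDisplay.from_estimator(est, X, features=[0, 1], kind="both")
disp.plot(centered=True, pd_line_kw={"color": "tab:orange"})   # re-draws centred ICE + PD curves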
def rbf_kernel(X, Y=None, gamma=None): """Compute the rbf (gaussian) kernel between X and Y. K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel. """ (X, Y, dtype_float) = _return_float_dtype(X, Y) estimator = 'check_pairwise_arrays' if dtype is None: dtype = dtype_float if Y is X or Y is None: X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) else: X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1])) (X, Y) = (X, Y) if gamma is None: gamma = 1.0 / X.shape[1] (X, Y) = check_pairwise_arrays(X, Y) if X_norm_squared is not None: X_norm_squared = check_array(X_norm_squared, ensure_2d=False) original_shape = X_norm_squared.shape if X_norm_squared.shape == (X.shape[0],): X_norm_squared = X_norm_squared.reshape(-1, 1) if X_norm_squared.shape == (1, X.shape[0]): X_norm_squared = X_norm_squared.T if X_norm_squared.shape != (X.shape[0], 1): raise ValueError(f'Incompatible dimensions for X of shape {X.shape} and X_norm_squared of shape {original_shape}.') if Y_norm_squared is not None: Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False) original_shape = Y_norm_squared.shape if Y_norm_squared.shape == (Y.shape[0],): Y_norm_squared = Y_norm_squared.reshape(1, -1) if Y_norm_squared.shape == (Y.shape[0], 1): Y_norm_squared = Y_norm_squared.T if Y_norm_squared.shape != (1, Y.shape[0]): raise ValueError(f'Incompatible dimensions for Y of shape {Y.shape} and Y_norm_squared of shape {original_shape}.') K = _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, True) K *= -gamma np.exp(K, K) return K
def rbf_kernel(X, Y=None, gamma=None): """Compute the rbf (gaussian) kernel between X and Y. K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel. """ <DeepExtract> (X, Y, dtype_float) = _return_float_dtype(X, Y) estimator = 'check_pairwise_arrays' if dtype is None: dtype = dtype_float if Y is X or Y is None: X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) else: X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1])) (X, Y) = (X, Y) </DeepExtract> if gamma is None: gamma = 1.0 / X.shape[1] <DeepExtract> (X, Y) = check_pairwise_arrays(X, Y) if X_norm_squared is not None: X_norm_squared = check_array(X_norm_squared, ensure_2d=False) original_shape = X_norm_squared.shape if X_norm_squared.shape == (X.shape[0],): X_norm_squared = X_norm_squared.reshape(-1, 1) if X_norm_squared.shape == (1, X.shape[0]): X_norm_squared = X_norm_squared.T if X_norm_squared.shape != (X.shape[0], 1): raise ValueError(f'Incompatible dimensions for X of shape {X.shape} and X_norm_squared of shape {original_shape}.') if Y_norm_squared is not None: Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False) original_shape = Y_norm_squared.shape if Y_norm_squared.shape == (Y.shape[0],): Y_norm_squared = Y_norm_squared.reshape(1, -1) if Y_norm_squared.shape == (Y.shape[0], 1): Y_norm_squared = Y_norm_squared.T if Y_norm_squared.shape != (1, Y.shape[0]): raise ValueError(f'Incompatible dimensions for Y of shape {Y.shape} and Y_norm_squared of shape {original_shape}.') K = _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, True) </DeepExtract> K *= -gamma np.exp(K, K) return K
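A short usage sketch for rbf_kernel, checking K(x, y) = exp(-gamma * ||x - y||^2) against a direct NumPy computation; the toy arrays are illustrative only.

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel, euclidean_distances

X = np.array([[0.0, 0.0], [1.0, 0.0]])
Y = np.array([[0.0, 1.0], [2.0, 2.0], [1.0, 1.0]])
gamma = 0.5

K = rbf_kernel(X, Y, gamma=gamma)                       # shape (2, 3)
K_manual = np.exp(-gamma * euclidean_distances(X, Y) ** 2)
assert np.allclose(K, K_manual)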
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight, seeds, total_n_estimators, verbose, check_input): """Private function used to build a batch of estimators within a job.""" (n_samples, n_features) = X.shape max_features = ensemble._max_features max_samples = ensemble._max_samples bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features support_sample_weight = has_fit_parameter(ensemble.estimator_, 'sample_weight') has_check_input = has_fit_parameter(ensemble.estimator_, 'check_input') requires_feature_indexing = bootstrap_features or max_features != n_features if not support_sample_weight and sample_weight is not None: raise ValueError("The base estimator doesn't support sample weight") estimators = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print('Building estimator %d of %d for this parallel run (total %d)...' % (i + 1, n_estimators, total_n_estimators)) random_state = seeds[i] estimator = ensemble._make_estimator(append=False, random_state=random_state) if has_check_input: estimator_fit = partial(estimator.fit, check_input=check_input) else: estimator_fit = estimator.fit random_state = check_random_state(random_state) feature_indices = _generate_indices(random_state, bootstrap_features, n_features, max_features) sample_indices = _generate_indices(random_state, bootstrap, n_samples, max_samples) (features, indices) = (feature_indices, sample_indices) if support_sample_weight: if sample_weight is None: curr_sample_weight = np.ones((n_samples,)) else: curr_sample_weight = sample_weight.copy() if bootstrap: sample_counts = np.bincount(indices, minlength=n_samples) curr_sample_weight *= sample_counts else: not_indices_mask = ~indices_to_mask(indices, n_samples) curr_sample_weight[not_indices_mask] = 0 X_ = X[:, features] if requires_feature_indexing else X estimator_fit(X_, y, sample_weight=curr_sample_weight) else: X_ = X[indices][:, features] if requires_feature_indexing else X[indices] estimator_fit(X_, y[indices]) estimators.append(estimator) estimators_features.append(features) return (estimators, estimators_features)
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight, seeds, total_n_estimators, verbose, check_input): """Private function used to build a batch of estimators within a job.""" (n_samples, n_features) = X.shape max_features = ensemble._max_features max_samples = ensemble._max_samples bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features support_sample_weight = has_fit_parameter(ensemble.estimator_, 'sample_weight') has_check_input = has_fit_parameter(ensemble.estimator_, 'check_input') requires_feature_indexing = bootstrap_features or max_features != n_features if not support_sample_weight and sample_weight is not None: raise ValueError("The base estimator doesn't support sample weight") estimators = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print('Building estimator %d of %d for this parallel run (total %d)...' % (i + 1, n_estimators, total_n_estimators)) random_state = seeds[i] estimator = ensemble._make_estimator(append=False, random_state=random_state) if has_check_input: estimator_fit = partial(estimator.fit, check_input=check_input) else: estimator_fit = estimator.fit <DeepExtract> random_state = check_random_state(random_state) feature_indices = _generate_indices(random_state, bootstrap_features, n_features, max_features) sample_indices = _generate_indices(random_state, bootstrap, n_samples, max_samples) (features, indices) = (feature_indices, sample_indices) </DeepExtract> if support_sample_weight: if sample_weight is None: curr_sample_weight = np.ones((n_samples,)) else: curr_sample_weight = sample_weight.copy() if bootstrap: sample_counts = np.bincount(indices, minlength=n_samples) curr_sample_weight *= sample_counts else: not_indices_mask = ~indices_to_mask(indices, n_samples) curr_sample_weight[not_indices_mask] = 0 X_ = X[:, features] if requires_feature_indexing else X estimator_fit(X_, y, sample_weight=curr_sample_weight) else: X_ = X[indices][:, features] if requires_feature_indexing else X[indices] estimator_fit(X_, y[indices]) estimators.append(estimator) estimators_features.append(features) return (estimators, estimators_features)
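The helper above draws, per estimator, a set of sample indices and (optionally) feature indices. A hedged illustration of the same sampling from the public side, using an arbitrary synthetic dataset: BaggingClassifier records which features and which samples each member was trained on.

from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier

X, y = make_classification(n_samples=100, n_features=8, random_state=0)
bag = BaggingClassifier(
    n_estimators=3,
    max_samples=0.5,        # each member sees half of the rows
    max_features=0.5,       # ... and half of the columns
    bootstrap=True,
    bootstrap_features=True,
    random_state=0,
).fit(X, y)

for feats, samples in zip(bag.estimators_features_, bag.estimators_samples_):
    print(len(feats), "features,", len(set(samples)), "distinct samples")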
def test_subsamples(): random_state = np.random.RandomState(0) n_samples = 10000 X = random_state.normal(size=(n_samples, 4)) w = np.array([5.0, 10.0, 42.0, 7.0]) c = 1.0 noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) (X, y, w, c) = (X, y, w, c) theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) lstq = LinearRegression().fit(X, y) assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_subsamples(): <DeepExtract> random_state = np.random.RandomState(0) n_samples = 10000 X = random_state.normal(size=(n_samples, 4)) w = np.array([5.0, 10.0, 42.0, 7.0]) c = 1.0 noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) (X, y, w, c) = (X, y, w, c) </DeepExtract> theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) lstq = LinearRegression().fit(X, y) assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
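A sketch of the property test_subsamples asserts: when n_subsamples equals the number of samples, there is only one subset to consider and TheilSenRegressor degenerates to ordinary least squares. Small illustrative data, not the test fixture itself.

import numpy as np
from sklearn.linear_model import LinearRegression, TheilSenRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([2.0, -1.0, 0.5]) + 0.01 * rng.normal(size=50)

ts = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y)
ols = LinearRegression().fit(X, y)
# The two coefficient vectors should agree up to numerical tolerance.
print(np.allclose(ts.coef_, ols.coef_))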
@validate_params({'X': ['array-like'], 'Y': ['array-like', None]}) def additive_chi2_kernel(X, Y=None): """Compute the additive chi-squared kernel between observations in X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) A feature array. Y : array-like of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The kernel matrix. See Also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 https://hal.archives-ouvertes.fr/hal-00171412/document """ (X, Y, dtype_float) = _return_float_dtype(X, Y) estimator = 'check_pairwise_arrays' if dtype is None: dtype = dtype_float if Y is X or Y is None: X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) else: X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1])) (X, Y) = (X, Y) if (X < 0).any(): raise ValueError('X contains negative values.') if Y is not X and (Y < 0).any(): raise ValueError('Y contains negative values.') result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result
@validate_params({'X': ['array-like'], 'Y': ['array-like', None]}) def additive_chi2_kernel(X, Y=None): """Compute the additive chi-squared kernel between observations in X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) A feature array. Y : array-like of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The kernel matrix. See Also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 https://hal.archives-ouvertes.fr/hal-00171412/document """ <DeepExtract> (X, Y, dtype_float) = _return_float_dtype(X, Y) estimator = 'check_pairwise_arrays' if dtype is None: dtype = dtype_float if Y is X or Y is None: X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) else: X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, estimator=estimator) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError('Precomputed metric requires shape (n_queries, n_indexed). Got (%d, %d) for %d indexed.' % (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError('Incompatible dimension for X and Y matrices: X.shape[1] == %d while Y.shape[1] == %d' % (X.shape[1], Y.shape[1])) (X, Y) = (X, Y) </DeepExtract> if (X < 0).any(): raise ValueError('X contains negative values.') if Y is not X and (Y < 0).any(): raise ValueError('Y contains negative values.') result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result
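A usage sketch: the additive chi-squared kernel on two tiny non-negative "histograms", checked against the documented formula k(x, y) = -Sum[(x - y)^2 / (x + y)].

import numpy as np
from sklearn.metrics.pairwise import additive_chi2_kernel

X = np.array([[0.2, 0.5, 0.3]])
Y = np.array([[0.1, 0.1, 0.8]])

K = additive_chi2_kernel(X, Y)
manual = -np.sum((X[0] - Y[0]) ** 2 / (X[0] + Y[0]))
assert np.isclose(K[0, 0], manual)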
def load_wine(*, return_X_y=False, as_frame=False): """Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Read more in the :ref:`User Guide <wine_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (178, 13) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (178,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. frame: DataFrame of shape (178, 14) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D array of shape (178, 13) with each row representing one sample and each column representing the features. The second array of shape (178,) contains the target samples. Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. 
>>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) ['class_0', 'class_1', 'class_2'] """ with _open_text(data_module, 'wine_data.csv') as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) target_names = np.array(temp[2:]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype=int) for (i, ir) in enumerate(data_file): data[i] = np.asarray(ir[:-1], dtype=np.float64) target[i] = np.asarray(ir[-1], dtype=int) if 'wine_data.rst' is None: (data, target, target_names, fdescr) = (data, target, target_names) else: assert descr_module is not None descr = load_descr(descr_module=descr_module, descr_file_name='wine_data.rst') (data, target, target_names, fdescr) = (data, target, target_names, descr) feature_names = ['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280/od315_of_diluted_wines', 'proline'] frame = None target_columns = ['target'] if as_frame: pd = check_pandas_support('{} with as_frame=True'.format('load_wine')) if not sparse_data: data_df = pd.DataFrame(data, columns=feature_names) else: data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names) target_df = pd.DataFrame(target, columns=target_columns) combined_df = pd.concat([data_df, target_df], axis=1) X = combined_df[feature_names] y = combined_df[target_columns] if y.shape[1] == 1: y = y.iloc[:, 0] (frame, data, target) = (combined_df, X, y) if return_X_y: return (data, target) return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names)
def load_wine(*, return_X_y=False, as_frame=False): """Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Read more in the :ref:`User Guide <wine_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (178, 13) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (178,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. frame: DataFrame of shape (178, 14) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D array of shape (178, 13) with each row representing one sample and each column representing the features. The second array of shape (178,) contains the target samples. Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. 
>>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) ['class_0', 'class_1', 'class_2'] """ <DeepExtract> with _open_text(data_module, 'wine_data.csv') as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) target_names = np.array(temp[2:]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype=int) for (i, ir) in enumerate(data_file): data[i] = np.asarray(ir[:-1], dtype=np.float64) target[i] = np.asarray(ir[-1], dtype=int) if 'wine_data.rst' is None: (data, target, target_names, fdescr) = (data, target, target_names) else: assert descr_module is not None descr = load_descr(descr_module=descr_module, descr_file_name='wine_data.rst') (data, target, target_names, fdescr) = (data, target, target_names, descr) </DeepExtract> feature_names = ['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280/od315_of_diluted_wines', 'proline'] frame = None target_columns = ['target'] if as_frame: <DeepExtract> pd = check_pandas_support('{} with as_frame=True'.format('load_wine')) if not sparse_data: data_df = pd.DataFrame(data, columns=feature_names) else: data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names) target_df = pd.DataFrame(target, columns=target_columns) combined_df = pd.concat([data_df, target_df], axis=1) X = combined_df[feature_names] y = combined_df[target_columns] if y.shape[1] == 1: y = y.iloc[:, 0] (frame, data, target) = (combined_df, X, y) </DeepExtract> if return_X_y: return (data, target) return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names)
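A brief sketch of the return_X_y and as_frame switches documented above (pandas is required for as_frame=True).

from sklearn.datasets import load_wine

X, y = load_wine(return_X_y=True)          # ndarrays of shape (178, 13) and (178,)
wine = load_wine(as_frame=True)
print(wine.frame.shape)                     # (178, 14): 13 features plus the target
print(wine.frame["target"].value_counts())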
def _to_graph(n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None): """Auxiliary function for img_to_graph and grid_to_graph""" vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel())) edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel())) edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel())) edges = np.hstack((edges_deep, edges_right, edges_down)) edges = edges if dtype is None: if img is None: dtype = int else: dtype = img.dtype if img is not None: img = np.atleast_3d(img) (_, n_y, n_z) = img.shape gradient = np.abs(img[edges[0] // (n_y * n_z), edges[0] % (n_y * n_z) // n_z, edges[0] % (n_y * n_z) % n_z] - img[edges[1] // (n_y * n_z), edges[1] % (n_y * n_z) // n_z, edges[1] % (n_y * n_z) % n_z]) weights = gradient if mask is not None: inds = np.arange(mask.size) inds = inds[mask.ravel()] ind_mask = np.logical_and(np.in1d(edges[0], inds), np.in1d(edges[1], inds)) edges = edges[:, ind_mask] if weights is not None: weights = weights[ind_mask] if len(edges.ravel()): maxval = edges.max() else: maxval = 0 order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) edges = order[edges] if weights is None: (edges, weights) = edges else: (edges, weights) = (edges, weights) diag = img.squeeze()[mask] else: diag = img.ravel() n_voxels = diag.size else: if mask is not None: mask = mask.astype(dtype=bool, copy=False) inds = np.arange(mask.size) inds = inds[mask.ravel()] ind_mask = np.logical_and(np.in1d(edges[0], inds), np.in1d(edges[1], inds)) edges = edges[:, ind_mask] if weights is not None: weights = weights[ind_mask] if len(edges.ravel()): maxval = edges.max() else: maxval = 0 order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) edges = order[edges] if weights is None: edges = edges else: edges = (edges, weights) n_voxels = np.sum(mask) else: n_voxels = n_x * n_y * n_z weights = np.ones(edges.shape[1], dtype=dtype) diag = np.ones(n_voxels, dtype=dtype) diag_idx = np.arange(n_voxels) i_idx = np.hstack((edges[0], edges[1])) j_idx = np.hstack((edges[1], edges[0])) graph = sparse.coo_matrix((np.hstack((weights, weights, diag)), (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx)))), (n_voxels, n_voxels), dtype=dtype) if return_as is np.ndarray: return graph.toarray() return return_as(graph)
def _to_graph(n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None): """Auxiliary function for img_to_graph and grid_to_graph""" <DeepExtract> vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel())) edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel())) edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel())) edges = np.hstack((edges_deep, edges_right, edges_down)) edges = edges </DeepExtract> if dtype is None: if img is None: dtype = int else: dtype = img.dtype if img is not None: img = np.atleast_3d(img) <DeepExtract> (_, n_y, n_z) = img.shape gradient = np.abs(img[edges[0] // (n_y * n_z), edges[0] % (n_y * n_z) // n_z, edges[0] % (n_y * n_z) % n_z] - img[edges[1] // (n_y * n_z), edges[1] % (n_y * n_z) // n_z, edges[1] % (n_y * n_z) % n_z]) weights = gradient </DeepExtract> if mask is not None: <DeepExtract> inds = np.arange(mask.size) inds = inds[mask.ravel()] ind_mask = np.logical_and(np.in1d(edges[0], inds), np.in1d(edges[1], inds)) edges = edges[:, ind_mask] if weights is not None: weights = weights[ind_mask] if len(edges.ravel()): maxval = edges.max() else: maxval = 0 order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) edges = order[edges] if weights is None: (edges, weights) = edges else: (edges, weights) = (edges, weights) </DeepExtract> diag = img.squeeze()[mask] else: diag = img.ravel() n_voxels = diag.size else: if mask is not None: mask = mask.astype(dtype=bool, copy=False) <DeepExtract> inds = np.arange(mask.size) inds = inds[mask.ravel()] ind_mask = np.logical_and(np.in1d(edges[0], inds), np.in1d(edges[1], inds)) edges = edges[:, ind_mask] if weights is not None: weights = weights[ind_mask] if len(edges.ravel()): maxval = edges.max() else: maxval = 0 order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) edges = order[edges] if weights is None: edges = edges else: edges = (edges, weights) </DeepExtract> n_voxels = np.sum(mask) else: n_voxels = n_x * n_y * n_z weights = np.ones(edges.shape[1], dtype=dtype) diag = np.ones(n_voxels, dtype=dtype) diag_idx = np.arange(n_voxels) i_idx = np.hstack((edges[0], edges[1])) j_idx = np.hstack((edges[1], edges[0])) graph = sparse.coo_matrix((np.hstack((weights, weights, diag)), (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx)))), (n_voxels, n_voxels), dtype=dtype) if return_as is np.ndarray: return graph.toarray() return return_as(graph)
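A hedged sketch of the two public wrappers built on the helper above: grid_to_graph uses unit edge weights (img is None), while img_to_graph weights each edge by the absolute intensity difference between neighbouring pixels.

import numpy as np
from sklearn.feature_extraction.image import grid_to_graph, img_to_graph

conn = grid_to_graph(n_x=3, n_y=3)          # 9x9 sparse connectivity matrix
print(conn.shape, conn.nnz)

img = np.arange(9, dtype=float).reshape(3, 3)
adj = img_to_graph(img)                     # same structure, gradient-based weights
print(adj.toarray().round(1))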
@validate_params({'X': ['array-like'], 'y': [np.ndarray], 'n_nonzero_coefs': [Interval(Integral, 1, None, closed='left'), None], 'tol': [Interval(Real, 0, None, closed='left'), None], 'precompute': ['boolean', StrOptions({'auto'})], 'copy_X': ['boolean'], 'return_path': ['boolean'], 'return_n_iter': ['boolean']}) def orthogonal_mp(X, y, *, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP). Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\\gamma||^2 subject to ||\\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\\gamma||_0 subject to ||y - X\\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Input targets. n_nonzero_coefs : int, default=None Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, default=None Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : 'auto' or bool, default=False Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, default=True Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis generates coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model. orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Sparse coding. Notes ----- Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: copy_X = True if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError('The number of atoms cannot be more than the number of features') if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum(y ** 2, axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs=n_nonzero_coefs, tol=tol, norms_squared=norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): if copy_X: X = X.copy('F') else: X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps (nrm2, swap) = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) (potrs,) = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y[:, k]) residual = y[:, k] gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) max_features = X.shape[1] if tol is not None else n_nonzero_coefs L = np.empty((max_features, max_features), dtype=X.dtype) if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False) v = nrm2(L[n_active, :n_active]) ** 2 Lkk = linalg.norm(X[:, lam]) ** 2 - v if Lkk <= min_float: warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = sqrt(Lkk) else: L[0, 0] = linalg.norm(X[:, lam]) (X.T[n_active], X.T[lam]) = swap(X.T[n_active], X.T[lam]) (alpha[n_active], alpha[lam]) = (alpha[lam], alpha[n_active]) (indices[n_active], indices[lam]) = (indices[lam], indices[n_active]) n_active += 1 (gamma, _) = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y[:, k] - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: out = (gamma, indices[:n_active], coefs[:, :n_active], n_active) else: out = (gamma, indices[:n_active], n_active) if return_path: (_, idx, coefs, n_iter) = out coef = coef[:, :, :len(idx)] for (n_active, x) in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: (x, idx, n_iter) = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return (np.squeeze(coef), n_iters) else: return np.squeeze(coef)
@validate_params({'X': ['array-like'], 'y': [np.ndarray], 'n_nonzero_coefs': [Interval(Integral, 1, None, closed='left'), None], 'tol': [Interval(Real, 0, None, closed='left'), None], 'precompute': ['boolean', StrOptions({'auto'})], 'copy_X': ['boolean'], 'return_path': ['boolean'], 'return_n_iter': ['boolean']}) def orthogonal_mp(X, y, *, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP). Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\\gamma||^2 subject to ||\\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\\gamma||_0 subject to ||y - X\\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Input targets. n_nonzero_coefs : int, default=None Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, default=None Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : 'auto' or bool, default=False Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, default=True Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis generates coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model. orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Sparse coding. Notes ----- Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: copy_X = True if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError('The number of atoms cannot be more than the number of features') if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum(y ** 2, axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs=n_nonzero_coefs, tol=tol, norms_squared=norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): <DeepExtract> if copy_X: X = X.copy('F') else: X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps (nrm2, swap) = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) (potrs,) = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y[:, k]) residual = y[:, k] gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) max_features = X.shape[1] if tol is not None else n_nonzero_coefs L = np.empty((max_features, max_features), dtype=X.dtype) if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False) v = nrm2(L[n_active, :n_active]) ** 2 Lkk = linalg.norm(X[:, lam]) ** 2 - v if Lkk <= min_float: warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = sqrt(Lkk) else: L[0, 0] = linalg.norm(X[:, lam]) (X.T[n_active], X.T[lam]) = swap(X.T[n_active], X.T[lam]) (alpha[n_active], alpha[lam]) = (alpha[lam], alpha[n_active]) (indices[n_active], indices[lam]) = (indices[lam], indices[n_active]) n_active += 1 (gamma, _) = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y[:, k] - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: out = (gamma, indices[:n_active], coefs[:, :n_active], n_active) else: out = (gamma, indices[:n_active], n_active) </DeepExtract> if return_path: (_, idx, coefs, n_iter) = out coef = coef[:, :, :len(idx)] for (n_active, x) in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: (x, idx, n_iter) = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return (np.squeeze(coef), n_iters) else: return np.squeeze(coef)
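A small recovery sketch for orthogonal_mp: a noiseless signal built from three unit-norm dictionary atoms is typically recovered exactly with n_nonzero_coefs=3. The random dictionary is chosen only for illustration.

import numpy as np
from sklearn.linear_model import orthogonal_mp

rng = np.random.RandomState(0)
X = rng.normal(size=(40, 15))
X /= np.linalg.norm(X, axis=0)              # columns are assumed to have unit norm

true_coef = np.zeros(15)
true_coef[[2, 7, 11]] = [1.5, -2.0, 0.7]
y = X @ true_coef

coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
print(np.flatnonzero(coef))                 # typically the true support [2, 7, 11]
print(np.allclose(coef, true_coef))         # typically True for noiseless data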
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'normalize': ['boolean'], 'sample_weight': ['array-like', None]}) def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, default=True If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float If ``normalize == True``, return the fraction of correctly classified samples (float), else returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See Also -------- balanced_accuracy_score : Compute the balanced accuracy to deal with imbalanced datasets. jaccard_score : Compute the Jaccard similarity coefficient score. hamming_loss : Compute the average Hamming loss or Hamming distance between two sets of samples. zero_one_loss : Compute the Zero-one classification loss. By default, the function will return the percentage of imperfectly predicted subsets. Notes ----- In binary classification, this function is equal to the `jaccard_score` function. Examples -------- >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> import numpy as np >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true, input_name='y_true') type_pred = type_of_target(y_pred, input_name='y_pred') y_type = {type_true, type_pred} if y_type == {'binary', 'multiclass'}: y_type = {'multiclass'} if len(y_type) > 1: raise ValueError("Classification metrics can't handle a mix of {0} and {1} targets".format(type_true, type_pred)) y_type = y_type.pop() if y_type not in ['binary', 'multiclass', 'multilabel-indicator']: raise ValueError('{0} is not supported'.format(y_type)) if y_type in ['binary', 'multiclass']: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type == 'binary': try: unique_values = np.union1d(y_true, y_pred) except TypeError as e: raise TypeError(f'Labels in y_true and y_pred should be of the same type. Got y_true={np.unique(y_true)} and y_pred={np.unique(y_pred)}. 
Make sure that the predictions provided by the classifier coincides with the true labels.') from e if len(unique_values) > 2: y_type = 'multiclass' if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' (y_type, y_true, y_pred) = (y_type, y_true, y_pred) check_consistent_length(y_true, y_pred, sample_weight) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize)
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'normalize': ['boolean'], 'sample_weight': ['array-like', None]}) def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, default=True If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float If ``normalize == True``, return the fraction of correctly classified samples (float), else returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See Also -------- balanced_accuracy_score : Compute the balanced accuracy to deal with imbalanced datasets. jaccard_score : Compute the Jaccard similarity coefficient score. hamming_loss : Compute the average Hamming loss or Hamming distance between two sets of samples. zero_one_loss : Compute the Zero-one classification loss. By default, the function will return the percentage of imperfectly predicted subsets. Notes ----- In binary classification, this function is equal to the `jaccard_score` function. Examples -------- >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> import numpy as np >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ <DeepExtract> check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true, input_name='y_true') type_pred = type_of_target(y_pred, input_name='y_pred') y_type = {type_true, type_pred} if y_type == {'binary', 'multiclass'}: y_type = {'multiclass'} if len(y_type) > 1: raise ValueError("Classification metrics can't handle a mix of {0} and {1} targets".format(type_true, type_pred)) y_type = y_type.pop() if y_type not in ['binary', 'multiclass', 'multilabel-indicator']: raise ValueError('{0} is not supported'.format(y_type)) if y_type in ['binary', 'multiclass']: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type == 'binary': try: unique_values = np.union1d(y_true, y_pred) except TypeError as e: raise TypeError(f'Labels in y_true and y_pred should be of the same type. Got y_true={np.unique(y_true)} and y_pred={np.unique(y_pred)}. 
Make sure that the predictions provided by the classifier coincides with the true labels.') from e if len(unique_values) > 2: y_type = 'multiclass' if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' (y_type, y_true, y_pred) = (y_type, y_true, y_pred) </DeepExtract> check_consistent_length(y_true, y_pred, sample_weight) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize)
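A sketch contrasting the subset accuracy computed above with a per-label measure (Hamming loss) in the multilabel case: a single wrong label already makes the whole row count as incorrect for accuracy_score.

import numpy as np
from sklearn.metrics import accuracy_score, hamming_loss

y_true = np.array([[1, 0, 1],
                   [0, 1, 0]])
y_pred = np.array([[1, 0, 0],    # one of three labels wrong
                   [0, 1, 0]])   # exact match

print(accuracy_score(y_true, y_pred))   # 0.5: only the second row matches exactly
print(hamming_loss(y_true, y_pred))     # 0.1666...: 1 wrong label out of 6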
def decorator(func): setattr(func, '_skl_parameter_constraints', parameter_constraints) @functools.wraps(func) def wrapper(*args, **kwargs): func_sig = signature(func) params = func_sig.bind(*args, **kwargs) params.apply_defaults() to_ignore = [p.name for p in func_sig.parameters.values() if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)] to_ignore += ['self', 'cls'] params = {k: v for (k, v) in params.arguments.items() if k not in to_ignore} for (param_name, param_val) in params.items(): if param_name not in parameter_constraints: continue constraints = parameter_constraints[param_name] if constraints == 'no_validation': continue constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: if constraint.is_satisfied_by(param_val): break else: constraints = [constraint for constraint in constraints if not constraint.hidden] if len(constraints) == 1: constraints_str = f'{constraints[0]}' else: constraints_str = f"{', '.join([str(c) for c in constraints[:-1]])} or {constraints[-1]}" raise InvalidParameterError(f'The {param_name!r} parameter of {func.__qualname__} must be {constraints_str}. Got {param_val!r} instead.') try: return func(*args, **kwargs) except InvalidParameterError as e: msg = re.sub('parameter of \\w+ must be', f'parameter of {func.__qualname__} must be', str(e)) raise InvalidParameterError(msg) from e return wrapper
def decorator(func): setattr(func, '_skl_parameter_constraints', parameter_constraints) @functools.wraps(func) def wrapper(*args, **kwargs): func_sig = signature(func) params = func_sig.bind(*args, **kwargs) params.apply_defaults() to_ignore = [p.name for p in func_sig.parameters.values() if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)] to_ignore += ['self', 'cls'] params = {k: v for (k, v) in params.arguments.items() if k not in to_ignore} <DeepExtract> for (param_name, param_val) in params.items(): if param_name not in parameter_constraints: continue constraints = parameter_constraints[param_name] if constraints == 'no_validation': continue constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: if constraint.is_satisfied_by(param_val): break else: constraints = [constraint for constraint in constraints if not constraint.hidden] if len(constraints) == 1: constraints_str = f'{constraints[0]}' else: constraints_str = f"{', '.join([str(c) for c in constraints[:-1]])} or {constraints[-1]}" raise InvalidParameterError(f'The {param_name!r} parameter of {func.__qualname__} must be {constraints_str}. Got {param_val!r} instead.') </DeepExtract> try: return func(*args, **kwargs) except InvalidParameterError as e: msg = re.sub('parameter of \\w+ must be', f'parameter of {func.__qualname__} must be', str(e)) raise InvalidParameterError(msg) from e return wrapper
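A standalone, simplified sketch of the pattern implemented above: bind the call arguments, look up each parameter's declared constraints, and raise if none is satisfied. The names here (check_types, validated) are illustrative only and not scikit-learn API; the real decorator supports much richer constraint objects.

import functools
from inspect import signature

def check_types(constraints):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            bound = signature(func).bind(*args, **kwargs)
            bound.apply_defaults()
            for name, value in bound.arguments.items():
                allowed = constraints.get(name)
                if allowed is not None and not isinstance(value, allowed):
                    raise TypeError(
                        f"The {name!r} parameter of {func.__qualname__} must be "
                        f"an instance of {allowed}. Got {value!r} instead."
                    )
            return func(*args, **kwargs)
        return wrapper
    return decorator

@check_types({"n_iter": int, "tol": float})
def validated(n_iter=10, tol=1e-3):
    return n_iter, tol

validated(n_iter=5, tol=0.5)       # passes validation
# validated(n_iter="5")            # would raise TypeError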
def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None Input data. If X is not provided, only the global clustering step is done. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Fitted estimator. """ self._validate_params() if X is None: clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = X is not None and self.compute_labels not_enough_centroids = False if isinstance(clusterer, Integral): clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) if len(centroids) < self.n_clusters: not_enough_centroids = True self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn('Number of subclusters found (%d) by BIRCH is less than (%d). Decrease the threshold.' % (len(centroids), self.n_clusters), ConvergenceWarning) else: self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) if compute_labels: self.labels_ = self._predict(X) return self else: return self._fit(X, partial=True)
def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None Input data. If X is not provided, only the global clustering step is done. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Fitted estimator. """ self._validate_params() if X is None: <DeepExtract> clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = X is not None and self.compute_labels not_enough_centroids = False if isinstance(clusterer, Integral): clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) if len(centroids) < self.n_clusters: not_enough_centroids = True self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn('Number of subclusters found (%d) by BIRCH is less than (%d). Decrease the threshold.' % (len(centroids), self.n_clusters), ConvergenceWarning) else: self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) if compute_labels: self.labels_ = self._predict(X) </DeepExtract> return self else: return self._fit(X, partial=True)
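A usage sketch for the partial_fit path above: stream the data in chunks to grow the CF-tree incrementally, then call partial_fit() with no arguments so that only the global clustering step is redone (for example after changing n_clusters). The toy chunks are illustrative only.

import numpy as np
from sklearn.cluster import Birch

rng = np.random.RandomState(0)
chunks = [rng.normal(loc=c, size=(50, 2)) for c in (0.0, 5.0, 10.0)]

brc = Birch(n_clusters=3)
for chunk in chunks:
    brc.partial_fit(chunk)          # incremental update of the CF-tree

brc.set_params(n_clusters=2)
brc.partial_fit()                   # X=None: only the global clustering is redone
print(brc.predict(np.array([[0.0, 0.0], [10.0, 10.0]])))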
def evaluate_candidates(candidate_params, cv=None, more_results=None): cv = cv or cv_orig candidate_params = list(candidate_params) n_candidates = len(candidate_params) if self.verbose > 0: print('Fitting {0} folds for each of {1} candidates, totalling {2} fits'.format(n_splits, n_candidates, n_candidates * n_splits)) out = parallel((delayed(_fit_and_score)(clone(base_estimator), X, y, train=train, test=test, parameters=parameters, split_progress=(split_idx, n_splits), candidate_progress=(cand_idx, n_candidates), **fit_and_score_kwargs) for ((cand_idx, parameters), (split_idx, (train, test))) in product(enumerate(candidate_params), enumerate(cv.split(X, y, groups))))) if len(out) < 1: raise ValueError('No fits were performed. Was the CV iterator empty? Were there no candidates?') elif len(out) != n_candidates * n_splits: raise ValueError('cv.split and cv.get_n_splits returned inconsistent results. Expected {} splits, got {}'.format(n_splits, len(out) // n_candidates)) _warn_or_raise_about_fit_failures(out, self.error_score) if callable(self.scoring): _insert_error_scores(out, self.error_score) all_candidate_params.extend(candidate_params) all_out.extend(out) if more_results is not None: for (key, value) in more_results.items(): all_more_results[key].extend(value) nonlocal results n_candidates = len(all_candidate_params) all_out = _aggregate_score_dicts(all_out) results = dict(all_more_results or {}) for (key, val) in results.items(): results[key] = np.asarray(val) def _store(key_name, array, weights=None, splits=False, rank=False): """A small helper to store the scores/times to the cv_results_""" array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) if splits: for split_idx in range(n_splits): results['split%d_%s' % (split_idx, key_name)] = array[:, split_idx] array_means = np.average(array, axis=1, weights=weights) results['mean_%s' % key_name] = array_means if key_name.startswith(('train_', 'test_')) and np.any(~np.isfinite(array_means)): warnings.warn(f"One or more of the {key_name.split('_')[0]} scores are non-finite: {array_means}", category=UserWarning) array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results['std_%s' % key_name] = array_stds if rank: if np.isnan(array_means).all(): rank_result = np.ones_like(array_means, dtype=np.int32) else: min_array_means = np.nanmin(array_means) - 1 array_means = np.nan_to_num(array_means, nan=min_array_means) rank_result = rankdata(-array_means, method='min').astype(np.int32, copy=False) results['rank_%s' % key_name] = rank_result _store('fit_time', all_out['fit_time']) _store('score_time', all_out['score_time']) param_results = defaultdict(partial(MaskedArray, np.empty(n_candidates), mask=True, dtype=object)) for (cand_idx, params) in enumerate(all_candidate_params): for (name, value) in params.items(): param_results['param_%s' % name][cand_idx] = value results.update(param_results) results['params'] = all_candidate_params test_scores_dict = _normalize_score_results(all_out['test_scores']) if self.return_train_score: train_scores_dict = _normalize_score_results(all_out['train_scores']) for scorer_name in test_scores_dict: _store('test_%s' % scorer_name, test_scores_dict[scorer_name], splits=True, rank=True, weights=None) if self.return_train_score: _store('train_%s' % scorer_name, train_scores_dict[scorer_name], splits=True) results = results return results
def evaluate_candidates(candidate_params, cv=None, more_results=None): cv = cv or cv_orig candidate_params = list(candidate_params) n_candidates = len(candidate_params) if self.verbose > 0: print('Fitting {0} folds for each of {1} candidates, totalling {2} fits'.format(n_splits, n_candidates, n_candidates * n_splits)) out = parallel((delayed(_fit_and_score)(clone(base_estimator), X, y, train=train, test=test, parameters=parameters, split_progress=(split_idx, n_splits), candidate_progress=(cand_idx, n_candidates), **fit_and_score_kwargs) for ((cand_idx, parameters), (split_idx, (train, test))) in product(enumerate(candidate_params), enumerate(cv.split(X, y, groups))))) if len(out) < 1: raise ValueError('No fits were performed. Was the CV iterator empty? Were there no candidates?') elif len(out) != n_candidates * n_splits: raise ValueError('cv.split and cv.get_n_splits returned inconsistent results. Expected {} splits, got {}'.format(n_splits, len(out) // n_candidates)) _warn_or_raise_about_fit_failures(out, self.error_score) if callable(self.scoring): _insert_error_scores(out, self.error_score) all_candidate_params.extend(candidate_params) all_out.extend(out) if more_results is not None: for (key, value) in more_results.items(): all_more_results[key].extend(value) nonlocal results <DeepExtract> n_candidates = len(all_candidate_params) all_out = _aggregate_score_dicts(all_out) results = dict(all_more_results or {}) for (key, val) in results.items(): results[key] = np.asarray(val) def _store(key_name, array, weights=None, splits=False, rank=False): """A small helper to store the scores/times to the cv_results_""" array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) if splits: for split_idx in range(n_splits): results['split%d_%s' % (split_idx, key_name)] = array[:, split_idx] array_means = np.average(array, axis=1, weights=weights) results['mean_%s' % key_name] = array_means if key_name.startswith(('train_', 'test_')) and np.any(~np.isfinite(array_means)): warnings.warn(f"One or more of the {key_name.split('_')[0]} scores are non-finite: {array_means}", category=UserWarning) array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results['std_%s' % key_name] = array_stds if rank: if np.isnan(array_means).all(): rank_result = np.ones_like(array_means, dtype=np.int32) else: min_array_means = np.nanmin(array_means) - 1 array_means = np.nan_to_num(array_means, nan=min_array_means) rank_result = rankdata(-array_means, method='min').astype(np.int32, copy=False) results['rank_%s' % key_name] = rank_result _store('fit_time', all_out['fit_time']) _store('score_time', all_out['score_time']) param_results = defaultdict(partial(MaskedArray, np.empty(n_candidates), mask=True, dtype=object)) for (cand_idx, params) in enumerate(all_candidate_params): for (name, value) in params.items(): param_results['param_%s' % name][cand_idx] = value results.update(param_results) results['params'] = all_candidate_params test_scores_dict = _normalize_score_results(all_out['test_scores']) if self.return_train_score: train_scores_dict = _normalize_score_results(all_out['train_scores']) for scorer_name in test_scores_dict: _store('test_%s' % scorer_name, test_scores_dict[scorer_name], splits=True, rank=True, weights=None) if self.return_train_score: _store('train_%s' % scorer_name, train_scores_dict[scorer_name], splits=True) results = results </DeepExtract> return results
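For reference, a small sketch of the cv_results_ layout that the aggregation above produces; the dataset, estimator and parameter grid are arbitrary choices.

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
search = GridSearchCV(SVC(), {"C": [0.1, 1.0, 10.0]}, cv=3).fit(X, y)
# keys assembled by the _store helper above: per-split scores plus means, stds and ranks
print(sorted(k for k in search.cv_results_ if k.startswith(("split", "mean_", "std_", "rank_"))))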
def _yeo_johnson_optimize(self, x): """Find and return optimal lambda parameter of the Yeo-Johnson transform by MLE, for observed data x. Like for Box-Cox, MLE is done via the brent optimizer. """ x_tiny = np.finfo(np.float64).tiny def _neg_log_likelihood(lmbda): """Return the negative log likelihood of the observed data x as a function of lambda.""" out = np.zeros_like(x) pos = x >= 0 if abs(lmbda) < np.spacing(1.0): out[pos] = np.log1p(x[pos]) else: out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda if abs(lmbda - 2) > np.spacing(1.0): out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda) else: out[~pos] = -np.log1p(-x[~pos]) x_trans = out n_samples = x.shape[0] x_trans_var = x_trans.var() if x_trans_var < x_tiny: return np.inf log_var = np.log(x_trans_var) loglike = -n_samples / 2 * log_var loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum() return -loglike x = x[~np.isnan(x)] return optimize.brent(_neg_log_likelihood, brack=(-2, 2))
def _yeo_johnson_optimize(self, x): """Find and return optimal lambda parameter of the Yeo-Johnson transform by MLE, for observed data x. Like for Box-Cox, MLE is done via the brent optimizer. """ x_tiny = np.finfo(np.float64).tiny def _neg_log_likelihood(lmbda): """Return the negative log likelihood of the observed data x as a function of lambda.""" <DeepExtract> out = np.zeros_like(x) pos = x >= 0 if abs(lmbda) < np.spacing(1.0): out[pos] = np.log1p(x[pos]) else: out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda if abs(lmbda - 2) > np.spacing(1.0): out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda) else: out[~pos] = -np.log1p(-x[~pos]) x_trans = out </DeepExtract> n_samples = x.shape[0] x_trans_var = x_trans.var() if x_trans_var < x_tiny: return np.inf log_var = np.log(x_trans_var) loglike = -n_samples / 2 * log_var loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum() return -loglike x = x[~np.isnan(x)] return optimize.brent(_neg_log_likelihood, brack=(-2, 2))
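A short sketch of the MLE described above, cross-checked against SciPy's independent Yeo-Johnson fit; the toy sample is an assumption.

import numpy as np
from scipy import stats
from sklearn.preprocessing import PowerTransformer

rng = np.random.RandomState(0)
x = rng.lognormal(size=200)

pt = PowerTransformer(method="yeo-johnson", standardize=False).fit(x[:, None])
_, lmbda_scipy = stats.yeojohnson(x)
# both estimates maximize the same likelihood, so they should agree closely
print(pt.lambdas_[0], lmbda_scipy)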
@hides def predict_log_proba(self, X, *args, **kwargs): check_is_fitted(self) return np.ones(X.shape[0])
@hides def predict_log_proba(self, X, *args, **kwargs): <DeepExtract> check_is_fitted(self) </DeepExtract> return np.ones(X.shape[0])
def score(self, X, y): """Return the mean accuracy on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples, n_outputs) True values for X. Returns ------- scores : float Mean accuracy of predicted target versus true target. """ check_is_fitted(self) n_outputs_ = len(self.estimators_) if y.ndim == 1: raise ValueError('y must have at least two dimensions for multi target classification but has only one') if y.shape[1] != n_outputs_: raise ValueError('The number of outputs of Y for fit {0} and score {1} should be same'.format(n_outputs_, y.shape[1])) check_is_fitted(self) if not hasattr(self.estimators_[0], 'predict'): raise ValueError('The base estimator should implement a predict method') y = Parallel(n_jobs=self.n_jobs)((delayed(e.predict)(X) for e in self.estimators_)) y_pred = np.asarray(y).T return np.mean(np.all(y == y_pred, axis=1))
def score(self, X, y): """Return the mean accuracy on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples, n_outputs) True values for X. Returns ------- scores : float Mean accuracy of predicted target versus true target. """ check_is_fitted(self) n_outputs_ = len(self.estimators_) if y.ndim == 1: raise ValueError('y must have at least two dimensions for multi target classification but has only one') if y.shape[1] != n_outputs_: raise ValueError('The number of outputs of Y for fit {0} and score {1} should be same'.format(n_outputs_, y.shape[1])) <DeepExtract> check_is_fitted(self) if not hasattr(self.estimators_[0], 'predict'): raise ValueError('The base estimator should implement a predict method') y = Parallel(n_jobs=self.n_jobs)((delayed(e.predict)(X) for e in self.estimators_)) y_pred = np.asarray(y).T </DeepExtract> return np.mean(np.all(y == y_pred, axis=1))
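To make the subset-accuracy semantics concrete, a small sketch with assumed synthetic multilabel data.

from sklearn.datasets import make_multilabel_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier

X, Y = make_multilabel_classification(n_samples=100, n_classes=3, random_state=0)
clf = MultiOutputClassifier(LogisticRegression(max_iter=1000)).fit(X, Y)
# subset accuracy: a sample only counts when every output is predicted correctly
print(clf.score(X, Y))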
@pytest.mark.parametrize('gzip_response', [True, False]) def test_dataset_with_openml_warning(monkeypatch, gzip_response): data_id = 3 url_prefix_data_description = 'https://openml.org/api/v1/json/data/' url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/' url_prefix_download_data = 'https://openml.org/data/v1/' url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/' path_suffix = '.gz' read_fn = gzip.open data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}' def _file_name(url, suffix): output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act') def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix): assert url.startswith(expected_prefix) data_file_name = _file_name(url, suffix) with _open_binary(data_module, data_file_name) as f: if has_gzip_header and gzip_response: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen_data_description(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json') def _mock_urlopen_data_features(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json') def _mock_urlopen_download_data(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff') def _mock_urlopen_data_list(url, has_gzip_header): assert url.startswith(url_prefix_data_list) data_file_name = _file_name(url, '.json') with _open_binary(data_module, data_file_name) as f: decompressed_f = read_fn(f, 'rb') decoded_s = decompressed_f.read().decode('utf-8') json_data = json.loads(decoded_s) if 'error' in json_data: raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None) with _open_binary(data_module, data_file_name) as f: if has_gzip_header: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen(request, *args, **kwargs): url = request.get_full_url() has_gzip_header = request.get_header('Accept-encoding') == 'gzip' if url.startswith(url_prefix_data_list): return _mock_urlopen_data_list(url, has_gzip_header) elif url.startswith(url_prefix_data_features): return _mock_urlopen_data_features(url, has_gzip_header) elif url.startswith(url_prefix_download_data): return _mock_urlopen_download_data(url, has_gzip_header) elif url.startswith(url_prefix_data_description): return _mock_urlopen_data_description(url, has_gzip_header) else: raise ValueError('Unknown mocking URL pattern: %s' % url) if test_offline: monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen) msg = 'OpenML raised a warning on the dataset. It might be unusable. Warning:' with pytest.warns(UserWarning, match=msg): fetch_openml(data_id=data_id, cache=False, as_frame=False, parser='liac-arff')
@pytest.mark.parametrize('gzip_response', [True, False]) def test_dataset_with_openml_warning(monkeypatch, gzip_response): data_id = 3 <DeepExtract> url_prefix_data_description = 'https://openml.org/api/v1/json/data/' url_prefix_data_features = 'https://openml.org/api/v1/json/data/features/' url_prefix_download_data = 'https://openml.org/data/v1/' url_prefix_data_list = 'https://openml.org/api/v1/json/data/list/' path_suffix = '.gz' read_fn = gzip.open data_module = OPENML_TEST_DATA_MODULE + '.' + f'id_{data_id}' def _file_name(url, suffix): output = re.sub('\\W', '-', url[len('https://openml.org/'):]) + suffix + path_suffix return output.replace('-json-data-list', '-jdl').replace('-json-data-features', '-jdf').replace('-json-data-qualities', '-jdq').replace('-json-data', '-jd').replace('-data_name', '-dn').replace('-download', '-dl').replace('-limit', '-l').replace('-data_version', '-dv').replace('-status', '-s').replace('-deactivated', '-dact').replace('-active', '-act') def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix): assert url.startswith(expected_prefix) data_file_name = _file_name(url, suffix) with _open_binary(data_module, data_file_name) as f: if has_gzip_header and gzip_response: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen_data_description(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_description, suffix='.json') def _mock_urlopen_data_features(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_data_features, suffix='.json') def _mock_urlopen_download_data(url, has_gzip_header): return _mock_urlopen_shared(url=url, has_gzip_header=has_gzip_header, expected_prefix=url_prefix_download_data, suffix='.arff') def _mock_urlopen_data_list(url, has_gzip_header): assert url.startswith(url_prefix_data_list) data_file_name = _file_name(url, '.json') with _open_binary(data_module, data_file_name) as f: decompressed_f = read_fn(f, 'rb') decoded_s = decompressed_f.read().decode('utf-8') json_data = json.loads(decoded_s) if 'error' in json_data: raise HTTPError(url=None, code=412, msg='Simulated mock error', hdrs=None, fp=None) with _open_binary(data_module, data_file_name) as f: if has_gzip_header: fp = BytesIO(f.read()) return _MockHTTPResponse(fp, True) else: decompressed_f = read_fn(f, 'rb') fp = BytesIO(decompressed_f.read()) return _MockHTTPResponse(fp, False) def _mock_urlopen(request, *args, **kwargs): url = request.get_full_url() has_gzip_header = request.get_header('Accept-encoding') == 'gzip' if url.startswith(url_prefix_data_list): return _mock_urlopen_data_list(url, has_gzip_header) elif url.startswith(url_prefix_data_features): return _mock_urlopen_data_features(url, has_gzip_header) elif url.startswith(url_prefix_download_data): return _mock_urlopen_download_data(url, has_gzip_header) elif url.startswith(url_prefix_data_description): return _mock_urlopen_data_description(url, has_gzip_header) else: raise ValueError('Unknown mocking URL pattern: %s' % url) if test_offline: monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen) </DeepExtract> msg = 'OpenML raised a warning on the dataset. It might be unusable. Warning:' with pytest.warns(UserWarning, match=msg): fetch_openml(data_id=data_id, cache=False, as_frame=False, parser='liac-arff')
@pytest.mark.parametrize('kernel', ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']) @pytest.mark.parametrize('bandwidth', [0.01, 0.1, 1, 'scott', 'silverman']) def test_kernel_density(kernel, bandwidth): (n_samples, n_features) = (100, 3) rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) Y = rng.randn(n_samples, n_features) if bandwidth == 'scott': bandwidth = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif bandwidth == 'silverman': bandwidth = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4)) d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(bandwidth, X.shape[1], kernel) / X.shape[0] if kernel == 'gaussian': dens_true = norm * np.exp(-0.5 * (d * d) / (bandwidth * bandwidth)).sum(-1) elif kernel == 'tophat': dens_true = norm * (d < bandwidth).sum(-1) elif kernel == 'epanechnikov': dens_true = norm * ((1.0 - d * d / (bandwidth * bandwidth)) * (d < bandwidth)).sum(-1) elif kernel == 'exponential': dens_true = norm * np.exp(-d / bandwidth).sum(-1) elif kernel == 'linear': dens_true = norm * ((1 - d / bandwidth) * (d < bandwidth)).sum(-1) elif kernel == 'cosine': dens_true = norm * (np.cos(0.5 * np.pi * d / bandwidth) * (d < bandwidth)).sum(-1) else: raise ValueError('kernel not recognized') for rtol in [0, 1e-05]: for atol in [1e-06, 0.01]: for breadth_first in (True, False): kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol) log_dens = kde.fit(X).score_samples(Y) assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1e-07, rtol)) assert_allclose(np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1e-07, rtol))
@pytest.mark.parametrize('kernel', ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']) @pytest.mark.parametrize('bandwidth', [0.01, 0.1, 1, 'scott', 'silverman']) def test_kernel_density(kernel, bandwidth): (n_samples, n_features) = (100, 3) rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) Y = rng.randn(n_samples, n_features) <DeepExtract> if bandwidth == 'scott': bandwidth = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif bandwidth == 'silverman': bandwidth = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4)) d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(bandwidth, X.shape[1], kernel) / X.shape[0] if kernel == 'gaussian': dens_true = norm * np.exp(-0.5 * (d * d) / (bandwidth * bandwidth)).sum(-1) elif kernel == 'tophat': dens_true = norm * (d < bandwidth).sum(-1) elif kernel == 'epanechnikov': dens_true = norm * ((1.0 - d * d / (bandwidth * bandwidth)) * (d < bandwidth)).sum(-1) elif kernel == 'exponential': dens_true = norm * np.exp(-d / bandwidth).sum(-1) elif kernel == 'linear': dens_true = norm * ((1 - d / bandwidth) * (d < bandwidth)).sum(-1) elif kernel == 'cosine': dens_true = norm * (np.cos(0.5 * np.pi * d / bandwidth) * (d < bandwidth)).sum(-1) else: raise ValueError('kernel not recognized') </DeepExtract> for rtol in [0, 1e-05]: for atol in [1e-06, 0.01]: for breadth_first in (True, False): <DeepExtract> kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol) log_dens = kde.fit(X).score_samples(Y) assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1e-07, rtol)) assert_allclose(np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1e-07, rtol)) </DeepExtract>
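A brief usage sketch of the estimator under test, assuming a scikit-learn version that accepts the string bandwidths exercised in the parametrization above.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
# "scott" / "silverman" string bandwidths are assumed to be supported, as tested above
kde = KernelDensity(kernel="gaussian", bandwidth="scott").fit(X)
print(np.exp(kde.score_samples(X[:5])))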
def _transform(self, X, handle_unknown='error', force_all_finite=True, warn_on_unknown=False, ignore_category_indices=None): self._check_feature_names(X, reset=False) self._check_n_features(X, reset=False) if not (hasattr(X, 'iloc') and getattr(X, 'ndim', 0) == 2): X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite) if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_): X = check_array(X, dtype=object, force_all_finite=force_all_finite) else: X = X_temp needs_validation = False else: needs_validation = force_all_finite (n_samples, n_features) = X.shape X_columns = [] for i in range(n_features): Xi = _safe_indexing(X, indices=i, axis=1) Xi = check_array(Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation) X_columns.append(Xi) (X_list, n_samples, n_features) = (X_columns, n_samples, n_features) X_int = np.zeros((n_samples, n_features), dtype=int) X_mask = np.ones((n_samples, n_features), dtype=bool) columns_with_unknown = [] for i in range(n_features): Xi = X_list[i] (diff, valid_mask) = _check_unknown(Xi, self.categories_[i], return_mask=True) if not np.all(valid_mask): if handle_unknown == 'error': msg = 'Found unknown categories {0} in column {1} during transform'.format(diff, i) raise ValueError(msg) else: if warn_on_unknown: columns_with_unknown.append(i) X_mask[:, i] = valid_mask if self.categories_[i].dtype.kind in ('U', 'S') and self.categories_[i].itemsize > Xi.itemsize: Xi = Xi.astype(self.categories_[i].dtype) elif self.categories_[i].dtype.kind == 'O' and Xi.dtype.kind == 'U': Xi = Xi.astype('O') else: Xi = Xi.copy() Xi[~valid_mask] = self.categories_[i][0] X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) if columns_with_unknown: warnings.warn(f'Found unknown categories in columns {columns_with_unknown} during transform. These unknown categories will be encoded as all zeros', UserWarning) if not self._infrequent_enabled: return ignore_category_indices = ignore_category_indices or {} for col_idx in range(X_int.shape[1]): infrequent_idx = self._infrequent_indices[col_idx] if infrequent_idx is None: continue X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0] if self.handle_unknown == 'infrequent_if_exist': X_mask[:, col_idx] = True for (i, mapping) in enumerate(self._default_to_infrequent_mappings): if mapping is None: continue if i in ignore_category_indices: rows_to_update = X_int[:, i] != ignore_category_indices[i] else: rows_to_update = slice(None) X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i]) return (X_int, X_mask)
def _transform(self, X, handle_unknown='error', force_all_finite=True, warn_on_unknown=False, ignore_category_indices=None): self._check_feature_names(X, reset=False) self._check_n_features(X, reset=False) <DeepExtract> if not (hasattr(X, 'iloc') and getattr(X, 'ndim', 0) == 2): X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite) if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_): X = check_array(X, dtype=object, force_all_finite=force_all_finite) else: X = X_temp needs_validation = False else: needs_validation = force_all_finite (n_samples, n_features) = X.shape X_columns = [] for i in range(n_features): Xi = _safe_indexing(X, indices=i, axis=1) Xi = check_array(Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation) X_columns.append(Xi) (X_list, n_samples, n_features) = (X_columns, n_samples, n_features) </DeepExtract> X_int = np.zeros((n_samples, n_features), dtype=int) X_mask = np.ones((n_samples, n_features), dtype=bool) columns_with_unknown = [] for i in range(n_features): Xi = X_list[i] (diff, valid_mask) = _check_unknown(Xi, self.categories_[i], return_mask=True) if not np.all(valid_mask): if handle_unknown == 'error': msg = 'Found unknown categories {0} in column {1} during transform'.format(diff, i) raise ValueError(msg) else: if warn_on_unknown: columns_with_unknown.append(i) X_mask[:, i] = valid_mask if self.categories_[i].dtype.kind in ('U', 'S') and self.categories_[i].itemsize > Xi.itemsize: Xi = Xi.astype(self.categories_[i].dtype) elif self.categories_[i].dtype.kind == 'O' and Xi.dtype.kind == 'U': Xi = Xi.astype('O') else: Xi = Xi.copy() Xi[~valid_mask] = self.categories_[i][0] X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) if columns_with_unknown: warnings.warn(f'Found unknown categories in columns {columns_with_unknown} during transform. These unknown categories will be encoded as all zeros', UserWarning) <DeepExtract> if not self._infrequent_enabled: return ignore_category_indices = ignore_category_indices or {} for col_idx in range(X_int.shape[1]): infrequent_idx = self._infrequent_indices[col_idx] if infrequent_idx is None: continue X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0] if self.handle_unknown == 'infrequent_if_exist': X_mask[:, col_idx] = True for (i, mapping) in enumerate(self._default_to_infrequent_mappings): if mapping is None: continue if i in ignore_category_indices: rows_to_update = X_int[:, i] != ignore_category_indices[i] else: rows_to_update = slice(None) X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i]) </DeepExtract> return (X_int, X_mask)
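To illustrate the unknown-category masking performed in this method, a small sketch with assumed toy categories.

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["cat"], ["dog"], ["cat"], ["bird"]])
enc = OneHotEncoder(handle_unknown="ignore").fit(X)
# unseen categories are masked and encoded as all zeros, matching the warning above
print(enc.transform(np.array([["cat"], ["fish"]])).toarray())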
def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self) if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) p = self._mean_hiddens(v) h_ = self.random_state_.uniform(size=p.shape) < p p = np.dot(h_, self.components_) p += self.intercept_visible_ expit(p, out=p) v_ = self.random_state_.uniform(size=p.shape) < p return v_
def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self) if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) <DeepExtract> p = self._mean_hiddens(v) h_ = self.random_state_.uniform(size=p.shape) < p </DeepExtract> <DeepExtract> p = np.dot(h_, self.components_) p += self.intercept_visible_ expit(p, out=p) v_ = self.random_state_.uniform(size=p.shape) < p </DeepExtract> return v_
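For context, a minimal sketch of calling one Gibbs step on a fitted RBM; the binary toy data and hyperparameters are assumptions.

import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
V = (rng.rand(50, 16) > 0.5).astype(np.float64)
rbm = BernoulliRBM(n_components=8, n_iter=5, random_state=0).fit(V)
# one full Gibbs step: sample hidden units from v, then resample visible units
V_new = rbm.gibbs(V)
print(V_new.shape, V_new.dtype)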
def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" if verbose: print('generating dataset...') (X, y, coef) = make_regression(n_samples=configuration['n_train'] + configuration['n_test'], n_features=configuration['n_features'], noise=noise, coef=True) random_seed = 13 (X_train, X_test, y_train, y_test) = train_test_split(X, y, train_size=configuration['n_train'], test_size=configuration['n_test'], random_state=random_seed) (X_train, y_train) = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print('ok') (X_train, y_train, X_test, y_test) = (X_train, y_train, X_test, y_test) throughputs = dict() for estimator_config in configuration['estimators']: estimator_config['instance'].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while time.time() - start_time < duration_secs: estimator_config['instance'].predict(X_test[[0]]) n_predictions += 1 throughputs[estimator_config['name']] = n_predictions / duration_secs return throughputs
def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" <DeepExtract> if verbose: print('generating dataset...') (X, y, coef) = make_regression(n_samples=configuration['n_train'] + configuration['n_test'], n_features=configuration['n_features'], noise=noise, coef=True) random_seed = 13 (X_train, X_test, y_train, y_test) = train_test_split(X, y, train_size=configuration['n_train'], test_size=configuration['n_test'], random_state=random_seed) (X_train, y_train) = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print('ok') (X_train, y_train, X_test, y_test) = (X_train, y_train, X_test, y_test) </DeepExtract> throughputs = dict() for estimator_config in configuration['estimators']: estimator_config['instance'].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while time.time() - start_time < duration_secs: estimator_config['instance'].predict(X_test[[0]]) n_predictions += 1 throughputs[estimator_config['name']] = n_predictions / duration_secs return throughputs
@pytest.mark.parametrize('GradientBoosting, X, y', [(HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression)]) @pytest.mark.parametrize('rng_type', ('none', 'int', 'instance')) def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type): def _get_rng(rng_type): if rng_type == 'none': return None elif rng_type == 'int': return 42 else: return np.random.RandomState(0) if rng_type == 'none': random_state = None elif rng_type == 'int': random_state = 42 else: random_state = np.random.RandomState(0) gb_1 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state) gb_1.set_params(scoring=check_scoring(gb_1)) gb_1.fit(X, y) random_seed_1_1 = gb_1._random_seed gb_1.fit(X, y) random_seed_1_2 = gb_1._random_seed if rng_type == 'none': random_state = None elif rng_type == 'int': random_state = 42 else: random_state = np.random.RandomState(0) gb_2 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state, warm_start=True) gb_2.set_params(scoring=check_scoring(gb_2)) gb_2.fit(X, y) random_seed_2_1 = gb_2._random_seed gb_2.fit(X, y) random_seed_2_2 = gb_2._random_seed if rng_type == 'none': assert random_seed_1_1 != random_seed_1_2 != random_seed_2_1 elif rng_type == 'int': assert random_seed_1_1 == random_seed_1_2 == random_seed_2_1 else: assert random_seed_1_1 == random_seed_2_1 != random_seed_1_2 assert random_seed_2_1 == random_seed_2_2
@pytest.mark.parametrize('GradientBoosting, X, y', [(HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression)]) @pytest.mark.parametrize('rng_type', ('none', 'int', 'instance')) def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type): def _get_rng(rng_type): if rng_type == 'none': return None elif rng_type == 'int': return 42 else: return np.random.RandomState(0) <DeepExtract> if rng_type == 'none': random_state = None elif rng_type == 'int': random_state = 42 else: random_state = np.random.RandomState(0) </DeepExtract> gb_1 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state) gb_1.set_params(scoring=check_scoring(gb_1)) gb_1.fit(X, y) random_seed_1_1 = gb_1._random_seed gb_1.fit(X, y) random_seed_1_2 = gb_1._random_seed <DeepExtract> if rng_type == 'none': random_state = None elif rng_type == 'int': random_state = 42 else: random_state = np.random.RandomState(0) </DeepExtract> gb_2 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state, warm_start=True) gb_2.set_params(scoring=check_scoring(gb_2)) gb_2.fit(X, y) random_seed_2_1 = gb_2._random_seed gb_2.fit(X, y) random_seed_2_2 = gb_2._random_seed if rng_type == 'none': assert random_seed_1_1 != random_seed_1_2 != random_seed_2_1 elif rng_type == 'int': assert random_seed_1_1 == random_seed_1_2 == random_seed_2_1 else: assert random_seed_1_1 == random_seed_2_1 != random_seed_1_2 assert random_seed_2_1 == random_seed_2_2
def preParse(self, instring, loc): if col(loc, instring) != self.col: instrlen = len(instring) if self.ignoreExprs: exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while 1: (loc, dummy) = e._parse(instring, loc) exprsFound = True except ParseException: pass loc = loc while loc < instrlen and instring[loc].isspace() and (col(loc, instring) != self.col): loc += 1 return loc
def preParse(self, instring, loc): if col(loc, instring) != self.col: instrlen = len(instring) if self.ignoreExprs: <DeepExtract> exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while 1: (loc, dummy) = e._parse(instring, loc) exprsFound = True except ParseException: pass loc = loc </DeepExtract> while loc < instrlen and instring[loc].isspace() and (col(loc, instring) != self.col): loc += 1 return loc
def test_warm_start_convergence(): random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) (X, y, _, _) = (X, y, X_test, y_test) model = ElasticNet(alpha=0.001, tol=0.001).fit(X, y) n_iter_reference = model.n_iter_ assert n_iter_reference > 2 model.fit(X, y) n_iter_cold_start = model.n_iter_ assert n_iter_cold_start == n_iter_reference model.set_params(warm_start=True) model.fit(X, y) n_iter_warm_start = model.n_iter_ assert n_iter_warm_start == 1
def test_warm_start_convergence(): <DeepExtract> random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) (X, y, _, _) = (X, y, X_test, y_test) </DeepExtract> model = ElasticNet(alpha=0.001, tol=0.001).fit(X, y) n_iter_reference = model.n_iter_ assert n_iter_reference > 2 model.fit(X, y) n_iter_cold_start = model.n_iter_ assert n_iter_cold_start == n_iter_reference model.set_params(warm_start=True) model.fit(X, y) n_iter_warm_start = model.n_iter_ assert n_iter_warm_start == 1
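A compact sketch of the warm-start behaviour this test asserts, on assumed random regression data.

import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(0)
X = rng.randn(200, 20)
y = X @ rng.randn(20)

model = ElasticNet(alpha=1e-3, tol=1e-3, warm_start=True).fit(X, y)
print(model.n_iter_)  # full solve from scratch
model.fit(X, y)
print(model.n_iter_)  # restarts from the previous coefficients, so very few iterations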
def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return document-term matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is ignored. Returns ------- X : sparse matrix of (n_samples, n_features) Tf-idf-weighted document-term matrix. """ if self.dtype not in FLOAT_DTYPES: warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will be converted to np.float64.".format(FLOAT_DTYPES, self.dtype), UserWarning) self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf) X = super().fit_transform(raw_documents) self._tfidf.fit(X) return self._tfidf.transform(X, copy=False)
def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return document-term matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is ignored. Returns ------- X : sparse matrix of (n_samples, n_features) Tf-idf-weighted document-term matrix. """ <DeepExtract> if self.dtype not in FLOAT_DTYPES: warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will be converted to np.float64.".format(FLOAT_DTYPES, self.dtype), UserWarning) </DeepExtract> self._tfidf = TfidfTransformer(norm=self.norm, use_idf=self.use_idf, smooth_idf=self.smooth_idf, sublinear_tf=self.sublinear_tf) X = super().fit_transform(raw_documents) self._tfidf.fit(X) return self._tfidf.transform(X, copy=False)
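A minimal usage sketch of the one-pass fit_transform described above, on an assumed toy corpus.

from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["the cat sat", "the dog sat", "cats and dogs"]
vec = TfidfVectorizer()
X = vec.fit_transform(docs)  # single pass: learns vocabulary and idf, returns the tf-idf matrix
print(X.shape, sorted(vec.vocabulary_))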
def fit(self, X, y=None): """Fit a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ self._validate_params() X = self._validate_data(X, ensure_min_samples=2, estimator='MinCovDet') random_state = check_random_state(self.random_state) (n_samples, n_features) = X.shape if (linalg.svdvals(np.dot(X.T, X)) > 1e-08).sum() != n_features: warnings.warn('The covariance matrix associated to your dataset is not full rank') random_state = check_random_state(random_state) X = check_array(X, ensure_min_samples=2, estimator='fast_mcd') (n_samples, n_features) = X.shape if self.support_fraction is None: n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) else: n_support = int(self.support_fraction * n_samples) if n_features == 1: if n_support < n_samples: X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[:n_samples - n_support] halves_start = np.where(diff == np.min(diff))[0] location = 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) if n_samples > 500 and n_features > 1: n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) n_trials_tot = 500 n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: n_best_tot = 10 all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] (best_locations_sub, best_covariances_sub, _, _) = select_candidates(current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=self._nonrobust_covariance, random_state=random_state) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 selection = random_state.permutation(n_samples)[:n_samples_merged] (locations_merged, covariances_merged, supports_merged, d) = select_candidates(X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=self._nonrobust_covariance, random_state=random_state) if n_samples < 1500: location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: (locations_full, covariances_full, supports_full, d) = select_candidates(X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=self._nonrobust_covariance, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: n_trials = 30 n_best = 10 (locations_best, covariances_best, _, _) = select_candidates(X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=self._nonrobust_covariance, random_state=random_state) (locations_full, covariances_full, supports_full, d) = select_candidates(X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=self._nonrobust_covariance, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] (raw_location, raw_covariance, raw_support, raw_dist) = (location, covariance, support, dist) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True) precision = linalg.pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist n_samples = len(self.dist_) n_support = np.sum(self.support_) if n_support < n_samples and np.allclose(self.raw_covariance_, 0): raise ValueError('The covariance matrix of the support data is equal to 0, try to increase support_fraction') correction = np.median(self.dist_) / chi2(X.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected (n_samples, n_features) = X.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = X[mask].mean(0) covariance_reweighted = self._nonrobust_covariance(X[mask], assume_centered=self.assume_centered) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = X - self.location_ self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1) return (location_reweighted, covariance_reweighted, support_reweighted) return self
def fit(self, X, y=None): """Fit a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ self._validate_params() X = self._validate_data(X, ensure_min_samples=2, estimator='MinCovDet') random_state = check_random_state(self.random_state) (n_samples, n_features) = X.shape if (linalg.svdvals(np.dot(X.T, X)) > 1e-08).sum() != n_features: warnings.warn('The covariance matrix associated to your dataset is not full rank') <DeepExtract> random_state = check_random_state(random_state) X = check_array(X, ensure_min_samples=2, estimator='fast_mcd') (n_samples, n_features) = X.shape if self.support_fraction is None: n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) else: n_support = int(self.support_fraction * n_samples) if n_features == 1: if n_support < n_samples: X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[:n_samples - n_support] halves_start = np.where(diff == np.min(diff))[0] location = 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) if n_samples > 500 and n_features > 1: n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) n_trials_tot = 500 n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: n_best_tot = 10 all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] (best_locations_sub, best_covariances_sub, _, _) = select_candidates(current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=self._nonrobust_covariance, random_state=random_state) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 selection = random_state.permutation(n_samples)[:n_samples_merged] (locations_merged, covariances_merged, supports_merged, d) = select_candidates(X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=self._nonrobust_covariance, random_state=random_state) if n_samples < 1500: location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: (locations_full, covariances_full, supports_full, d) = select_candidates(X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=self._nonrobust_covariance, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: n_trials = 30 n_best = 10 (locations_best, covariances_best, _, _) = select_candidates(X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=self._nonrobust_covariance, random_state=random_state) (locations_full, covariances_full, supports_full, d) = select_candidates(X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=self._nonrobust_covariance, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] (raw_location, raw_covariance, raw_support, raw_dist) = (location, covariance, support, dist) </DeepExtract> if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True) precision = linalg.pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist <DeepExtract> n_samples = len(self.dist_) n_support = np.sum(self.support_) if n_support < n_samples and np.allclose(self.raw_covariance_, 0): raise ValueError('The covariance matrix of the support data is equal to 0, try to increase support_fraction') correction = np.median(self.dist_) / chi2(X.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected </DeepExtract> <DeepExtract> (n_samples, n_features) = X.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = X[mask].mean(0) covariance_reweighted = self._nonrobust_covariance(X[mask], assume_centered=self.assume_centered) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = X - self.location_ self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1) return (location_reweighted, covariance_reweighted, support_reweighted) </DeepExtract> return self
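For orientation, a short sketch of fitting the estimator end to end; the Gaussian toy data and seed are assumptions.

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.3], [0.3, 1.0]], size=300)
mcd = MinCovDet(random_state=0).fit(X)
print(mcd.location_)       # robust location estimate
print(mcd.support_.sum())  # observations kept by the reweighted estimate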
def test_classifier_chain_fit_and_predict_with_sparse_data(): (X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0) Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y]) (X, Y) = (X, Y_multi) X_sparse = sp.csr_matrix(X) classifier_chain = ClassifierChain(LogisticRegression()) classifier_chain.fit(X_sparse, Y) Y_pred_sparse = classifier_chain.predict(X_sparse) classifier_chain = ClassifierChain(LogisticRegression()) classifier_chain.fit(X, Y) Y_pred_dense = classifier_chain.predict(X) assert_array_equal(Y_pred_sparse, Y_pred_dense)
def test_classifier_chain_fit_and_predict_with_sparse_data(): <DeepExtract> (X, y) = make_classification(n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0) Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]] for yy in y]) (X, Y) = (X, Y_multi) </DeepExtract> X_sparse = sp.csr_matrix(X) classifier_chain = ClassifierChain(LogisticRegression()) classifier_chain.fit(X_sparse, Y) Y_pred_sparse = classifier_chain.predict(X_sparse) classifier_chain = ClassifierChain(LogisticRegression()) classifier_chain.fit(X, Y) Y_pred_dense = classifier_chain.predict(X) assert_array_equal(Y_pred_sparse, Y_pred_dense)
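A small sketch mirroring the dense/sparse equivalence checked by this test, with assumed synthetic data.

import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import ClassifierChain

X, Y = make_multilabel_classification(n_samples=200, n_classes=4, random_state=0)
chain = ClassifierChain(LogisticRegression(max_iter=1000)).fit(sp.csr_matrix(X), Y)
print(chain.predict(sp.csr_matrix(X)).shape)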
def inplace_column_scale(X, scale): """Inplace column scaling of a CSC/CSR matrix. Scale each feature of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix to normalize using the variance of the features. It should be of CSC or CSR format. scale : ndarray of shape (n_features,), dtype={np.float32, np.float64} Array of precomputed feature-wise values to use for scaling. """ if isinstance(X, sp.csc_matrix): assert scale.shape[0] == X.T.shape[0] X.T.data *= np.repeat(scale, np.diff(X.T.indptr)) elif isinstance(X, sp.csr_matrix): assert scale.shape[0] == X.shape[1] X.data *= scale.take(X.indices, mode='clip') else: input_type = X.format if sp.issparse(X) else type(X) err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type raise TypeError(err)
def inplace_column_scale(X, scale): """Inplace column scaling of a CSC/CSR matrix. Scale each feature of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix to normalize using the variance of the features. It should be of CSC or CSR format. scale : ndarray of shape (n_features,), dtype={np.float32, np.float64} Array of precomputed feature-wise values to use for scaling. """ if isinstance(X, sp.csc_matrix): <DeepExtract> assert scale.shape[0] == X.T.shape[0] X.T.data *= np.repeat(scale, np.diff(X.T.indptr)) </DeepExtract> elif isinstance(X, sp.csr_matrix): <DeepExtract> assert scale.shape[0] == X.shape[1] X.data *= scale.take(X.indices, mode='clip') </DeepExtract> else: <DeepExtract> input_type = X.format if sp.issparse(X) else type(X) err = 'Expected a CSR or CSC sparse matrix, got %s.' % input_type raise TypeError(err) </DeepExtract>
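A quick usage sketch of this helper, assuming a small random CSR matrix.

import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import inplace_column_scale

X = sp.random(5, 3, density=0.5, format="csr", random_state=0)
scale = np.array([1.0, 2.0, 0.5])
inplace_column_scale(X, scale)  # scales X.data in place, column by column
print(X.toarray())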
def _compute_depth(tree, node): """ Returns the depth of the subtree rooted in node. """ def compute_depth_(current_node, current_depth, children_left, children_right, depths): depths += [current_depth] left = children_left[current_node] right = children_right[current_node] if left != -1 and right != -1: depths += [current_depth + 1] left = children_left[left] right = children_right[left] if left != -1 and right != -1: compute_depth_(left, current_depth + 1 + 1, children_left, children_right, depths) compute_depth_(right, current_depth + 1 + 1, children_left, children_right, depths) depths += [current_depth + 1] left = children_left[right] right = children_right[right] if left != -1 and right != -1: compute_depth_(left, current_depth + 1 + 1, children_left, children_right, depths) compute_depth_(right, current_depth + 1 + 1, children_left, children_right, depths) depths = [] depths += [1] left = tree.children_left[node] right = tree.children_right[node] if left != -1 and right != -1: compute_depth_(left, 1 + 1, tree.children_left, tree.children_right, depths) compute_depth_(right, 1 + 1, tree.children_left, tree.children_right, depths) return max(depths)
def _compute_depth(tree, node): """ Returns the depth of the subtree rooted in node. """ def compute_depth_(current_node, current_depth, children_left, children_right, depths): depths += [current_depth] left = children_left[current_node] right = children_right[current_node] if left != -1 and right != -1: <DeepExtract> depths += [current_depth + 1] left = children_left[left] right = children_right[left] if left != -1 and right != -1: compute_depth_(left, current_depth + 1 + 1, children_left, children_right, depths) compute_depth_(right, current_depth + 1 + 1, children_left, children_right, depths) </DeepExtract> <DeepExtract> depths += [current_depth + 1] left = children_left[right] right = children_right[right] if left != -1 and right != -1: compute_depth_(left, current_depth + 1 + 1, children_left, children_right, depths) compute_depth_(right, current_depth + 1 + 1, children_left, children_right, depths) </DeepExtract> depths = [] <DeepExtract> depths += [1] left = tree.children_left[node] right = tree.children_right[node] if left != -1 and right != -1: compute_depth_(left, 1 + 1, tree.children_left, tree.children_right, depths) compute_depth_(right, 1 + 1, tree.children_left, tree.children_right, depths) </DeepExtract> return max(depths)
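For comparison, a sketch of the public depth accessors that capture the same notion for a whole fitted tree.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
# public counterpart of the recursive depth computation above
print(clf.get_depth(), clf.tree_.max_depth)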
def transform(self, X): """Generate missing values indicator for `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing) The missing indicator for input data. The data type of `Xt` will be boolean. """ check_is_fitted(self) if not self._precomputed: if self.strategy in ('most_frequent', 'constant'): if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)): dtype = object else: dtype = None else: dtype = FLOAT_DTYPES if not False and self._fit_dtype.kind == 'O': dtype = self._fit_dtype if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values): force_all_finite = 'allow-nan' else: force_all_finite = True try: X = self._validate_data(X, reset=False, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy) except ValueError as ve: if 'could not convert' in str(ve): new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve)) raise new_ve from None else: raise ve if False: self._fit_dtype = X.dtype _check_inputs_dtype(X, self.missing_values) if X.dtype.kind not in ('i', 'u', 'f', 'O'): raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype)) X = X elif not (hasattr(X, 'dtype') and X.dtype.kind == 'b'): raise ValueError('precomputed is True but the input data is not a mask') if not self._precomputed: imputer_mask = _get_mask(X, self.missing_values) else: imputer_mask = X if sp.issparse(X): imputer_mask.eliminate_zeros() if self.features == 'missing-only': n_missing = imputer_mask.getnnz(axis=0) if self.sparse is False: imputer_mask = imputer_mask.toarray() elif imputer_mask.format == 'csr': imputer_mask = imputer_mask.tocsc() else: if not self._precomputed: imputer_mask = _get_mask(X, self.missing_values) else: imputer_mask = X if self.features == 'missing-only': n_missing = imputer_mask.sum(axis=0) if self.sparse is True: imputer_mask = sp.csc_matrix(imputer_mask) if self.features == 'all': features_indices = np.arange(X.shape[1]) else: features_indices = np.flatnonzero(n_missing) (imputer_mask, features) = (imputer_mask, features_indices) if self.features == 'missing-only': features_diff_fit_trans = np.setdiff1d(features, self.features_) if self.error_on_new and features_diff_fit_trans.size > 0: raise ValueError('The features {} have missing values in transform but have no missing values in fit.'.format(features_diff_fit_trans)) if self.features_.size < self._n_features: imputer_mask = imputer_mask[:, self.features_] return imputer_mask
def transform(self, X): """Generate missing values indicator for `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing) The missing indicator for input data. The data type of `Xt` will be boolean. """ check_is_fitted(self) if not self._precomputed: <DeepExtract> if self.strategy in ('most_frequent', 'constant'): if isinstance(X, list) and any((isinstance(elem, str) for row in X for elem in row)): dtype = object else: dtype = None else: dtype = FLOAT_DTYPES if not False and self._fit_dtype.kind == 'O': dtype = self._fit_dtype if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values): force_all_finite = 'allow-nan' else: force_all_finite = True try: X = self._validate_data(X, reset=False, accept_sparse='csc', dtype=dtype, force_all_finite=force_all_finite, copy=self.copy) except ValueError as ve: if 'could not convert' in str(ve): new_ve = ValueError('Cannot use {} strategy with non-numeric data:\n{}'.format(self.strategy, ve)) raise new_ve from None else: raise ve if False: self._fit_dtype = X.dtype _check_inputs_dtype(X, self.missing_values) if X.dtype.kind not in ('i', 'u', 'f', 'O'): raise ValueError('SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype)) X = X </DeepExtract> elif not (hasattr(X, 'dtype') and X.dtype.kind == 'b'): raise ValueError('precomputed is True but the input data is not a mask') <DeepExtract> if not self._precomputed: imputer_mask = _get_mask(X, self.missing_values) else: imputer_mask = X if sp.issparse(X): imputer_mask.eliminate_zeros() if self.features == 'missing-only': n_missing = imputer_mask.getnnz(axis=0) if self.sparse is False: imputer_mask = imputer_mask.toarray() elif imputer_mask.format == 'csr': imputer_mask = imputer_mask.tocsc() else: if not self._precomputed: imputer_mask = _get_mask(X, self.missing_values) else: imputer_mask = X if self.features == 'missing-only': n_missing = imputer_mask.sum(axis=0) if self.sparse is True: imputer_mask = sp.csc_matrix(imputer_mask) if self.features == 'all': features_indices = np.arange(X.shape[1]) else: features_indices = np.flatnonzero(n_missing) (imputer_mask, features) = (imputer_mask, features_indices) </DeepExtract> if self.features == 'missing-only': features_diff_fit_trans = np.setdiff1d(features, self.features_) if self.error_on_new and features_diff_fit_trans.size > 0: raise ValueError('The features {} have missing values in transform but have no missing values in fit.'.format(features_diff_fit_trans)) if self.features_.size < self._n_features: imputer_mask = imputer_mask[:, self.features_] return imputer_mask
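A minimal sketch of the transform described above, using assumed toy data with NaN as the missing marker.

import numpy as np
from sklearn.impute import MissingIndicator

X = np.array([[1.0, np.nan, 3.0], [4.0, 5.0, np.nan]])
indicator = MissingIndicator(features="missing-only").fit(X)
# boolean mask restricted to the columns that had missing values during fit
print(indicator.transform(X))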
def fit(self, X, y=None): """Perform OPTICS clustering. Extracts an ordered list of points and reachability distances, and performs initial clustering using ``max_eps`` distance specified at OPTICS object instantiation. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric='precomputed' A feature array, or array of distances between samples if metric='precomputed'. If a sparse matrix is provided, it will be converted into CSR format. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns a fitted instance of self. """ self._validate_params() dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float if dtype == bool and X.dtype != bool: msg = f'Data will be converted to boolean for metric {self.metric}, to avoid this warning, you may convert the data prior to calling fit.' warnings.warn(msg, DataConversionWarning) X = self._validate_data(X, dtype=dtype, accept_sparse='csr') if self.metric == 'precomputed' and issparse(X): with warnings.catch_warnings(): warnings.simplefilter('ignore', SparseEfficiencyWarning) X.setdiag(X.diagonal()) memory = check_memory(self.memory) (self.ordering_, self.core_distances_, self.reachability_, self.predecessor_) = memory.cache(compute_optics_graph)(X=X, min_samples=self.min_samples, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs, max_eps=self.max_eps) if self.cluster_method == 'xi': n_samples = len(self.reachability_) _validate_size(self.min_samples, n_samples, 'min_samples') if self.min_samples <= 1: self.min_samples = max(2, int(self.min_samples * n_samples)) if self.min_cluster_size is None: self.min_cluster_size = self.min_samples _validate_size(self.min_cluster_size, n_samples, 'min_cluster_size') if self.min_cluster_size <= 1: self.min_cluster_size = max(2, int(self.min_cluster_size * n_samples)) clusters = _xi_cluster(self.reachability_[self.ordering_], self.predecessor_[self.ordering_], self.ordering_, self.xi, self.min_samples, self.min_cluster_size, self.predecessor_correction) labels = _extract_xi_labels(self.ordering_, clusters) (labels_, clusters_) = (labels, clusters) self.cluster_hierarchy_ = clusters_ elif self.cluster_method == 'dbscan': if self.eps is None: eps = self.max_eps else: eps = self.eps if eps > self.max_eps: raise ValueError('Specify an epsilon smaller than %s. Got %s.' % (self.max_eps, eps)) n_samples = len(self.core_distances_) labels = np.zeros(n_samples, dtype=int) far_reach = self.reachability_ > eps near_core = self.core_distances_ <= eps labels[self.ordering_] = np.cumsum(far_reach[self.ordering_] & near_core[self.ordering_]) - 1 labels[far_reach & ~near_core] = -1 labels_ = labels self.labels_ = labels_ return self
def fit(self, X, y=None): """Perform OPTICS clustering. Extracts an ordered list of points and reachability distances, and performs initial clustering using ``max_eps`` distance specified at OPTICS object instantiation. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric='precomputed' A feature array, or array of distances between samples if metric='precomputed'. If a sparse matrix is provided, it will be converted into CSR format. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns a fitted instance of self. """ self._validate_params() dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float if dtype == bool and X.dtype != bool: msg = f'Data will be converted to boolean for metric {self.metric}, to avoid this warning, you may convert the data prior to calling fit.' warnings.warn(msg, DataConversionWarning) X = self._validate_data(X, dtype=dtype, accept_sparse='csr') if self.metric == 'precomputed' and issparse(X): with warnings.catch_warnings(): warnings.simplefilter('ignore', SparseEfficiencyWarning) X.setdiag(X.diagonal()) memory = check_memory(self.memory) (self.ordering_, self.core_distances_, self.reachability_, self.predecessor_) = memory.cache(compute_optics_graph)(X=X, min_samples=self.min_samples, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs, max_eps=self.max_eps) if self.cluster_method == 'xi': <DeepExtract> n_samples = len(self.reachability_) _validate_size(self.min_samples, n_samples, 'min_samples') if self.min_samples <= 1: self.min_samples = max(2, int(self.min_samples * n_samples)) if self.min_cluster_size is None: self.min_cluster_size = self.min_samples _validate_size(self.min_cluster_size, n_samples, 'min_cluster_size') if self.min_cluster_size <= 1: self.min_cluster_size = max(2, int(self.min_cluster_size * n_samples)) clusters = _xi_cluster(self.reachability_[self.ordering_], self.predecessor_[self.ordering_], self.ordering_, self.xi, self.min_samples, self.min_cluster_size, self.predecessor_correction) labels = _extract_xi_labels(self.ordering_, clusters) (labels_, clusters_) = (labels, clusters) </DeepExtract> self.cluster_hierarchy_ = clusters_ elif self.cluster_method == 'dbscan': if self.eps is None: eps = self.max_eps else: eps = self.eps if eps > self.max_eps: raise ValueError('Specify an epsilon smaller than %s. Got %s.' % (self.max_eps, eps)) <DeepExtract> n_samples = len(self.core_distances_) labels = np.zeros(n_samples, dtype=int) far_reach = self.reachability_ > eps near_core = self.core_distances_ <= eps labels[self.ordering_] = np.cumsum(far_reach[self.ordering_] & near_core[self.ordering_]) - 1 labels[far_reach & ~near_core] = -1 labels_ = labels </DeepExtract> self.labels_ = labels_ return self
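# A minimal OPTICS sketch on synthetic blobs, assuming the public sklearn.cluster.OPTICS API
# shown above; min_samples and cluster_method are illustrative choices, not tuned values.
import numpy as np
from sklearn.cluster import OPTICS

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + [5, 5], rng.randn(20, 2) - [5, 5]])

optics = OPTICS(min_samples=5, cluster_method="xi").fit(X)  # 'xi' is the default; 'dbscan' also supported
print(optics.labels_[:5])        # per-sample cluster labels (-1 marks noise)
print(optics.reachability_[:5])  # reachability distances, indexed in input order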
def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity'): """Compute the (weighted) graph of k-Neighbors for points in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. For ``metric='precomputed'`` the shape should be (n_queries, n_indexed). Otherwise the shape should be (n_queries, n_features). n_neighbors : int, default=None Number of neighbors for each sample. The default is the value passed to the constructor. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are distances between points, type of distance depends on the selected metric parameter in NearestNeighbors class. Returns ------- A : sparse-matrix of shape (n_queries, n_samples_fit) `n_samples_fit` is the number of samples in the fitted data. `A[i, j]` gives the weight of the edge connecting `i` to `j`. The matrix is of CSR format. See Also -------- NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) NearestNeighbors(n_neighbors=2) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) """ check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors if mode == 'connectivity': check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors elif n_neighbors <= 0: raise ValueError('Expected n_neighbors > 0. 
Got %d' % n_neighbors) elif not isinstance(n_neighbors, numbers.Integral): raise TypeError('n_neighbors does not take %s value, enter integer value' % type(n_neighbors)) query_is_train = X is None if query_is_train: X = self._fit_X n_neighbors += 1 elif self.metric == 'precomputed': X = _check_precomputed(X) else: X = self._validate_data(X, accept_sparse='csr', reset=False, order='C') n_samples_fit = self.n_samples_fit_ if n_neighbors > n_samples_fit: raise ValueError('Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d' % (n_samples_fit, n_neighbors)) n_jobs = effective_n_jobs(self.n_jobs) chunked_results = None use_pairwise_distances_reductions = self._fit_method == 'brute' and ArgKmin.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_) if use_pairwise_distances_reductions: results = ArgKmin.compute(X=X, Y=self._fit_X, k=n_neighbors, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=False) elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X): results = _kneighbors_from_graph(X, n_neighbors=n_neighbors, return_distance=False) elif self._fit_method == 'brute': reduce_func = partial(self._kneighbors_reduce_func, n_neighbors=n_neighbors, return_distance=False) if self.effective_metric_ == 'euclidean': kwds = {'squared': True} else: kwds = self.effective_metric_params_ chunked_results = list(pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=n_jobs, **kwds)) elif self._fit_method in ['ball_tree', 'kd_tree']: if issparse(X): raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method) chunked_results = Parallel(n_jobs, prefer='threads')((delayed(_tree_query_parallel_helper)(self._tree, X[s], n_neighbors, False) for s in gen_even_slices(X.shape[0], n_jobs))) else: raise ValueError('internal: _fit_method not recognized') if chunked_results is not None: if False: (neigh_dist, neigh_ind) = zip(*chunked_results) results = (np.vstack(neigh_dist), np.vstack(neigh_ind)) else: results = np.vstack(chunked_results) if not query_is_train: A_ind = results else: if False: (neigh_dist, neigh_ind) = results else: neigh_ind = results (n_queries, _) = X.shape sample_range = np.arange(n_queries)[:, None] sample_mask = neigh_ind != sample_range dup_gr_nbrs = np.all(sample_mask, axis=1) sample_mask[:, 0][dup_gr_nbrs] = False neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1)) if False: neigh_dist = np.reshape(neigh_dist[sample_mask], (n_queries, n_neighbors - 1)) A_ind = (neigh_dist, neigh_ind) A_ind = neigh_ind n_queries = A_ind.shape[0] A_data = np.ones(n_queries * n_neighbors) elif mode == 'distance': check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors elif n_neighbors <= 0: raise ValueError('Expected n_neighbors > 0. 
Got %d' % n_neighbors) elif not isinstance(n_neighbors, numbers.Integral): raise TypeError('n_neighbors does not take %s value, enter integer value' % type(n_neighbors)) query_is_train = X is None if query_is_train: X = self._fit_X n_neighbors += 1 elif self.metric == 'precomputed': X = _check_precomputed(X) else: X = self._validate_data(X, accept_sparse='csr', reset=False, order='C') n_samples_fit = self.n_samples_fit_ if n_neighbors > n_samples_fit: raise ValueError('Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d' % (n_samples_fit, n_neighbors)) n_jobs = effective_n_jobs(self.n_jobs) chunked_results = None use_pairwise_distances_reductions = self._fit_method == 'brute' and ArgKmin.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_) if use_pairwise_distances_reductions: results = ArgKmin.compute(X=X, Y=self._fit_X, k=n_neighbors, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=True) elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X): results = _kneighbors_from_graph(X, n_neighbors=n_neighbors, return_distance=True) elif self._fit_method == 'brute': reduce_func = partial(self._kneighbors_reduce_func, n_neighbors=n_neighbors, return_distance=True) if self.effective_metric_ == 'euclidean': kwds = {'squared': True} else: kwds = self.effective_metric_params_ chunked_results = list(pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=n_jobs, **kwds)) elif self._fit_method in ['ball_tree', 'kd_tree']: if issparse(X): raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method) chunked_results = Parallel(n_jobs, prefer='threads')((delayed(_tree_query_parallel_helper)(self._tree, X[s], n_neighbors, True) for s in gen_even_slices(X.shape[0], n_jobs))) else: raise ValueError('internal: _fit_method not recognized') if chunked_results is not None: if True: (neigh_dist, neigh_ind) = zip(*chunked_results) results = (np.vstack(neigh_dist), np.vstack(neigh_ind)) else: results = np.vstack(chunked_results) if not query_is_train: (A_data, A_ind) = results else: if True: (neigh_dist, neigh_ind) = results else: neigh_ind = results (n_queries, _) = X.shape sample_range = np.arange(n_queries)[:, None] sample_mask = neigh_ind != sample_range dup_gr_nbrs = np.all(sample_mask, axis=1) sample_mask[:, 0][dup_gr_nbrs] = False neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1)) if True: neigh_dist = np.reshape(neigh_dist[sample_mask], (n_queries, n_neighbors - 1)) (A_data, A_ind) = (neigh_dist, neigh_ind) (A_data, A_ind) = neigh_ind A_data = np.ravel(A_data) else: raise ValueError(f'Unsupported mode, must be one of "connectivity", or "distance" but got "{mode}" instead') n_queries = A_ind.shape[0] n_samples_fit = self.n_samples_fit_ n_nonzero = n_queries * n_neighbors A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)) return kneighbors_graph
def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity'): """Compute the (weighted) graph of k-Neighbors for points in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. For ``metric='precomputed'`` the shape should be (n_queries, n_indexed). Otherwise the shape should be (n_queries, n_features). n_neighbors : int, default=None Number of neighbors for each sample. The default is the value passed to the constructor. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are distances between points, type of distance depends on the selected metric parameter in NearestNeighbors class. Returns ------- A : sparse-matrix of shape (n_queries, n_samples_fit) `n_samples_fit` is the number of samples in the fitted data. `A[i, j]` gives the weight of the edge connecting `i` to `j`. The matrix is of CSR format. See Also -------- NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) NearestNeighbors(n_neighbors=2) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) """ check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors if mode == 'connectivity': <DeepExtract> check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors elif n_neighbors <= 0: raise ValueError('Expected n_neighbors > 0. 
Got %d' % n_neighbors) elif not isinstance(n_neighbors, numbers.Integral): raise TypeError('n_neighbors does not take %s value, enter integer value' % type(n_neighbors)) query_is_train = X is None if query_is_train: X = self._fit_X n_neighbors += 1 elif self.metric == 'precomputed': X = _check_precomputed(X) else: X = self._validate_data(X, accept_sparse='csr', reset=False, order='C') n_samples_fit = self.n_samples_fit_ if n_neighbors > n_samples_fit: raise ValueError('Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d' % (n_samples_fit, n_neighbors)) n_jobs = effective_n_jobs(self.n_jobs) chunked_results = None use_pairwise_distances_reductions = self._fit_method == 'brute' and ArgKmin.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_) if use_pairwise_distances_reductions: results = ArgKmin.compute(X=X, Y=self._fit_X, k=n_neighbors, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=False) elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X): results = _kneighbors_from_graph(X, n_neighbors=n_neighbors, return_distance=False) elif self._fit_method == 'brute': reduce_func = partial(self._kneighbors_reduce_func, n_neighbors=n_neighbors, return_distance=False) if self.effective_metric_ == 'euclidean': kwds = {'squared': True} else: kwds = self.effective_metric_params_ chunked_results = list(pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=n_jobs, **kwds)) elif self._fit_method in ['ball_tree', 'kd_tree']: if issparse(X): raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method) chunked_results = Parallel(n_jobs, prefer='threads')((delayed(_tree_query_parallel_helper)(self._tree, X[s], n_neighbors, False) for s in gen_even_slices(X.shape[0], n_jobs))) else: raise ValueError('internal: _fit_method not recognized') if chunked_results is not None: if False: (neigh_dist, neigh_ind) = zip(*chunked_results) results = (np.vstack(neigh_dist), np.vstack(neigh_ind)) else: results = np.vstack(chunked_results) if not query_is_train: A_ind = results else: if False: (neigh_dist, neigh_ind) = results else: neigh_ind = results (n_queries, _) = X.shape sample_range = np.arange(n_queries)[:, None] sample_mask = neigh_ind != sample_range dup_gr_nbrs = np.all(sample_mask, axis=1) sample_mask[:, 0][dup_gr_nbrs] = False neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1)) if False: neigh_dist = np.reshape(neigh_dist[sample_mask], (n_queries, n_neighbors - 1)) A_ind = (neigh_dist, neigh_ind) A_ind = neigh_ind </DeepExtract> n_queries = A_ind.shape[0] A_data = np.ones(n_queries * n_neighbors) elif mode == 'distance': <DeepExtract> check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors elif n_neighbors <= 0: raise ValueError('Expected n_neighbors > 0. 
Got %d' % n_neighbors) elif not isinstance(n_neighbors, numbers.Integral): raise TypeError('n_neighbors does not take %s value, enter integer value' % type(n_neighbors)) query_is_train = X is None if query_is_train: X = self._fit_X n_neighbors += 1 elif self.metric == 'precomputed': X = _check_precomputed(X) else: X = self._validate_data(X, accept_sparse='csr', reset=False, order='C') n_samples_fit = self.n_samples_fit_ if n_neighbors > n_samples_fit: raise ValueError('Expected n_neighbors <= n_samples, but n_samples = %d, n_neighbors = %d' % (n_samples_fit, n_neighbors)) n_jobs = effective_n_jobs(self.n_jobs) chunked_results = None use_pairwise_distances_reductions = self._fit_method == 'brute' and ArgKmin.is_usable_for(X if X is not None else self._fit_X, self._fit_X, self.effective_metric_) if use_pairwise_distances_reductions: results = ArgKmin.compute(X=X, Y=self._fit_X, k=n_neighbors, metric=self.effective_metric_, metric_kwargs=self.effective_metric_params_, strategy='auto', return_distance=True) elif self._fit_method == 'brute' and self.metric == 'precomputed' and issparse(X): results = _kneighbors_from_graph(X, n_neighbors=n_neighbors, return_distance=True) elif self._fit_method == 'brute': reduce_func = partial(self._kneighbors_reduce_func, n_neighbors=n_neighbors, return_distance=True) if self.effective_metric_ == 'euclidean': kwds = {'squared': True} else: kwds = self.effective_metric_params_ chunked_results = list(pairwise_distances_chunked(X, self._fit_X, reduce_func=reduce_func, metric=self.effective_metric_, n_jobs=n_jobs, **kwds)) elif self._fit_method in ['ball_tree', 'kd_tree']: if issparse(X): raise ValueError("%s does not work with sparse matrices. Densify the data, or set algorithm='brute'" % self._fit_method) chunked_results = Parallel(n_jobs, prefer='threads')((delayed(_tree_query_parallel_helper)(self._tree, X[s], n_neighbors, True) for s in gen_even_slices(X.shape[0], n_jobs))) else: raise ValueError('internal: _fit_method not recognized') if chunked_results is not None: if True: (neigh_dist, neigh_ind) = zip(*chunked_results) results = (np.vstack(neigh_dist), np.vstack(neigh_ind)) else: results = np.vstack(chunked_results) if not query_is_train: (A_data, A_ind) = results else: if True: (neigh_dist, neigh_ind) = results else: neigh_ind = results (n_queries, _) = X.shape sample_range = np.arange(n_queries)[:, None] sample_mask = neigh_ind != sample_range dup_gr_nbrs = np.all(sample_mask, axis=1) sample_mask[:, 0][dup_gr_nbrs] = False neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1)) if True: neigh_dist = np.reshape(neigh_dist[sample_mask], (n_queries, n_neighbors - 1)) (A_data, A_ind) = (neigh_dist, neigh_ind) (A_data, A_ind) = neigh_ind </DeepExtract> A_data = np.ravel(A_data) else: raise ValueError(f'Unsupported mode, must be one of "connectivity", or "distance" but got "{mode}" instead') n_queries = A_ind.shape[0] n_samples_fit = self.n_samples_fit_ n_nonzero = n_queries * n_neighbors A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)) return kneighbors_graph
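# Companion sketch for mode="distance" with the same toy data as the docstring above:
# edge weights become distances rather than 0/1 connectivity (output verified by hand).
from sklearn.neighbors import NearestNeighbors

X = [[0], [3], [1]]
neigh = NearestNeighbors(n_neighbors=2).fit(X)
print(neigh.kneighbors_graph(X, mode="distance").toarray())
# [[0. 0. 1.]
#  [0. 0. 2.]
#  [1. 0. 0.]]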
def weight_intercept_raw(self, coef, X): """Helper function to get coefficients, intercept and raw_prediction. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order="F"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. Returns ------- weights : ndarray of shape (n_features,) or (n_classes, n_features) Coefficients without intercept term. intercept : float or ndarray of shape (n_classes,) Intercept terms. raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes) """ if not self.base_loss.is_multiclass: if self.fit_intercept: intercept = coef[-1] weights = coef[:-1] else: intercept = 0.0 weights = coef else: if coef.ndim == 1: weights = coef.reshape((self.base_loss.n_classes, -1), order='F') else: weights = coef if self.fit_intercept: intercept = weights[:, -1] weights = weights[:, :-1] else: intercept = 0.0 (weights, intercept) = (weights, intercept) if not self.base_loss.is_multiclass: raw_prediction = X @ weights + intercept else: raw_prediction = X @ weights.T + intercept return (weights, intercept, raw_prediction)
def weight_intercept_raw(self, coef, X): """Helper function to get coefficients, intercept and raw_prediction. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order="F"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. Returns ------- weights : ndarray of shape (n_features,) or (n_classes, n_features) Coefficients without intercept term. intercept : float or ndarray of shape (n_classes,) Intercept terms. raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes) """ <DeepExtract> if not self.base_loss.is_multiclass: if self.fit_intercept: intercept = coef[-1] weights = coef[:-1] else: intercept = 0.0 weights = coef else: if coef.ndim == 1: weights = coef.reshape((self.base_loss.n_classes, -1), order='F') else: weights = coef if self.fit_intercept: intercept = weights[:, -1] weights = weights[:, :-1] else: intercept = 0.0 (weights, intercept) = (weights, intercept) </DeepExtract> if not self.base_loss.is_multiclass: raw_prediction = X @ weights + intercept else: raw_prediction = X @ weights.T + intercept return (weights, intercept, raw_prediction)
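# Pure-NumPy sketch of the coefficient layout described in the docstring above: for a
# flattened multiclass coef of shape (n_classes * n_dof,), the classes of one feature are
# contiguous, so the 2-D view is recovered with order="F"; the shapes here are illustrative.
import numpy as np

n_classes, n_features, fit_intercept = 3, 2, True
n_dof = n_features + int(fit_intercept)

coef_2d = np.arange(n_classes * n_dof, dtype=float).reshape(n_classes, n_dof)
coef_flat = coef_2d.ravel(order="F")                  # class-contiguous flattening
back = coef_flat.reshape((n_classes, -1), order="F")
weights, intercept = back[:, :-1], back[:, -1]        # split mirrors the intercept handling above
assert np.allclose(back, coef_2d)
print(weights.shape, intercept.shape)                 # (3, 2) (3,)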
def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) X_dense[3, :] = 0.0 indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 X_sparse_pruned = sparse.csr_matrix(X_dense) for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert X_norm is not X if hasattr(X_norm, 'toarray'): X_norm = X_norm.toarray() X_norm1 = X_norm normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) assert X_norm is X if hasattr(X_norm, 'toarray'): X_norm = X_norm.toarray() X_norm2 = X_norm for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert X_norm is not X assert isinstance(X_norm, sparse.csr_matrix) if hasattr(X_norm, 'toarray'): X_norm = X_norm.toarray() X_norm = X_norm for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) X_dense[3, :] = 0.0 indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 X_sparse_pruned = sparse.csr_matrix(X_dense) for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert X_norm is not X <DeepExtract> if hasattr(X_norm, 'toarray'): X_norm = X_norm.toarray() X_norm1 = X_norm </DeepExtract> normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) assert X_norm is X <DeepExtract> if hasattr(X_norm, 'toarray'): X_norm = X_norm.toarray() X_norm2 = X_norm </DeepExtract> for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert X_norm is not X assert isinstance(X_norm, sparse.csr_matrix) <DeepExtract> if hasattr(X_norm, 'toarray'): X_norm = X_norm.toarray() X_norm = X_norm </DeepExtract> for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0)
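# Minimal Normalizer sketch mirroring the behaviour the test above asserts: with norm="l1"
# every non-zero row of the output has absolute values summing to 1, and all-zero rows stay zero.
import numpy as np
from sklearn.preprocessing import Normalizer

X = np.array([[1.0, -3.0], [0.0, 0.0], [2.0, 2.0]])
X_l1 = Normalizer(norm="l1").fit_transform(X)
print(np.abs(X_l1).sum(axis=1))  # [1. 0. 1.]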
@pytest.mark.parametrize('solver, sparse_X', ((solver, sparse_X) for (solver, sparse_X) in product(['cholesky', 'sag', 'sparse_cg', 'lsqr', 'saga', 'ridgecv'], [False, True]) if not (sparse_X and solver not in ['sparse_cg', 'ridgecv']))) @pytest.mark.parametrize('n_samples,dtype,proportion_nonzero', [(20, 'float32', 0.1), (40, 'float32', 1.0), (20, 'float64', 0.2)]) @pytest.mark.parametrize('seed', np.arange(3)) def test_solver_consistency(solver, proportion_nonzero, n_samples, dtype, sparse_X, seed): alpha = 1.0 noise = 50.0 if proportion_nonzero > 0.9 else 500.0 (X, y, c) = make_regression(n_samples=n_samples, n_features=30, n_informative=n_informative, n_targets=n_targets, bias=10, noise=noise, shuffle=shuffle, coef=True, random_state=seed) if 30 == 1: c = np.asarray([c]) X += X_offset mask = np.random.RandomState(seed).binomial(1, proportion_nonzero, X.shape) > 0 removed_X = X.copy() X[~mask] = 0.0 removed_X[mask] = 0.0 y -= removed_X.dot(c) if positive: y += X.dot(np.abs(c) + 1 - c) c = np.abs(c) + 1 if 30 == 1: c = c[0] if coef: (X, y) = (X, y, c) (X, y) = (X, y) X = minmax_scale(X) svd_ridge = Ridge(solver='svd', alpha=alpha).fit(X, y) X = X.astype(dtype, copy=False) y = y.astype(dtype, copy=False) if sparse_X: X = sp.csr_matrix(X) if solver == 'ridgecv': ridge = RidgeCV(alphas=[alpha]) else: ridge = Ridge(solver=solver, tol=1e-10, alpha=alpha) ridge.fit(X, y) assert_allclose(ridge.coef_, svd_ridge.coef_, atol=0.001, rtol=0.001) assert_allclose(ridge.intercept_, svd_ridge.intercept_, atol=0.001, rtol=0.001)
@pytest.mark.parametrize('solver, sparse_X', ((solver, sparse_X) for (solver, sparse_X) in product(['cholesky', 'sag', 'sparse_cg', 'lsqr', 'saga', 'ridgecv'], [False, True]) if not (sparse_X and solver not in ['sparse_cg', 'ridgecv']))) @pytest.mark.parametrize('n_samples,dtype,proportion_nonzero', [(20, 'float32', 0.1), (40, 'float32', 1.0), (20, 'float64', 0.2)]) @pytest.mark.parametrize('seed', np.arange(3)) def test_solver_consistency(solver, proportion_nonzero, n_samples, dtype, sparse_X, seed): alpha = 1.0 noise = 50.0 if proportion_nonzero > 0.9 else 500.0 <DeepExtract> (X, y, c) = make_regression(n_samples=n_samples, n_features=30, n_informative=n_informative, n_targets=n_targets, bias=10, noise=noise, shuffle=shuffle, coef=True, random_state=seed) if 30 == 1: c = np.asarray([c]) X += X_offset mask = np.random.RandomState(seed).binomial(1, proportion_nonzero, X.shape) > 0 removed_X = X.copy() X[~mask] = 0.0 removed_X[mask] = 0.0 y -= removed_X.dot(c) if positive: y += X.dot(np.abs(c) + 1 - c) c = np.abs(c) + 1 if 30 == 1: c = c[0] if coef: (X, y) = (X, y, c) (X, y) = (X, y) </DeepExtract> X = minmax_scale(X) svd_ridge = Ridge(solver='svd', alpha=alpha).fit(X, y) X = X.astype(dtype, copy=False) y = y.astype(dtype, copy=False) if sparse_X: X = sp.csr_matrix(X) if solver == 'ridgecv': ridge = RidgeCV(alphas=[alpha]) else: ridge = Ridge(solver=solver, tol=1e-10, alpha=alpha) ridge.fit(X, y) assert_allclose(ridge.coef_, svd_ridge.coef_, atol=0.001, rtol=0.001) assert_allclose(ridge.intercept_, svd_ridge.intercept_, atol=0.001, rtol=0.001)
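# Illustrative check in the spirit of the test above: different Ridge solvers should agree
# closely on well-conditioned data; the dataset size and tolerance are assumptions, not the
# values used by the test itself.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge

X, y = make_regression(n_samples=50, n_features=10, noise=1.0, random_state=0)
coef_svd = Ridge(alpha=1.0, solver="svd").fit(X, y).coef_
coef_chol = Ridge(alpha=1.0, solver="cholesky").fit(X, y).coef_
np.testing.assert_allclose(coef_svd, coef_chol, rtol=1e-6)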
def check_param_validation(name, estimator_orig): rng = np.random.RandomState(0) X = rng.uniform(size=(20, 5)) y = rng.randint(0, 2, size=20) if _safe_tags(estimator_orig, key='requires_positive_y'): y += 1 + abs(y.min()) if _safe_tags(estimator_orig, key='binary_only') and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) if _safe_tags(estimator_orig, key='multioutput_only'): y = np.reshape(y, (-1, 1)) y = y estimator_params = estimator_orig.get_params(deep=False).keys() if estimator_params: validation_params = estimator_orig._parameter_constraints.keys() unexpected_params = set(validation_params) - set(estimator_params) missing_params = set(estimator_params) - set(validation_params) err_msg = f'Mismatch between _parameter_constraints and the parameters of {name}.\nConsider the unexpected parameters {unexpected_params} and expected but missing parameters {missing_params}' assert validation_params == estimator_params, err_msg param_with_bad_type = type('BadType', (), {})() fit_methods = ['fit', 'partial_fit', 'fit_transform', 'fit_predict'] for param_name in estimator_params: constraints = estimator_orig._parameter_constraints[param_name] if constraints == 'no_validation': continue if any((isinstance(constraint, Interval) and constraint.type == Integral for constraint in constraints)) and any((isinstance(constraint, Interval) and constraint.type == Real for constraint in constraints)): raise ValueError(f"The constraint for parameter {param_name} of {name} can't have a mix of intervals of Integral and Real types. Use the type RealNotInt instead of Real.") match = f"The '{param_name}' parameter of {name} must be .* Got .* instead." err_msg = f'{name} does not raise an informative error message when the parameter {param_name} does not have a valid type or value.' estimator = clone(estimator_orig) estimator.set_params(**{param_name: param_with_bad_type}) for method in fit_methods: if not hasattr(estimator, method): continue with raises(InvalidParameterError, match=match, err_msg=err_msg): if any((isinstance(X_type, str) and X_type.endswith('labels') for X_type in _safe_tags(estimator, key='X_types'))): getattr(estimator, method)(y) else: getattr(estimator, method)(X, y) constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: try: bad_value = generate_invalid_param_val(constraint) except NotImplementedError: continue estimator.set_params(**{param_name: bad_value}) for method in fit_methods: if not hasattr(estimator, method): continue with raises(InvalidParameterError, match=match, err_msg=err_msg): if any((X_type.endswith('labels') for X_type in _safe_tags(estimator, key='X_types'))): getattr(estimator, method)(y) else: getattr(estimator, method)(X, y)
def check_param_validation(name, estimator_orig): rng = np.random.RandomState(0) X = rng.uniform(size=(20, 5)) y = rng.randint(0, 2, size=20) <DeepExtract> if _safe_tags(estimator_orig, key='requires_positive_y'): y += 1 + abs(y.min()) if _safe_tags(estimator_orig, key='binary_only') and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) if _safe_tags(estimator_orig, key='multioutput_only'): y = np.reshape(y, (-1, 1)) y = y </DeepExtract> estimator_params = estimator_orig.get_params(deep=False).keys() if estimator_params: validation_params = estimator_orig._parameter_constraints.keys() unexpected_params = set(validation_params) - set(estimator_params) missing_params = set(estimator_params) - set(validation_params) err_msg = f'Mismatch between _parameter_constraints and the parameters of {name}.\nConsider the unexpected parameters {unexpected_params} and expected but missing parameters {missing_params}' assert validation_params == estimator_params, err_msg param_with_bad_type = type('BadType', (), {})() fit_methods = ['fit', 'partial_fit', 'fit_transform', 'fit_predict'] for param_name in estimator_params: constraints = estimator_orig._parameter_constraints[param_name] if constraints == 'no_validation': continue if any((isinstance(constraint, Interval) and constraint.type == Integral for constraint in constraints)) and any((isinstance(constraint, Interval) and constraint.type == Real for constraint in constraints)): raise ValueError(f"The constraint for parameter {param_name} of {name} can't have a mix of intervals of Integral and Real types. Use the type RealNotInt instead of Real.") match = f"The '{param_name}' parameter of {name} must be .* Got .* instead." err_msg = f'{name} does not raise an informative error message when the parameter {param_name} does not have a valid type or value.' estimator = clone(estimator_orig) estimator.set_params(**{param_name: param_with_bad_type}) for method in fit_methods: if not hasattr(estimator, method): continue with raises(InvalidParameterError, match=match, err_msg=err_msg): if any((isinstance(X_type, str) and X_type.endswith('labels') for X_type in _safe_tags(estimator, key='X_types'))): getattr(estimator, method)(y) else: getattr(estimator, method)(X, y) constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: try: bad_value = generate_invalid_param_val(constraint) except NotImplementedError: continue estimator.set_params(**{param_name: bad_value}) for method in fit_methods: if not hasattr(estimator, method): continue with raises(InvalidParameterError, match=match, err_msg=err_msg): if any((X_type.endswith('labels') for X_type in _safe_tags(estimator, key='X_types'))): getattr(estimator, method)(y) else: getattr(estimator, method)(X, y)
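# A small illustration of the behaviour check_param_validation exercises: an out-of-range
# constructor parameter triggers an informative error at fit time. In recent scikit-learn
# releases the raised InvalidParameterError subclasses ValueError, so catching ValueError is
# the portable choice; the exact message wording is not guaranteed.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X, y = rng.uniform(size=(20, 3)), rng.uniform(size=20)
try:
    Ridge(alpha=-1.0).fit(X, y)  # alpha must be a non-negative real
except ValueError as exc:
    print(type(exc).__name__, "->", exc)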
@hides def decision_function(self, X, *args, **kwargs): check_is_fitted(self) return np.ones(X.shape[0])
@hides def decision_function(self, X, *args, **kwargs): <DeepExtract> check_is_fitted(self) </DeepExtract> return np.ones(X.shape[0])
@pytest.mark.parametrize('drop', [True, False]) def test_precision_recall_curve(drop): if dataset is None: dataset = datasets.load_iris() X = dataset.data y = dataset.target if True: (X, y) = (X[y < 2], y[y < 2]) (n_samples, n_features) = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) (X, y) = (X[p], y[p]) half = int(n_samples / 2) rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] clf = svm.SVC(kernel='linear', probability=True, random_state=0) y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if True: y_score = y_score[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] (y_true, _, y_score) = (y_true, y_pred, y_score) (p, r, thresholds) = precision_recall_curve(y_true, y_score, drop_intermediate=drop) precision_recall_auc = _average_precision_slow(y_true, y_score) assert_array_almost_equal(precision_recall_auc, 0.859, 3) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, y_score)) assert_almost_equal(_average_precision(y_true, y_score), precision_recall_auc, decimal=2) assert p.size == r.size assert p.size == thresholds.size + 1 (p, r, thresholds) = precision_recall_curve(y_true, np.zeros_like(y_score), drop_intermediate=drop) assert p.size == r.size assert p.size == thresholds.size + 1 (p, r, t) = precision_recall_curve(y_true[1:], y_score[1:], drop_intermediate=drop) assert r[0] == 1.0 assert p[0] == y_true[1:].mean() y_true[np.where(y_true == 0)] = -1 y_true_copy = y_true.copy() (p, r, thresholds) = precision_recall_curve(y_true, y_score, drop_intermediate=drop) precision_recall_auc = _average_precision_slow(y_true, y_score) assert_array_almost_equal(precision_recall_auc, 0.859, 3) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, y_score)) assert_almost_equal(_average_precision(y_true, y_score), precision_recall_auc, decimal=2) assert p.size == r.size assert p.size == thresholds.size + 1 (p, r, thresholds) = precision_recall_curve(y_true, np.zeros_like(y_score), drop_intermediate=drop) assert p.size == r.size assert p.size == thresholds.size + 1 assert_array_equal(y_true_copy, y_true) labels = [1, 0, 0, 1] predict_probas = [1, 2, 3, 4] (p, r, t) = precision_recall_curve(labels, predict_probas, drop_intermediate=drop) if drop: assert_allclose(p, [0.5, 0.33333333, 1.0, 1.0]) assert_allclose(r, [1.0, 0.5, 0.5, 0.0]) assert_allclose(t, [1, 2, 4]) else: assert_allclose(p, [0.5, 0.33333333, 0.5, 1.0, 1.0]) assert_allclose(r, [1.0, 0.5, 0.5, 0.5, 0.0]) assert_allclose(t, [1, 2, 3, 4]) assert p.size == r.size assert p.size == t.size + 1
@pytest.mark.parametrize('drop', [True, False]) def test_precision_recall_curve(drop): <DeepExtract> if dataset is None: dataset = datasets.load_iris() X = dataset.data y = dataset.target if True: (X, y) = (X[y < 2], y[y < 2]) (n_samples, n_features) = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) (X, y) = (X[p], y[p]) half = int(n_samples / 2) rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] clf = svm.SVC(kernel='linear', probability=True, random_state=0) y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if True: y_score = y_score[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] (y_true, _, y_score) = (y_true, y_pred, y_score) </DeepExtract> <DeepExtract> (p, r, thresholds) = precision_recall_curve(y_true, y_score, drop_intermediate=drop) precision_recall_auc = _average_precision_slow(y_true, y_score) assert_array_almost_equal(precision_recall_auc, 0.859, 3) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, y_score)) assert_almost_equal(_average_precision(y_true, y_score), precision_recall_auc, decimal=2) assert p.size == r.size assert p.size == thresholds.size + 1 (p, r, thresholds) = precision_recall_curve(y_true, np.zeros_like(y_score), drop_intermediate=drop) assert p.size == r.size assert p.size == thresholds.size + 1 </DeepExtract> (p, r, t) = precision_recall_curve(y_true[1:], y_score[1:], drop_intermediate=drop) assert r[0] == 1.0 assert p[0] == y_true[1:].mean() y_true[np.where(y_true == 0)] = -1 y_true_copy = y_true.copy() <DeepExtract> (p, r, thresholds) = precision_recall_curve(y_true, y_score, drop_intermediate=drop) precision_recall_auc = _average_precision_slow(y_true, y_score) assert_array_almost_equal(precision_recall_auc, 0.859, 3) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, y_score)) assert_almost_equal(_average_precision(y_true, y_score), precision_recall_auc, decimal=2) assert p.size == r.size assert p.size == thresholds.size + 1 (p, r, thresholds) = precision_recall_curve(y_true, np.zeros_like(y_score), drop_intermediate=drop) assert p.size == r.size assert p.size == thresholds.size + 1 </DeepExtract> assert_array_equal(y_true_copy, y_true) labels = [1, 0, 0, 1] predict_probas = [1, 2, 3, 4] (p, r, t) = precision_recall_curve(labels, predict_probas, drop_intermediate=drop) if drop: assert_allclose(p, [0.5, 0.33333333, 1.0, 1.0]) assert_allclose(r, [1.0, 0.5, 0.5, 0.0]) assert_allclose(t, [1, 2, 4]) else: assert_allclose(p, [0.5, 0.33333333, 0.5, 1.0, 1.0]) assert_allclose(r, [1.0, 0.5, 0.5, 0.5, 0.0]) assert_allclose(t, [1, 2, 3, 4]) assert p.size == r.size assert p.size == t.size + 1
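# Minimal precision_recall_curve sketch reproducing the small hand-checked case at the end
# of the test above (the drop_intermediate=False expectations).
from sklearn.metrics import precision_recall_curve

precision, recall, thresholds = precision_recall_curve([1, 0, 0, 1], [1, 2, 3, 4])
print(precision)   # [0.5  0.33333333  0.5  1.  1.]
print(recall)      # [1.  0.5  0.5  0.5  0.]
print(thresholds)  # [1 2 3 4]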
def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the median and quantiles used for later scaling along the features axis. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted scaler. """ self._validate_params() X = self._validate_data(X, accept_sparse='csc', dtype=FLOAT_DTYPES, force_all_finite='allow-nan') (q_min, q_max) = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError('Invalid quantile range: %s' % str(self.quantile_range)) if self.with_centering: if sparse.issparse(X): raise ValueError('Cannot center sparse matrices: use `with_centering=False` instead. See docstring for motivation and alternatives.') self.center_ = np.nanmedian(X, axis=0) else: self.center_ = None if self.with_scaling: quantiles = [] for feature_idx in range(X.shape[1]): if sparse.issparse(X): column_nnz_data = X.data[X.indptr[feature_idx]:X.indptr[feature_idx + 1]] column_data = np.zeros(shape=X.shape[0], dtype=X.dtype) column_data[:len(column_nnz_data)] = column_nnz_data else: column_data = X[:, feature_idx] quantiles.append(np.nanpercentile(column_data, self.quantile_range)) quantiles = np.transpose(quantiles) self.scale_ = quantiles[1] - quantiles[0] if np.isscalar(self.scale_): if self.scale_ == 0.0: self.scale_ = 1.0 self.scale_ = self.scale_ elif isinstance(self.scale_, np.ndarray): if constant_mask is None: constant_mask = self.scale_ < 10 * np.finfo(self.scale_.dtype).eps if False: self.scale_ = self.scale_.copy() self.scale_[constant_mask] = 1.0 self.scale_ = self.scale_ if self.unit_variance: adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0) self.scale_ = self.scale_ / adjust else: self.scale_ = None return self
def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the median and quantiles used for later scaling along the features axis. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted scaler. """ self._validate_params() X = self._validate_data(X, accept_sparse='csc', dtype=FLOAT_DTYPES, force_all_finite='allow-nan') (q_min, q_max) = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError('Invalid quantile range: %s' % str(self.quantile_range)) if self.with_centering: if sparse.issparse(X): raise ValueError('Cannot center sparse matrices: use `with_centering=False` instead. See docstring for motivation and alternatives.') self.center_ = np.nanmedian(X, axis=0) else: self.center_ = None if self.with_scaling: quantiles = [] for feature_idx in range(X.shape[1]): if sparse.issparse(X): column_nnz_data = X.data[X.indptr[feature_idx]:X.indptr[feature_idx + 1]] column_data = np.zeros(shape=X.shape[0], dtype=X.dtype) column_data[:len(column_nnz_data)] = column_nnz_data else: column_data = X[:, feature_idx] quantiles.append(np.nanpercentile(column_data, self.quantile_range)) quantiles = np.transpose(quantiles) self.scale_ = quantiles[1] - quantiles[0] <DeepExtract> if np.isscalar(self.scale_): if self.scale_ == 0.0: self.scale_ = 1.0 self.scale_ = self.scale_ elif isinstance(self.scale_, np.ndarray): if constant_mask is None: constant_mask = self.scale_ < 10 * np.finfo(self.scale_.dtype).eps if False: self.scale_ = self.scale_.copy() self.scale_[constant_mask] = 1.0 self.scale_ = self.scale_ </DeepExtract> if self.unit_variance: adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0) self.scale_ = self.scale_ / adjust else: self.scale_ = None return self
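# Minimal RobustScaler sketch: centering uses the per-feature median and scaling uses the
# range between the fitted quantiles (the IQR by default), matching the fit logic above; the
# toy column with one outlier is illustrative.
import numpy as np
from sklearn.preprocessing import RobustScaler

X = np.array([[1.0], [2.0], [3.0], [100.0]])
scaler = RobustScaler(quantile_range=(25.0, 75.0)).fit(X)
print(scaler.center_, scaler.scale_)   # per-feature median and IQR
print(scaler.transform(X).ravel())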
def test_raises_value_error_if_sample_weights_greater_than_1d(): n_sampless = [2, 3] n_featuress = [3, 2] rng = np.random.RandomState(42) for (n_samples, n_features) in zip(n_sampless, n_featuress): X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) sample_weights_OK = rng.randn(n_samples) ** 2 + 1 sample_weights_OK_1 = 1.0 sample_weights_OK_2 = 2.0 sample_weights_not_OK = sample_weights_OK[:, np.newaxis] sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :] ridge = Ridge(alpha=1) ridge.fit(X, y, sample_weights_OK) ridge.fit(X, y, sample_weights_OK_1) ridge.fit(X, y, sample_weights_OK_2) def fit_ridge_not_ok(): ridge.fit(X, y, sample_weights_not_OK) def fit_ridge_not_ok_2(): ridge.fit(X, y, sample_weights_not_OK_2) err_msg = 'Sample weights must be 1D array or scalar' with pytest.raises(ValueError, match=err_msg): ridge.fit(X, y, sample_weights_not_OK) err_msg = 'Sample weights must be 1D array or scalar' with pytest.raises(ValueError, match=err_msg): ridge.fit(X, y, sample_weights_not_OK_2)
def test_raises_value_error_if_sample_weights_greater_than_1d(): n_sampless = [2, 3] n_featuress = [3, 2] rng = np.random.RandomState(42) for (n_samples, n_features) in zip(n_sampless, n_featuress): X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) sample_weights_OK = rng.randn(n_samples) ** 2 + 1 sample_weights_OK_1 = 1.0 sample_weights_OK_2 = 2.0 sample_weights_not_OK = sample_weights_OK[:, np.newaxis] sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :] ridge = Ridge(alpha=1) ridge.fit(X, y, sample_weights_OK) ridge.fit(X, y, sample_weights_OK_1) ridge.fit(X, y, sample_weights_OK_2) def fit_ridge_not_ok(): ridge.fit(X, y, sample_weights_not_OK) def fit_ridge_not_ok_2(): ridge.fit(X, y, sample_weights_not_OK_2) err_msg = 'Sample weights must be 1D array or scalar' with pytest.raises(ValueError, match=err_msg): <DeepExtract> ridge.fit(X, y, sample_weights_not_OK) </DeepExtract> err_msg = 'Sample weights must be 1D array or scalar' with pytest.raises(ValueError, match=err_msg): <DeepExtract> ridge.fit(X, y, sample_weights_not_OK_2) </DeepExtract>
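# Sketch of the sample_weight shapes the test above covers: a 1-D array or a scalar is
# accepted by Ridge.fit, anything with extra dimensions is rejected with a ValueError.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(42)
X, y = rng.randn(5, 3), rng.randn(5)
Ridge(alpha=1.0).fit(X, y, sample_weight=rng.rand(5))  # 1-D array: accepted
Ridge(alpha=1.0).fit(X, y, sample_weight=2.0)          # scalar: accepted
try:
    Ridge(alpha=1.0).fit(X, y, sample_weight=rng.rand(5, 1))  # 2-D: rejected
except ValueError as exc:
    print(exc)  # "Sample weights must be 1D array or scalar"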
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'beta': [Interval(Real, 0.0, None, closed='both')], 'labels': ['array-like', None], 'pos_label': [Real, str, 'boolean', None], 'average': [StrOptions({'micro', 'macro', 'samples', 'weighted', 'binary'}), None], 'warn_for': [list, tuple, set], 'sample_weight': ['array-like', None], 'zero_division': [Options(Real, {0.0, 1.0, np.nan}), StrOptions({'warn'})]}) def precision_recall_fscore_support(y_true, y_pred, *, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None, zero_division='warn'): """Compute precision, recall, F-measure and support for each class. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label a negative sample as positive. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, default=1.0 The strength of recall versus precision in the F-score. labels : array-like, default=None The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : int, float, bool or str, default=1 The class to report if ``average='binary'`` and the data is binary. If the data are multiclass or multilabel, this will be ignored; setting ``labels=[pos_label]`` and ``average != 'binary'`` will report scores for that label only. average : {'binary', 'micro', 'macro', 'samples', 'weighted'}, default=None If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. 
``'weighted'``: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). warn_for : list, tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape (n_samples,), default=None Sample weights. zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" Sets the value to return when there is a zero division: - recall: when there are no positive labels - precision: when there are no positive predictions - f-score: both Notes: - If set to "warn", this acts like 0, but a warning is also raised. - If set to `np.nan`, such values will be excluded from the average. .. versionadded:: 1.3 `np.nan` option was added. Returns ------- precision : float (if average is not None) or array of float, shape = [n_unique_labels] Precision score. recall : float (if average is not None) or array of float, shape = [n_unique_labels] Recall score. fbeta_score : float (if average is not None) or array of float, shape = [n_unique_labels] F-beta score. support : None (if average is not None) or array of int, shape = [n_unique_labels] The number of occurrences of each label in ``y_true``. Notes ----- When ``true positive + false positive == 0``, precision is undefined. When ``true positive + false negative == 0``, recall is undefined. In such cases, by default the metric will be set to 0, as will f-score, and ``UndefinedMetricWarning`` will be raised. This behavior can be modified with ``zero_division``. References ---------- .. [1] `Wikipedia entry for the Precision and recall <https://en.wikipedia.org/wiki/Precision_and_recall>`_. .. [2] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_. .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') (0.22..., 0.33..., 0.26..., None) It is possible to compute per-label precisions, recalls, F1-scores and supports instead of averaging: >>> precision_recall_fscore_support(y_true, y_pred, average=None, ... labels=['pig', 'dog', 'cat']) (array([0. , 0. , 0.66...]), array([0., 0., 1.]), array([0. , 0. 
, 0.8]), array([2, 2, 2])) """ if isinstance(zero_division, str) and zero_division == 'warn': zero_division_value = np.float64(0.0) elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]: zero_division_value = np.float64(zero_division) else: zero_division_value = np.nan average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options and average != 'binary': raise ValueError('average has to be one of ' + str(average_options)) (y_type, y_true, y_pred) = _check_targets(y_true, y_pred) present_labels = unique_labels(y_true, y_pred).tolist() if average == 'binary': if y_type == 'binary': if pos_label not in present_labels: if len(present_labels) >= 2: raise ValueError(f'pos_label={pos_label} is not a valid label. It should be one of {present_labels}') labels = [pos_label] else: average_options = list(average_options) if y_type == 'multiclass': average_options.remove('samples') raise ValueError("Target is %s but average='binary'. Please choose another average setting, one of %r." % (y_type, average_options)) elif pos_label not in (None, 1): warnings.warn("Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class." % (pos_label, average), UserWarning) labels = labels samplewise = average == 'samples' (y_type, y_true, y_pred) = _check_targets(y_true, y_pred) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) check_consistent_length(y_true, y_pred, sample_weight) if y_type not in ('binary', 'multiclass', 'multilabel-indicator'): raise ValueError('%s is not supported' % y_type) present_labels = unique_labels(y_true, y_pred) if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) if y_true.ndim == 1: if samplewise: raise ValueError('Samplewise metrics are not available outside of multilabel classification.') le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] else: sum_axis = 1 if samplewise else 0 if not np.array_equal(labels, present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels) for multilabel targets. 
Got %d < 0' % np.min(labels)) if n_labels is not None: y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) fp = pred_sum - tp_sum fn = true_sum - tp_sum tp = tp_sum if sample_weight is not None and samplewise: sample_weight = np.array(sample_weight) tp = np.array(tp) fp = np.array(fp) fn = np.array(fn) tn = sample_weight * y_true.shape[1] - tp - fp - fn elif sample_weight is not None: tn = sum(sample_weight) - tp - fp - fn elif samplewise: tn = y_true.shape[1] - tp - fp - fn else: tn = y_true.shape[0] - tp - fp - fn MCM = np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2) tp_sum = MCM[:, 1, 1] pred_sum = tp_sum + MCM[:, 0, 1] true_sum = tp_sum + MCM[:, 1, 0] if average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) beta2 = beta ** 2 mask = pred_sum == 0.0 pred_sum = pred_sum.copy() pred_sum[mask] = 1 result = tp_sum / pred_sum if not np.any(mask): precision = result zero_division_value = _check_zero_division(zero_division) result[mask] = zero_division_value if zero_division != 'warn' or 'precision' not in warn_for: precision = result if 'precision' in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format('precision'.title()) elif 'precision' in warn_for: msg_start = '{0} is'.format('precision'.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: precision = result _warn_prf(average, 'predicted', msg_start, len(result)) precision = result mask = true_sum == 0.0 true_sum = true_sum.copy() true_sum[mask] = 1 result = tp_sum / true_sum if not np.any(mask): recall = result zero_division_value = _check_zero_division(zero_division) result[mask] = zero_division_value if zero_division != 'warn' or 'recall' not in warn_for: recall = result if 'recall' in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format('recall'.title()) elif 'recall' in warn_for: msg_start = '{0} is'.format('recall'.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: recall = result _warn_prf(average, 'true', msg_start, len(result)) recall = result if zero_division == 'warn' and ('f-score',) == warn_for: if (pred_sum[true_sum == 0] == 0).any(): (axis0, axis1) = ('sample', 'label') if average == 'samples': (axis0, axis1) = (axis1, axis0) msg = '{0} ill-defined and being set to 0.0 {{0}} no {1} {2}s. 
Use `zero_division` parameter to control this behavior.'.format('F-score is', 'true nor predicted', axis0) if len(true_sum) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) if np.isposinf(beta): f_score = recall elif beta == 0: f_score = precision else: denom = beta2 * precision + recall mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0) denom[mask] = 1 f_score = (1 + beta2) * precision * recall / denom f_score[mask] = zero_division_value if average == 'weighted': weights = true_sum elif average == 'samples': weights = sample_weight else: weights = None if average is not None: assert average != 'binary' or len(precision) == 1 precision = _nanaverage(precision, weights=weights) recall = _nanaverage(recall, weights=weights) f_score = _nanaverage(f_score, weights=weights) true_sum = None return (precision, recall, f_score, true_sum)
@validate_params({'y_true': ['array-like', 'sparse matrix'], 'y_pred': ['array-like', 'sparse matrix'], 'beta': [Interval(Real, 0.0, None, closed='both')], 'labels': ['array-like', None], 'pos_label': [Real, str, 'boolean', None], 'average': [StrOptions({'micro', 'macro', 'samples', 'weighted', 'binary'}), None], 'warn_for': [list, tuple, set], 'sample_weight': ['array-like', None], 'zero_division': [Options(Real, {0.0, 1.0, np.nan}), StrOptions({'warn'})]}) def precision_recall_fscore_support(y_true, y_pred, *, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None, zero_division='warn'): """Compute precision, recall, F-measure and support for each class. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label a negative sample as positive. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, default=1.0 The strength of recall versus precision in the F-score. labels : array-like, default=None The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : int, float, bool or str, default=1 The class to report if ``average='binary'`` and the data is binary. If the data are multiclass or multilabel, this will be ignored; setting ``labels=[pos_label]`` and ``average != 'binary'`` will report scores for that label only. average : {'binary', 'micro', 'macro', 'samples', 'weighted'}, default=None If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. 
``'weighted'``: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). warn_for : list, tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape (n_samples,), default=None Sample weights. zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" Sets the value to return when there is a zero division: - recall: when there are no positive labels - precision: when there are no positive predictions - f-score: both Notes: - If set to "warn", this acts like 0, but a warning is also raised. - If set to `np.nan`, such values will be excluded from the average. .. versionadded:: 1.3 `np.nan` option was added. Returns ------- precision : float (if average is not None) or array of float, shape = [n_unique_labels] Precision score. recall : float (if average is not None) or array of float, shape = [n_unique_labels] Recall score. fbeta_score : float (if average is not None) or array of float, shape = [n_unique_labels] F-beta score. support : None (if average is not None) or array of int, shape = [n_unique_labels] The number of occurrences of each label in ``y_true``. Notes ----- When ``true positive + false positive == 0``, precision is undefined. When ``true positive + false negative == 0``, recall is undefined. In such cases, by default the metric will be set to 0, as will f-score, and ``UndefinedMetricWarning`` will be raised. This behavior can be modified with ``zero_division``. References ---------- .. [1] `Wikipedia entry for the Precision and recall <https://en.wikipedia.org/wiki/Precision_and_recall>`_. .. [2] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_. .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') (0.22..., 0.33..., 0.26..., None) It is possible to compute per-label precisions, recalls, F1-scores and supports instead of averaging: >>> precision_recall_fscore_support(y_true, y_pred, average=None, ... labels=['pig', 'dog', 'cat']) (array([0. , 0. , 0.66...]), array([0., 0., 1.]), array([0. , 0. 
, 0.8]), array([2, 2, 2])) """ <DeepExtract> if isinstance(zero_division, str) and zero_division == 'warn': zero_division_value = np.float64(0.0) elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]: zero_division_value = np.float64(zero_division) else: zero_division_value = np.nan </DeepExtract> <DeepExtract> average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options and average != 'binary': raise ValueError('average has to be one of ' + str(average_options)) (y_type, y_true, y_pred) = _check_targets(y_true, y_pred) present_labels = unique_labels(y_true, y_pred).tolist() if average == 'binary': if y_type == 'binary': if pos_label not in present_labels: if len(present_labels) >= 2: raise ValueError(f'pos_label={pos_label} is not a valid label. It should be one of {present_labels}') labels = [pos_label] else: average_options = list(average_options) if y_type == 'multiclass': average_options.remove('samples') raise ValueError("Target is %s but average='binary'. Please choose another average setting, one of %r." % (y_type, average_options)) elif pos_label not in (None, 1): warnings.warn("Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class." % (pos_label, average), UserWarning) labels = labels </DeepExtract> samplewise = average == 'samples' <DeepExtract> (y_type, y_true, y_pred) = _check_targets(y_true, y_pred) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) check_consistent_length(y_true, y_pred, sample_weight) if y_type not in ('binary', 'multiclass', 'multilabel-indicator'): raise ValueError('%s is not supported' % y_type) present_labels = unique_labels(y_true, y_pred) if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) if y_true.ndim == 1: if samplewise: raise ValueError('Samplewise metrics are not available outside of multilabel classification.') le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] else: sum_axis = 1 if samplewise else 0 if not np.array_equal(labels, present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels) for multilabel targets. Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels) for multilabel targets. 
Got %d < 0' % np.min(labels)) if n_labels is not None: y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) fp = pred_sum - tp_sum fn = true_sum - tp_sum tp = tp_sum if sample_weight is not None and samplewise: sample_weight = np.array(sample_weight) tp = np.array(tp) fp = np.array(fp) fn = np.array(fn) tn = sample_weight * y_true.shape[1] - tp - fp - fn elif sample_weight is not None: tn = sum(sample_weight) - tp - fp - fn elif samplewise: tn = y_true.shape[1] - tp - fp - fn else: tn = y_true.shape[0] - tp - fp - fn MCM = np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2) </DeepExtract> tp_sum = MCM[:, 1, 1] pred_sum = tp_sum + MCM[:, 0, 1] true_sum = tp_sum + MCM[:, 1, 0] if average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) beta2 = beta ** 2 <DeepExtract> mask = pred_sum == 0.0 pred_sum = pred_sum.copy() pred_sum[mask] = 1 result = tp_sum / pred_sum if not np.any(mask): precision = result zero_division_value = _check_zero_division(zero_division) result[mask] = zero_division_value if zero_division != 'warn' or 'precision' not in warn_for: precision = result if 'precision' in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format('precision'.title()) elif 'precision' in warn_for: msg_start = '{0} is'.format('precision'.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: precision = result _warn_prf(average, 'predicted', msg_start, len(result)) precision = result </DeepExtract> <DeepExtract> mask = true_sum == 0.0 true_sum = true_sum.copy() true_sum[mask] = 1 result = tp_sum / true_sum if not np.any(mask): recall = result zero_division_value = _check_zero_division(zero_division) result[mask] = zero_division_value if zero_division != 'warn' or 'recall' not in warn_for: recall = result if 'recall' in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format('recall'.title()) elif 'recall' in warn_for: msg_start = '{0} is'.format('recall'.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: recall = result _warn_prf(average, 'true', msg_start, len(result)) recall = result </DeepExtract> if zero_division == 'warn' and ('f-score',) == warn_for: if (pred_sum[true_sum == 0] == 0).any(): <DeepExtract> (axis0, axis1) = ('sample', 'label') if average == 'samples': (axis0, axis1) = (axis1, axis0) msg = '{0} ill-defined and being set to 0.0 {{0}} no {1} {2}s. 
Use `zero_division` parameter to control this behavior.'.format('F-score is', 'true nor predicted', axis0) if len(true_sum) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) </DeepExtract> if np.isposinf(beta): f_score = recall elif beta == 0: f_score = precision else: denom = beta2 * precision + recall mask = np.isclose(denom, 0) | np.isclose(pred_sum + true_sum, 0) denom[mask] = 1 f_score = (1 + beta2) * precision * recall / denom f_score[mask] = zero_division_value if average == 'weighted': weights = true_sum elif average == 'samples': weights = sample_weight else: weights = None if average is not None: assert average != 'binary' or len(precision) == 1 precision = _nanaverage(precision, weights=weights) recall = _nanaverage(recall, weights=weights) f_score = _nanaverage(f_score, weights=weights) true_sum = None return (precision, recall, f_score, true_sum)
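The pair above ultimately reduces everything to per-label counts taken from the multilabel confusion matrix (tp_sum, pred_sum, true_sum) and only then applies the precision/recall/F-beta formulas and the averaging. A minimal sketch of that final step, using plain NumPy with the same F-beta expression as the code; the helper name prf_from_counts and the zero_division=0 behaviour baked in are illustrative assumptions, not sklearn's private helpers.

import numpy as np

def prf_from_counts(tp_sum, pred_sum, true_sum, beta=1.0):
    # precision = tp / predicted positives, recall = tp / true positives;
    # zero denominators are mapped to 0.0 (the zero_division=0 case).
    precision = np.divide(tp_sum, pred_sum, out=np.zeros(len(tp_sum)), where=pred_sum != 0)
    recall = np.divide(tp_sum, true_sum, out=np.zeros(len(tp_sum)), where=true_sum != 0)
    beta2 = beta ** 2
    denom = beta2 * precision + recall
    f_beta = np.divide((1 + beta2) * precision * recall, denom,
                       out=np.zeros_like(denom), where=denom != 0)
    return precision, recall, f_beta

# counts for y_true = [cat, dog, pig, cat, dog, pig], y_pred = [cat, pig, dog, cat, cat, dog],
# with labels ordered [cat, dog, pig] (same data as the docstring example)
tp_sum = np.array([2, 0, 0])
pred_sum = np.array([3, 2, 1])
true_sum = np.array([2, 2, 2])
precision, recall, f1 = prf_from_counts(tp_sum, pred_sum, true_sum)
print(precision, recall, f1)   # [0.667 0. 0.], [1. 0. 0.], [0.8 0. 0.]
print(precision.mean())        # 0.222..., the 'macro' precision from the docstring example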
def test_lda_partial_fit(): rng = np.random.RandomState(0) n_components = 3 block = np.full((3, 3), n_components, dtype=int) blocks = [block] * n_components X = block_diag(*blocks) X = csr_matrix(X) (n_components, X) = (n_components, X) lda = LatentDirichletAllocation(n_components=n_components, learning_offset=10.0, total_samples=100, random_state=rng) for i in range(3): lda.partial_fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_partial_fit(): rng = np.random.RandomState(0) <DeepExtract> n_components = 3 block = np.full((3, 3), n_components, dtype=int) blocks = [block] * n_components X = block_diag(*blocks) X = csr_matrix(X) (n_components, X) = (n_components, X) </DeepExtract> lda = LatentDirichletAllocation(n_components=n_components, learning_offset=10.0, total_samples=100, random_state=rng) for i in range(3): lda.partial_fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert tuple(sorted(top_idx)) in correct_idx_grps
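The extracted helper in this test builds a block-diagonal document-word matrix, so each group of three words co-occurs only inside its own block and LDA's partial_fit should recover one topic per block. A tiny standalone reconstruction of that input, under the assumption that only the data-building step is of interest here:

import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix

block = np.full((3, 3), 3, dtype=int)            # 3 documents x 3 words, all counts equal to 3
X = csr_matrix(block_diag(block, block, block))  # 9 x 9, three disjoint word groups
print(X.toarray().shape)                         # (9, 9): expected topics are {0,1,2}, {3,4,5}, {6,7,8}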
def fit(self, X, y): """Fit linear model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. Fitted `TheilSenRegressor` estimator. """ self._validate_params() random_state = check_random_state(self.random_state) (X, y) = self._validate_data(X, y, y_numeric=True) (n_samples, n_features) = X.shape n_subsamples = self.n_subsamples if self.fit_intercept: n_dim = n_features + 1 else: n_dim = n_features if n_subsamples is not None: if n_subsamples > n_samples: raise ValueError('Invalid parameter since n_subsamples > n_samples ({0} > {1}).'.format(n_subsamples, n_samples)) if n_samples >= n_features: if n_dim > n_subsamples: plus_1 = '+1' if self.fit_intercept else '' raise ValueError('Invalid parameter since n_features{0} > n_subsamples ({1} > {2}).'.format(plus_1, n_dim, n_subsamples)) elif n_subsamples != n_samples: raise ValueError('Invalid parameter since n_subsamples != n_samples ({0} != {1}) while n_samples < n_features.'.format(n_subsamples, n_samples)) else: n_subsamples = min(n_dim, n_samples) all_combinations = max(1, np.rint(binom(n_samples, n_subsamples))) n_subpopulation = int(min(self.max_subpopulation, all_combinations)) (n_subsamples, self.n_subpopulation_) = (n_subsamples, n_subpopulation) self.breakdown_ = 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) + n_subsamples - 1) / n_samples if self.verbose: print('Breakdown point: {0}'.format(self.breakdown_)) print('Number of samples: {0}'.format(n_samples)) tol_outliers = int(self.breakdown_ * n_samples) print('Tolerable outliers: {0}'.format(tol_outliers)) print('Number of subpopulations: {0}'.format(self.n_subpopulation_)) if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation: indices = list(combinations(range(n_samples), n_subsamples)) else: indices = [random_state.choice(n_samples, size=n_subsamples, replace=False) for _ in range(self.n_subpopulation_)] n_jobs = effective_n_jobs(self.n_jobs) index_list = np.array_split(indices, n_jobs) weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)((delayed(_lstsq)(X, y, index_list[job], self.fit_intercept) for job in range(n_jobs))) weights = np.vstack(weights) if weights.shape[1] == 1: (self.n_iter_, coefs) = (1, np.median(weights.ravel(), keepdims=True)) self.tol **= 2 spatial_median_old = np.mean(weights, axis=0) for n_iter in range(self.max_iter): spatial_median = _modified_weiszfeld_step(weights, spatial_median_old) if np.sum((spatial_median_old - spatial_median) ** 2) < self.tol: break else: spatial_median_old = spatial_median else: warnings.warn('Maximum number of iterations {max_iter} reached in spatial median for TheilSen regressor.'.format(max_iter=self.max_iter), ConvergenceWarning) (self.n_iter_, coefs) = (n_iter, spatial_median) if self.fit_intercept: self.intercept_ = coefs[0] self.coef_ = coefs[1:] else: self.intercept_ = 0.0 self.coef_ = coefs return self
def fit(self, X, y): """Fit linear model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. Fitted `TheilSenRegressor` estimator. """ self._validate_params() random_state = check_random_state(self.random_state) (X, y) = self._validate_data(X, y, y_numeric=True) (n_samples, n_features) = X.shape <DeepExtract> n_subsamples = self.n_subsamples if self.fit_intercept: n_dim = n_features + 1 else: n_dim = n_features if n_subsamples is not None: if n_subsamples > n_samples: raise ValueError('Invalid parameter since n_subsamples > n_samples ({0} > {1}).'.format(n_subsamples, n_samples)) if n_samples >= n_features: if n_dim > n_subsamples: plus_1 = '+1' if self.fit_intercept else '' raise ValueError('Invalid parameter since n_features{0} > n_subsamples ({1} > {2}).'.format(plus_1, n_dim, n_subsamples)) elif n_subsamples != n_samples: raise ValueError('Invalid parameter since n_subsamples != n_samples ({0} != {1}) while n_samples < n_features.'.format(n_subsamples, n_samples)) else: n_subsamples = min(n_dim, n_samples) all_combinations = max(1, np.rint(binom(n_samples, n_subsamples))) n_subpopulation = int(min(self.max_subpopulation, all_combinations)) (n_subsamples, self.n_subpopulation_) = (n_subsamples, n_subpopulation) </DeepExtract> <DeepExtract> self.breakdown_ = 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) + n_subsamples - 1) / n_samples </DeepExtract> if self.verbose: print('Breakdown point: {0}'.format(self.breakdown_)) print('Number of samples: {0}'.format(n_samples)) tol_outliers = int(self.breakdown_ * n_samples) print('Tolerable outliers: {0}'.format(tol_outliers)) print('Number of subpopulations: {0}'.format(self.n_subpopulation_)) if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation: indices = list(combinations(range(n_samples), n_subsamples)) else: indices = [random_state.choice(n_samples, size=n_subsamples, replace=False) for _ in range(self.n_subpopulation_)] n_jobs = effective_n_jobs(self.n_jobs) index_list = np.array_split(indices, n_jobs) weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)((delayed(_lstsq)(X, y, index_list[job], self.fit_intercept) for job in range(n_jobs))) weights = np.vstack(weights) <DeepExtract> if weights.shape[1] == 1: (self.n_iter_, coefs) = (1, np.median(weights.ravel(), keepdims=True)) self.tol **= 2 spatial_median_old = np.mean(weights, axis=0) for n_iter in range(self.max_iter): spatial_median = _modified_weiszfeld_step(weights, spatial_median_old) if np.sum((spatial_median_old - spatial_median) ** 2) < self.tol: break else: spatial_median_old = spatial_median else: warnings.warn('Maximum number of iterations {max_iter} reached in spatial median for TheilSen regressor.'.format(max_iter=self.max_iter), ConvergenceWarning) (self.n_iter_, coefs) = (n_iter, spatial_median) </DeepExtract> if self.fit_intercept: self.intercept_ = coefs[0] self.coef_ = coefs[1:] else: self.intercept_ = 0.0 self.coef_ = coefs return self
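The inlined breakdown-point step in this fit method evaluates 1 - (0.5**(1/n_subsamples) * (n_samples - n_subsamples + 1) + n_subsamples - 1) / n_samples. A quick sketch evaluating that same expression outside the estimator; the function name breakdown_point is an assumption for illustration only.

import numpy as np

def breakdown_point(n_samples, n_subsamples):
    # fraction of arbitrarily corrupted samples the Theil-Sen estimate can tolerate
    return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1)
                + n_subsamples - 1) / n_samples

print(round(breakdown_point(100, 2), 3))   # ~0.29, near the classical ~29.3% for pairwise Theil-Sen
print(round(breakdown_point(100, 10), 3))  # shrinks as each subsample uses more points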
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like'], 'squared': ['boolean']}) def mean_squared_log_error(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average', squared=True): """Mean squared logarithmic error regression loss. Read more in the :ref:`User Guide <mean_squared_log_error>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors when the input is of multioutput format. 'uniform_average' : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSLE (mean squared log error) value. If False returns RMSLE (root mean squared log error) value. Returns ------- loss : float or ndarray of floats A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target. Examples -------- >>> from sklearn.metrics import mean_squared_log_error >>> y_true = [3, 5, 2.5, 7] >>> y_pred = [2.5, 5, 4, 8] >>> mean_squared_log_error(y_true, y_pred) 0.039... >>> mean_squared_log_error(y_true, y_pred, squared=False) 0.199... >>> y_true = [[0.5, 1], [1, 2], [7, 6]] >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]] >>> mean_squared_log_error(y_true, y_pred) 0.044... >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values') array([0.00462428, 0.08377444]) >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.060... """ check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1])) n_outputs = y_true.shape[1] allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted') if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput)) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError('Custom weights are useful only in multi-output cases.') elif n_outputs != len(multioutput): raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' 
% (len(multioutput), n_outputs)) y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput' (y_type, y_true, y_pred, multioutput) = (y_type, y_true, y_pred, multioutput) check_consistent_length(y_true, y_pred, sample_weight) if (y_true < 0).any() or (y_pred < 0).any(): raise ValueError('Mean Squared Logarithmic Error cannot be used when targets contain negative values.') return mean_squared_error(np.log1p(y_true), np.log1p(y_pred), sample_weight=sample_weight, multioutput=multioutput, squared=squared)
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like'], 'squared': ['boolean']}) def mean_squared_log_error(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average', squared=True): """Mean squared logarithmic error regression loss. Read more in the :ref:`User Guide <mean_squared_log_error>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors when the input is of multioutput format. 'uniform_average' : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSLE (mean squared log error) value. If False returns RMSLE (root mean squared log error) value. Returns ------- loss : float or ndarray of floats A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target. Examples -------- >>> from sklearn.metrics import mean_squared_log_error >>> y_true = [3, 5, 2.5, 7] >>> y_pred = [2.5, 5, 4, 8] >>> mean_squared_log_error(y_true, y_pred) 0.039... >>> mean_squared_log_error(y_true, y_pred, squared=False) 0.199... >>> y_true = [[0.5, 1], [1, 2], [7, 6]] >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]] >>> mean_squared_log_error(y_true, y_pred) 0.044... >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values') array([0.00462428, 0.08377444]) >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.060... """ <DeepExtract> check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError('y_true and y_pred have different number of output ({0}!={1})'.format(y_true.shape[1], y_pred.shape[1])) n_outputs = y_true.shape[1] allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted') if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError("Allowed 'multioutput' string values are {}. You provided multioutput={!r}".format(allowed_multioutput_str, multioutput)) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError('Custom weights are useful only in multi-output cases.') elif n_outputs != len(multioutput): raise ValueError('There must be equally many custom weights (%d) as outputs (%d).' 
% (len(multioutput), n_outputs)) y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput' (y_type, y_true, y_pred, multioutput) = (y_type, y_true, y_pred, multioutput) </DeepExtract> check_consistent_length(y_true, y_pred, sample_weight) if (y_true < 0).any() or (y_pred < 0).any(): raise ValueError('Mean Squared Logarithmic Error cannot be used when targets contain negative values.') return mean_squared_error(np.log1p(y_true), np.log1p(y_pred), sample_weight=sample_weight, multioutput=multioutput, squared=squared)
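As the final return statement above shows, MSLE is just the squared error computed on log1p-transformed targets. A minimal equivalence check using only the public sklearn.metrics API and the same toy targets as the docstring:

import numpy as np
from sklearn.metrics import mean_squared_error, mean_squared_log_error

y_true = [3, 5, 2.5, 7]
y_pred = [2.5, 5, 4, 8]

direct = mean_squared_log_error(y_true, y_pred)
via_log1p = mean_squared_error(np.log1p(y_true), np.log1p(y_pred))
assert np.isclose(direct, via_log1p)  # both are ~0.039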
def test_label_binarize_multiclass(): y = [0, 1, 2] classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = 2 * np.eye(3) for sparse_output in [True, False]: if (pos_label == 0 or neg_label != 0) and sparse_output: with pytest.raises(ValueError): label_binarize(y, classes=classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) continue binarized = label_binarize(y, classes=classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) assert_array_equal(toarray(binarized), expected) assert issparse(binarized) == sparse_output y_type = type_of_target(y) if y_type == 'multiclass': inversed = _inverse_binarize_multiclass(binarized, classes=classes) else: inversed = _inverse_binarize_thresholding(binarized, output_type=y_type, classes=classes, threshold=(neg_label + pos_label) / 2.0) assert_array_equal(toarray(inversed), toarray(y)) lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) binarized = lb.fit_transform(y) assert_array_equal(toarray(binarized), expected) assert issparse(binarized) == sparse_output inverse_output = lb.inverse_transform(binarized) assert_array_equal(toarray(inverse_output), toarray(y)) assert issparse(inverse_output) == issparse(y) with pytest.raises(ValueError): label_binarize(y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True)
def test_label_binarize_multiclass(): y = [0, 1, 2] classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = 2 * np.eye(3) <DeepExtract> for sparse_output in [True, False]: if (pos_label == 0 or neg_label != 0) and sparse_output: with pytest.raises(ValueError): label_binarize(y, classes=classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) continue binarized = label_binarize(y, classes=classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) assert_array_equal(toarray(binarized), expected) assert issparse(binarized) == sparse_output y_type = type_of_target(y) if y_type == 'multiclass': inversed = _inverse_binarize_multiclass(binarized, classes=classes) else: inversed = _inverse_binarize_thresholding(binarized, output_type=y_type, classes=classes, threshold=(neg_label + pos_label) / 2.0) assert_array_equal(toarray(inversed), toarray(y)) lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output) binarized = lb.fit_transform(y) assert_array_equal(toarray(binarized), expected) assert issparse(binarized) == sparse_output inverse_output = lb.inverse_transform(binarized) assert_array_equal(toarray(inverse_output), toarray(y)) assert issparse(inverse_output) == issparse(y) </DeepExtract> with pytest.raises(ValueError): label_binarize(y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True)
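The expected = 2 * np.eye(3) in this test follows from label_binarize scaling the positive entries of the indicator matrix to pos_label (and the negatives to neg_label). A short standalone illustration with the public API:

import numpy as np
from sklearn.preprocessing import label_binarize

Y = label_binarize([0, 1, 2], classes=[0, 1, 2], neg_label=0, pos_label=2)
print(Y)
# [[2 0 0]
#  [0 2 0]
#  [0 0 2]]
assert np.array_equal(Y, 2 * np.eye(3))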
@pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('k', [4, 6, 7]) @pytest.mark.parametrize('kfold', [StratifiedKFold, StratifiedGroupKFold]) def test_stratified_kfold_label_invariance(k, shuffle, kfold): n_samples = 100 y = np.array([2] * int(0.1 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) X = np.ones(len(y)) groups = np.arange(len(y)) def get_splits(y): random_state = None if not shuffle else 0 return [(list(train), list(test)) for (train, test) in kfold(k, random_state=random_state, shuffle=shuffle).split(X, y, groups=groups)] random_state = None if not shuffle else 0 splits_base = [(list(train), list(test)) for (train, test) in kfold(k, random_state=random_state, shuffle=shuffle).split(X, y, groups=groups)] for perm in permutations([0, 1, 2]): y_perm = np.take(perm, y) random_state = None if not shuffle else 0 splits_perm = [(list(train), list(test)) for (train, test) in kfold(k, random_state=random_state, shuffle=shuffle).split(X, y_perm, groups=groups)] assert splits_perm == splits_base
@pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('k', [4, 6, 7]) @pytest.mark.parametrize('kfold', [StratifiedKFold, StratifiedGroupKFold]) def test_stratified_kfold_label_invariance(k, shuffle, kfold): n_samples = 100 y = np.array([2] * int(0.1 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) X = np.ones(len(y)) groups = np.arange(len(y)) def get_splits(y): random_state = None if not shuffle else 0 return [(list(train), list(test)) for (train, test) in kfold(k, random_state=random_state, shuffle=shuffle).split(X, y, groups=groups)] <DeepExtract> random_state = None if not shuffle else 0 splits_base = [(list(train), list(test)) for (train, test) in kfold(k, random_state=random_state, shuffle=shuffle).split(X, y, groups=groups)] </DeepExtract> for perm in permutations([0, 1, 2]): y_perm = np.take(perm, y) <DeepExtract> random_state = None if not shuffle else 0 splits_perm = [(list(train), list(test)) for (train, test) in kfold(k, random_state=random_state, shuffle=shuffle).split(X, y_perm, groups=groups)] </DeepExtract> assert splits_perm == splits_base
@pytest.mark.parametrize('check_input', [True, False]) def test_enet_copy_X_True(check_input): random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) (X, y, _, _) = (X, y, X_test, y_test) X = X.copy(order='F') original_X = X.copy() enet = ElasticNet(copy_X=True) enet.fit(X, y, check_input=check_input) assert_array_equal(original_X, X)
@pytest.mark.parametrize('check_input', [True, False]) def test_enet_copy_X_True(check_input): <DeepExtract> random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) (X, y, _, _) = (X, y, X_test, y_test) </DeepExtract> X = X.copy(order='F') original_X = X.copy() enet = ElasticNet(copy_X=True) enet.fit(X, y, check_input=check_input) assert_array_equal(original_X, X)
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius', max_iter=200, tol=0.0001, l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True, verbose=0): """Compute Non-negative Matrix Factorization with Multiplicative Update. The objective function is _beta_divergence(X, WH) and is minimized with an alternating minimization of W and H. Each minimization is done with a Multiplicative Update. Parameters ---------- X : array-like of shape (n_samples, n_features) Constant input matrix. W : array-like of shape (n_samples, n_components) Initial guess for the solution. H : array-like of shape (n_components, n_features) Initial guess for the solution. beta_loss : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}, default='frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. max_iter : int, default=200 Number of iterations. tol : float, default=1e-4 Tolerance of the stopping condition. l1_reg_W : float, default=0. L1 regularization parameter for W. l1_reg_H : float, default=0. L1 regularization parameter for H. l2_reg_W : float, default=0. L2 regularization parameter for W. l2_reg_H : float, default=0. L2 regularization parameter for H. update_H : bool, default=True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. verbose : int, default=0 The verbosity level. Returns ------- W : ndarray of shape (n_samples, n_components) Solution to the non-negative least squares problem. H : ndarray of shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int The number of iterations done by the algorithm. References ---------- Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix Factorization. Adv. Neural Inform. Process. Syst.. 13. Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). 
""" start_time = time.time() beta_loss_map = {'frobenius': 2, 'kullback-leibler': 1, 'itakura-saito': 0} if isinstance(beta_loss, str): beta_loss = beta_loss_map[beta_loss] beta_loss = beta_loss if beta_loss < 1: gamma = 1.0 / (2.0 - beta_loss) elif beta_loss > 2: gamma = 1.0 / (beta_loss - 1.0) else: gamma = 1.0 beta_loss = _beta_loss_to_float(beta_loss) if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) if beta_loss == 2: if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) cross_prod = trace_dot(X * H.T, W) res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 else: res = squared_norm(X - np.dot(W, H)) / 2.0 if True: error_at_init = np.sqrt(res * 2) else: error_at_init = res if sp.issparse(X): WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] WH_data[WH_data < EPSILON] = EPSILON if beta_loss == 1: sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) div = X_data / WH_data res = np.dot(X_data, np.log(div)) res += sum_WH - X_data.sum() elif beta_loss == 0: div = X_data / WH_data res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) else: if sp.issparse(X): sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta_loss) else: sum_WH_beta = np.sum(WH ** beta_loss) sum_X_WH = np.dot(X_data, WH_data ** (beta_loss - 1)) res = (X_data ** beta_loss).sum() - beta_loss * sum_X_WH res += sum_WH_beta * (beta_loss - 1) res /= beta_loss * (beta_loss - 1) if True: res = max(res, 0) error_at_init = np.sqrt(2 * res) else: error_at_init = res previous_error = error_at_init (H_sum, HHt, XHt) = (None, None, None) for n_iter in range(1, max_iter + 1): if beta_loss == 2: if XHt is None: XHt = safe_sparse_dot(X, H.T) if update_H: numerator = XHt else: numerator = XHt.copy() if HHt is None: HHt = np.dot(H, H.T) denominator = np.dot(W, HHt) else: WH_safe_X = _special_sparse_dot(W, H, X) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X WH = WH_safe_X.copy() if beta_loss - 1.0 < 0: WH[WH < EPSILON] = EPSILON if beta_loss - 2.0 < 0: WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: WH_safe_X_data **= -1 WH_safe_X_data **= 2 WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 WH_safe_X_data *= X_data numerator = safe_sparse_dot(WH_safe_X, H.T) if beta_loss == 1: if H_sum is None: H_sum = np.sum(H, axis=1) denominator = H_sum[np.newaxis, :] else: if sp.issparse(X): WHHt = np.empty(W.shape) for i in range(X.shape[0]): WHi = np.dot(W[i, :], H) if beta_loss - 1 < 0: WHi[WHi < EPSILON] = EPSILON WHi **= beta_loss - 1 WHHt[i, :] = np.dot(WHi, H.T) else: WH **= beta_loss - 1 WHHt = np.dot(WH, H.T) denominator = WHHt if l1_reg_W > 0: denominator += l1_reg_W if l2_reg_W > 0: denominator = denominator + l2_reg_W * W denominator[denominator == 0] = EPSILON numerator /= denominator delta_W = numerator if gamma != 1: delta_W **= gamma W *= delta_W (W, H_sum, HHt, XHt) = (W, H_sum, HHt, XHt) if beta_loss < 1: W[W < np.finfo(np.float64).eps] = 0.0 if update_H: if beta_loss == 2: numerator = safe_sparse_dot(W.T, X) denominator = np.linalg.multi_dot([W.T, W, H]) else: WH_safe_X = _special_sparse_dot(W, H, X) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: 
WH_safe_X_data = WH_safe_X X_data = X WH = WH_safe_X.copy() if beta_loss - 1.0 < 0: WH[WH < EPSILON] = EPSILON if beta_loss - 2.0 < 0: WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: WH_safe_X_data **= -1 WH_safe_X_data **= 2 WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 WH_safe_X_data *= X_data numerator = safe_sparse_dot(W.T, WH_safe_X) if beta_loss == 1: W_sum = np.sum(W, axis=0) W_sum[W_sum == 0] = 1.0 denominator = W_sum[:, np.newaxis] else: if sp.issparse(X): WtWH = np.empty(H.shape) for i in range(X.shape[1]): WHi = np.dot(W, H[:, i]) if beta_loss - 1 < 0: WHi[WHi < EPSILON] = EPSILON WHi **= beta_loss - 1 WtWH[:, i] = np.dot(W.T, WHi) else: WH **= beta_loss - 1 WtWH = np.dot(W.T, WH) denominator = WtWH if l1_reg_H > 0: denominator += l1_reg_H if l2_reg_H > 0: denominator = denominator + l2_reg_H * H denominator[denominator == 0] = EPSILON if A is not None and B is not None: if gamma != 1: H **= 1 / gamma numerator *= H A *= rho B *= rho A += numerator B += denominator H = A / B if gamma != 1: H **= gamma else: delta_H = numerator delta_H /= denominator if gamma != 1: delta_H **= gamma H *= delta_H H = H (H_sum, HHt, XHt) = (None, None, None) if beta_loss <= 1: H[H < np.finfo(np.float64).eps] = 0.0 if tol > 0 and n_iter % 10 == 0: beta_loss = _beta_loss_to_float(beta_loss) if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) if beta_loss == 2: if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) cross_prod = trace_dot(X * H.T, W) res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 else: res = squared_norm(X - np.dot(W, H)) / 2.0 if True: error = np.sqrt(res * 2) else: error = res if sp.issparse(X): WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] WH_data[WH_data < EPSILON] = EPSILON if beta_loss == 1: sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) div = X_data / WH_data res = np.dot(X_data, np.log(div)) res += sum_WH - X_data.sum() elif beta_loss == 0: div = X_data / WH_data res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) else: if sp.issparse(X): sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta_loss) else: sum_WH_beta = np.sum(WH ** beta_loss) sum_X_WH = np.dot(X_data, WH_data ** (beta_loss - 1)) res = (X_data ** beta_loss).sum() - beta_loss * sum_X_WH res += sum_WH_beta * (beta_loss - 1) res /= beta_loss * (beta_loss - 1) if True: res = max(res, 0) error = np.sqrt(2 * res) else: error = res if verbose: iter_time = time.time() print('Epoch %02d reached after %.3f seconds, error: %f' % (n_iter, iter_time - start_time, error)) if (previous_error - error) / error_at_init < tol: break previous_error = error if verbose and (tol == 0 or n_iter % 10 != 0): end_time = time.time() print('Epoch %02d reached after %.3f seconds.' % (n_iter, end_time - start_time)) return (W, H, n_iter)
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius', max_iter=200, tol=0.0001, l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True, verbose=0): """Compute Non-negative Matrix Factorization with Multiplicative Update. The objective function is _beta_divergence(X, WH) and is minimized with an alternating minimization of W and H. Each minimization is done with a Multiplicative Update. Parameters ---------- X : array-like of shape (n_samples, n_features) Constant input matrix. W : array-like of shape (n_samples, n_components) Initial guess for the solution. H : array-like of shape (n_components, n_features) Initial guess for the solution. beta_loss : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}, default='frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. max_iter : int, default=200 Number of iterations. tol : float, default=1e-4 Tolerance of the stopping condition. l1_reg_W : float, default=0. L1 regularization parameter for W. l1_reg_H : float, default=0. L1 regularization parameter for H. l2_reg_W : float, default=0. L2 regularization parameter for W. l2_reg_H : float, default=0. L2 regularization parameter for H. update_H : bool, default=True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. verbose : int, default=0 The verbosity level. Returns ------- W : ndarray of shape (n_samples, n_components) Solution to the non-negative least squares problem. H : ndarray of shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int The number of iterations done by the algorithm. References ---------- Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix Factorization. Adv. Neural Inform. Process. Syst.. 13. Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). 
""" start_time = time.time() <DeepExtract> beta_loss_map = {'frobenius': 2, 'kullback-leibler': 1, 'itakura-saito': 0} if isinstance(beta_loss, str): beta_loss = beta_loss_map[beta_loss] beta_loss = beta_loss </DeepExtract> if beta_loss < 1: gamma = 1.0 / (2.0 - beta_loss) elif beta_loss > 2: gamma = 1.0 / (beta_loss - 1.0) else: gamma = 1.0 <DeepExtract> beta_loss = _beta_loss_to_float(beta_loss) if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) if beta_loss == 2: if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) cross_prod = trace_dot(X * H.T, W) res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 else: res = squared_norm(X - np.dot(W, H)) / 2.0 if True: error_at_init = np.sqrt(res * 2) else: error_at_init = res if sp.issparse(X): WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] WH_data[WH_data < EPSILON] = EPSILON if beta_loss == 1: sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) div = X_data / WH_data res = np.dot(X_data, np.log(div)) res += sum_WH - X_data.sum() elif beta_loss == 0: div = X_data / WH_data res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) else: if sp.issparse(X): sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta_loss) else: sum_WH_beta = np.sum(WH ** beta_loss) sum_X_WH = np.dot(X_data, WH_data ** (beta_loss - 1)) res = (X_data ** beta_loss).sum() - beta_loss * sum_X_WH res += sum_WH_beta * (beta_loss - 1) res /= beta_loss * (beta_loss - 1) if True: res = max(res, 0) error_at_init = np.sqrt(2 * res) else: error_at_init = res </DeepExtract> previous_error = error_at_init (H_sum, HHt, XHt) = (None, None, None) for n_iter in range(1, max_iter + 1): <DeepExtract> if beta_loss == 2: if XHt is None: XHt = safe_sparse_dot(X, H.T) if update_H: numerator = XHt else: numerator = XHt.copy() if HHt is None: HHt = np.dot(H, H.T) denominator = np.dot(W, HHt) else: WH_safe_X = _special_sparse_dot(W, H, X) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X WH = WH_safe_X.copy() if beta_loss - 1.0 < 0: WH[WH < EPSILON] = EPSILON if beta_loss - 2.0 < 0: WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: WH_safe_X_data **= -1 WH_safe_X_data **= 2 WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 WH_safe_X_data *= X_data numerator = safe_sparse_dot(WH_safe_X, H.T) if beta_loss == 1: if H_sum is None: H_sum = np.sum(H, axis=1) denominator = H_sum[np.newaxis, :] else: if sp.issparse(X): WHHt = np.empty(W.shape) for i in range(X.shape[0]): WHi = np.dot(W[i, :], H) if beta_loss - 1 < 0: WHi[WHi < EPSILON] = EPSILON WHi **= beta_loss - 1 WHHt[i, :] = np.dot(WHi, H.T) else: WH **= beta_loss - 1 WHHt = np.dot(WH, H.T) denominator = WHHt if l1_reg_W > 0: denominator += l1_reg_W if l2_reg_W > 0: denominator = denominator + l2_reg_W * W denominator[denominator == 0] = EPSILON numerator /= denominator delta_W = numerator if gamma != 1: delta_W **= gamma W *= delta_W (W, H_sum, HHt, XHt) = (W, H_sum, HHt, XHt) </DeepExtract> if beta_loss < 1: W[W < np.finfo(np.float64).eps] = 0.0 if update_H: <DeepExtract> if beta_loss == 2: numerator = safe_sparse_dot(W.T, X) denominator = np.linalg.multi_dot([W.T, W, H]) else: WH_safe_X = 
_special_sparse_dot(W, H, X) if sp.issparse(X): WH_safe_X_data = WH_safe_X.data X_data = X.data else: WH_safe_X_data = WH_safe_X X_data = X WH = WH_safe_X.copy() if beta_loss - 1.0 < 0: WH[WH < EPSILON] = EPSILON if beta_loss - 2.0 < 0: WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON if beta_loss == 1: np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data) elif beta_loss == 0: WH_safe_X_data **= -1 WH_safe_X_data **= 2 WH_safe_X_data *= X_data else: WH_safe_X_data **= beta_loss - 2 WH_safe_X_data *= X_data numerator = safe_sparse_dot(W.T, WH_safe_X) if beta_loss == 1: W_sum = np.sum(W, axis=0) W_sum[W_sum == 0] = 1.0 denominator = W_sum[:, np.newaxis] else: if sp.issparse(X): WtWH = np.empty(H.shape) for i in range(X.shape[1]): WHi = np.dot(W, H[:, i]) if beta_loss - 1 < 0: WHi[WHi < EPSILON] = EPSILON WHi **= beta_loss - 1 WtWH[:, i] = np.dot(W.T, WHi) else: WH **= beta_loss - 1 WtWH = np.dot(W.T, WH) denominator = WtWH if l1_reg_H > 0: denominator += l1_reg_H if l2_reg_H > 0: denominator = denominator + l2_reg_H * H denominator[denominator == 0] = EPSILON if A is not None and B is not None: if gamma != 1: H **= 1 / gamma numerator *= H A *= rho B *= rho A += numerator B += denominator H = A / B if gamma != 1: H **= gamma else: delta_H = numerator delta_H /= denominator if gamma != 1: delta_H **= gamma H *= delta_H H = H </DeepExtract> (H_sum, HHt, XHt) = (None, None, None) if beta_loss <= 1: H[H < np.finfo(np.float64).eps] = 0.0 if tol > 0 and n_iter % 10 == 0: <DeepExtract> beta_loss = _beta_loss_to_float(beta_loss) if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) if beta_loss == 2: if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H) cross_prod = trace_dot(X * H.T, W) res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0 else: res = squared_norm(X - np.dot(W, H)) / 2.0 if True: error = np.sqrt(res * 2) else: error = res if sp.issparse(X): WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] WH_data[WH_data < EPSILON] = EPSILON if beta_loss == 1: sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) div = X_data / WH_data res = np.dot(X_data, np.log(div)) res += sum_WH - X_data.sum() elif beta_loss == 0: div = X_data / WH_data res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div)) else: if sp.issparse(X): sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta_loss) else: sum_WH_beta = np.sum(WH ** beta_loss) sum_X_WH = np.dot(X_data, WH_data ** (beta_loss - 1)) res = (X_data ** beta_loss).sum() - beta_loss * sum_X_WH res += sum_WH_beta * (beta_loss - 1) res /= beta_loss * (beta_loss - 1) if True: res = max(res, 0) error = np.sqrt(2 * res) else: error = res </DeepExtract> if verbose: iter_time = time.time() print('Epoch %02d reached after %.3f seconds, error: %f' % (n_iter, iter_time - start_time, error)) if (previous_error - error) / error_at_init < tol: break previous_error = error if verbose and (tol == 0 or n_iter % 10 != 0): end_time = time.time() print('Epoch %02d reached after %.3f seconds.' % (n_iter, end_time - start_time)) return (W, H, n_iter)
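For beta_loss=2 (Frobenius) the numerators and denominators computed above reduce to the classic Lee & Seung rules W <- W * (X H^T) / (W H H^T) and H <- H * (W^T X) / (W^T W H). A compact NumPy sketch of that special case only, with no regularization, dense X and a small epsilon guard; the function name nmf_mu_frobenius is an assumption, not an sklearn API.

import numpy as np

def nmf_mu_frobenius(X, n_components, n_iter=200, eps=1e-10, seed=0):
    rng = np.random.RandomState(seed)
    n, m = X.shape
    W = rng.rand(n, n_components)
    H = rng.rand(n_components, m)
    for _ in range(n_iter):
        # multiplicative updates; eps keeps denominators strictly positive
        W *= (X @ H.T) / (W @ (H @ H.T) + eps)
        H *= (W.T @ X) / ((W.T @ W) @ H + eps)
    return W, H

rng = np.random.RandomState(42)
X = np.abs(rng.rand(20, 10))
W, H = nmf_mu_frobenius(X, n_components=3)
print(np.linalg.norm(X - W @ H))  # reconstruction error shrinks as n_iter grows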
def fit(self, X, y, sample_weight=None): """Build a boosted classifier/regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Fitted estimator. """ self._validate_params() (X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc'], ensure_2d=True, allow_nd=True, dtype=None, y_numeric=is_regressor(self)) sample_weight = _check_sample_weight(sample_weight, X, np.float64, copy=True, only_non_negative=True) sample_weight /= sample_weight.sum() super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1)) if self.algorithm == 'SAMME.R': if not hasattr(self.estimator_, 'predict_proba'): raise TypeError("AdaBoostClassifier with algorithm='SAMME.R' requires that the weak learner supports the calculation of class probabilities with a predict_proba method.\nPlease change the base estimator or set algorithm='SAMME' instead.") if not has_fit_parameter(self.estimator_, 'sample_weight'): raise ValueError(f"{self.estimator.__class__.__name__} doesn't support sample_weight.") self.estimators_ = [] self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) random_state = check_random_state(self.random_state) epsilon = np.finfo(sample_weight.dtype).eps zero_weight_mask = sample_weight == 0.0 for iboost in range(self.n_estimators): sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None) sample_weight[zero_weight_mask] = 0.0 pass if sample_weight is None: break self.estimator_weights_[iboost] = estimator_weight self.estimator_errors_[iboost] = estimator_error if estimator_error == 0: break sample_weight_sum = np.sum(sample_weight) if not np.isfinite(sample_weight_sum): warnings.warn(f'Sample weights have reached infinite values, at iteration {iboost}, causing overflow. Iterations stopped. Try lowering the learning rate.', stacklevel=2) break if sample_weight_sum <= 0: break if iboost < self.n_estimators - 1: sample_weight /= sample_weight_sum return self
def fit(self, X, y, sample_weight=None): """Build a boosted classifier/regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Fitted estimator. """ self._validate_params() (X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc'], ensure_2d=True, allow_nd=True, dtype=None, y_numeric=is_regressor(self)) sample_weight = _check_sample_weight(sample_weight, X, np.float64, copy=True, only_non_negative=True) sample_weight /= sample_weight.sum() <DeepExtract> super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1)) if self.algorithm == 'SAMME.R': if not hasattr(self.estimator_, 'predict_proba'): raise TypeError("AdaBoostClassifier with algorithm='SAMME.R' requires that the weak learner supports the calculation of class probabilities with a predict_proba method.\nPlease change the base estimator or set algorithm='SAMME' instead.") if not has_fit_parameter(self.estimator_, 'sample_weight'): raise ValueError(f"{self.estimator.__class__.__name__} doesn't support sample_weight.") </DeepExtract> self.estimators_ = [] self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) random_state = check_random_state(self.random_state) epsilon = np.finfo(sample_weight.dtype).eps zero_weight_mask = sample_weight == 0.0 for iboost in range(self.n_estimators): sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None) sample_weight[zero_weight_mask] = 0.0 <DeepExtract> pass </DeepExtract> if sample_weight is None: break self.estimator_weights_[iboost] = estimator_weight self.estimator_errors_[iboost] = estimator_error if estimator_error == 0: break sample_weight_sum = np.sum(sample_weight) if not np.isfinite(sample_weight_sum): warnings.warn(f'Sample weights have reached infinite values, at iteration {iboost}, causing overflow. Iterations stopped. Try lowering the learning rate.', stacklevel=2) break if sample_weight_sum <= 0: break if iboost < self.n_estimators - 1: sample_weight /= sample_weight_sum return self
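The boosting loop above is the AdaBoost fit routine (the per-iteration _boost step was inlined away to a bare pass). A minimal usage sketch of the public classifier, offered as an illustration rather than a claim about this exact snippet:

# Illustrative sketch: fit() normalizes sample weights and runs n_estimators boosting rounds.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(n_samples=100, n_features=4, random_state=0)
clf = AdaBoostClassifier(n_estimators=50, random_state=0)
clf.fit(X, y)                            # sample_weight defaults to uniform 1 / n_samples
print(clf.estimator_weights_[:3])        # per-round weights filled in by the boosting loop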
def _fit_stages(X, y, raw_predictions, sample_weight, random_state, X_val, y_val, sample_weight_val, begin_at_stage=0, monitor=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples,), dtype=bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self._loss if self.verbose: verbose_reporter = VerboseReporter(verbose=self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None if self.n_iter_no_change is not None: loss_history = np.full(self.n_iter_no_change, np.inf) if False: X_val = self._validate_data(X_val, dtype=DTYPE, order='C', accept_sparse='csr', reset=False) raw_predictions = self._raw_predict_init(X_val) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X_val, self.learning_rate, raw_predictions) yield raw_predictions.copy() i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) if i == 0: initial_loss = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask]) assert sample_mask.dtype == bool loss = self._loss original_y = y raw_predictions_copy = raw_predictions.copy() for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, raw_predictions_copy, k=k, sample_weight=sample_weight) tree = DecisionTreeRegressor(criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_decrease=self.min_impurity_decrease, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, ccp_alpha=self.ccp_alpha) if self.subsample < 1.0: sample_weight = sample_weight * sample_mask.astype(np.float64) X = X_csr if X_csr is not None else X tree.fit(X, residual, sample_weight=sample_weight, check_input=False) loss.update_terminal_regions(tree.tree_, X, y, residual, raw_predictions, sample_weight, sample_mask, learning_rate=self.learning_rate, k=k) self.estimators_[i, k] = tree raw_predictions = raw_predictions if do_oob: self.train_score_[i] = loss_(y[sample_mask], raw_predictions[sample_mask], sample_weight[sample_mask]) self.oob_scores_[i] = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask]) previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1] self.oob_improvement_[i] = previous_loss - self.oob_scores_[i] self.oob_score_ = self.oob_scores_[-1] else: self.train_score_[i] = loss_(y, raw_predictions, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break if self.n_iter_no_change is not None: validation_loss = loss_(y_val, next(y_val_pred_iter), sample_weight_val) if np.any(validation_loss + self.tol < loss_history): loss_history[i % len(loss_history)] = validation_loss else: break return i + 1
def _fit_stages(X, y, raw_predictions, sample_weight, random_state, X_val, y_val, sample_weight_val, begin_at_stage=0, monitor=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples,), dtype=bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self._loss if self.verbose: verbose_reporter = VerboseReporter(verbose=self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None if self.n_iter_no_change is not None: loss_history = np.full(self.n_iter_no_change, np.inf) <DeepExtract> if False: X_val = self._validate_data(X_val, dtype=DTYPE, order='C', accept_sparse='csr', reset=False) raw_predictions = self._raw_predict_init(X_val) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X_val, self.learning_rate, raw_predictions) yield raw_predictions.copy() </DeepExtract> i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) if i == 0: initial_loss = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask]) <DeepExtract> assert sample_mask.dtype == bool loss = self._loss original_y = y raw_predictions_copy = raw_predictions.copy() for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, raw_predictions_copy, k=k, sample_weight=sample_weight) tree = DecisionTreeRegressor(criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_decrease=self.min_impurity_decrease, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, ccp_alpha=self.ccp_alpha) if self.subsample < 1.0: sample_weight = sample_weight * sample_mask.astype(np.float64) X = X_csr if X_csr is not None else X tree.fit(X, residual, sample_weight=sample_weight, check_input=False) loss.update_terminal_regions(tree.tree_, X, y, residual, raw_predictions, sample_weight, sample_mask, learning_rate=self.learning_rate, k=k) self.estimators_[i, k] = tree raw_predictions = raw_predictions </DeepExtract> if do_oob: self.train_score_[i] = loss_(y[sample_mask], raw_predictions[sample_mask], sample_weight[sample_mask]) self.oob_scores_[i] = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask]) previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1] self.oob_improvement_[i] = previous_loss - self.oob_scores_[i] self.oob_score_ = self.oob_scores_[-1] else: self.train_score_[i] = loss_(y, raw_predictions, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break if self.n_iter_no_change is not None: validation_loss = loss_(y_val, next(y_val_pred_iter), sample_weight_val) if np.any(validation_loss + self.tol < loss_history): loss_history[i % len(loss_history)] = validation_loss else: break return i + 1
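_fit_stages drives stage-wise gradient boosting with optional out-of-bag tracking (subsample < 1) and validation-based early stopping (n_iter_no_change). A hedged sketch of the public estimator exercising both options, assuming scikit-learn is installed:

# Illustrative sketch: subsample < 1.0 turns on the OOB bookkeeping seen above,
# and n_iter_no_change keeps a loss_history buffer for early stopping.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=200, random_state=0)
gb = GradientBoostingClassifier(n_estimators=100, subsample=0.8,
                                n_iter_no_change=5, validation_fraction=0.1,
                                random_state=0)
gb.fit(X, y)
print(gb.n_estimators_, gb.oob_improvement_[:3])   # stages actually fit, first OOB improvements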
def solve(self, X, y, sample_weight): """Solve the optimization problem. This is the main routine. Order of calls: self.setup() while iteration: self.update_gradient_hessian() self.inner_solve() self.line_search() self.check_convergence() self.finalize() Returns ------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Solution of the optimization problem. """ (_, _, self.raw_prediction) = self.linear_loss.weight_intercept_raw(self.coef, X) self.loss_value = self.linear_loss.loss(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads, raw_prediction=self.raw_prediction) self.iteration = 1 self.converged = False while self.iteration <= self.max_iter and (not self.converged): if self.verbose: print(f'Newton iter={self.iteration}') self.use_fallback_lbfgs_solve = False if self.use_fallback_lbfgs_solve: break (beta, sigma) = (0.5, 0.00048828125) eps = 16 * np.finfo(self.loss_value.dtype).eps t = 1 armijo_term = sigma * self.gradient_times_newton (_, _, raw_prediction_newton) = self.linear_loss.weight_intercept_raw(self.coef_newton, X) self.coef_old = self.coef self.loss_value_old = self.loss_value self.gradient_old = self.gradient sum_abs_grad_old = -1 is_verbose = self.verbose >= 2 if is_verbose: print(' Backtracking Line Search') print(f' eps=10 * finfo.eps={eps}') for i in range(21): self.coef = self.coef_old + t * self.coef_newton raw = self.raw_prediction + t * raw_prediction_newton (self.loss_value, self.gradient) = self.linear_loss.loss_gradient(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads, raw_prediction=raw) loss_improvement = self.loss_value - self.loss_value_old check = loss_improvement <= t * armijo_term if is_verbose: print(f' line search iteration={i + 1}, step size={t}\n check loss improvement <= armijo term: {loss_improvement} <= {t * armijo_term} {check}') if check: break tiny_loss = np.abs(self.loss_value_old * eps) check = np.abs(loss_improvement) <= tiny_loss if is_verbose: print(f' check loss |improvement| <= eps * |loss_old|: {np.abs(loss_improvement)} <= {tiny_loss} {check}') if check: if sum_abs_grad_old < 0: sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1) sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1) check = sum_abs_grad < sum_abs_grad_old if is_verbose: print(f' check sum(|gradient|) < sum(|gradient_old|): {sum_abs_grad} < {sum_abs_grad_old} {check}') if check: break t *= beta else: warnings.warn(f'Line search of Newton solver {self.__class__.__name__} at iteration #{self.iteration} did no converge after 21 line search refinement iterations. It will now resort to lbfgs instead.', ConvergenceWarning) if self.verbose: print(' Line search did not converge and resorts to lbfgs instead.') self.use_fallback_lbfgs_solve = True return self.raw_prediction = raw if self.use_fallback_lbfgs_solve: break if self.verbose: print(' Check Convergence') check = np.max(np.abs(self.gradient)) if self.verbose: print(f' 1. max |gradient| {check} <= {self.tol}') if check > self.tol: return d2 = self.coef_newton @ self.hessian @ self.coef_newton if self.verbose: print(f' 2. 
Newton decrement {0.5 * d2} <= {self.tol}') if 0.5 * d2 > self.tol: return if self.verbose: loss_value = self.linear_loss.loss(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads) print(f' Solver did converge at loss = {loss_value}.') self.converged = True self.iteration += 1 if not self.converged: if self.use_fallback_lbfgs_solve: opt_res = scipy.optimize.minimize(self.linear_loss.loss_gradient, self.coef, method='L-BFGS-B', jac=True, options={'maxiter': self.max_iter, 'maxls': 50, 'iprint': self.verbose - 1, 'gtol': self.tol, 'ftol': 64 * np.finfo(np.float64).eps}, args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads)) self.n_iter_ = _check_optimize_result('lbfgs', opt_res) self.coef = opt_res.x self.converged = opt_res.status == 0 else: warnings.warn(f'Newton solver did not converge after {self.iteration - 1} iterations.', ConvergenceWarning) self.iteration -= 1 pass return self.coef
def solve(self, X, y, sample_weight): """Solve the optimization problem. This is the main routine. Order of calls: self.setup() while iteration: self.update_gradient_hessian() self.inner_solve() self.line_search() self.check_convergence() self.finalize() Returns ------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Solution of the optimization problem. """ <DeepExtract> (_, _, self.raw_prediction) = self.linear_loss.weight_intercept_raw(self.coef, X) self.loss_value = self.linear_loss.loss(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads, raw_prediction=self.raw_prediction) </DeepExtract> self.iteration = 1 self.converged = False while self.iteration <= self.max_iter and (not self.converged): if self.verbose: print(f'Newton iter={self.iteration}') self.use_fallback_lbfgs_solve = False <DeepExtract> </DeepExtract> <DeepExtract> </DeepExtract> if self.use_fallback_lbfgs_solve: break <DeepExtract> (beta, sigma) = (0.5, 0.00048828125) eps = 16 * np.finfo(self.loss_value.dtype).eps t = 1 armijo_term = sigma * self.gradient_times_newton (_, _, raw_prediction_newton) = self.linear_loss.weight_intercept_raw(self.coef_newton, X) self.coef_old = self.coef self.loss_value_old = self.loss_value self.gradient_old = self.gradient sum_abs_grad_old = -1 is_verbose = self.verbose >= 2 if is_verbose: print(' Backtracking Line Search') print(f' eps=10 * finfo.eps={eps}') for i in range(21): self.coef = self.coef_old + t * self.coef_newton raw = self.raw_prediction + t * raw_prediction_newton (self.loss_value, self.gradient) = self.linear_loss.loss_gradient(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads, raw_prediction=raw) loss_improvement = self.loss_value - self.loss_value_old check = loss_improvement <= t * armijo_term if is_verbose: print(f' line search iteration={i + 1}, step size={t}\n check loss improvement <= armijo term: {loss_improvement} <= {t * armijo_term} {check}') if check: break tiny_loss = np.abs(self.loss_value_old * eps) check = np.abs(loss_improvement) <= tiny_loss if is_verbose: print(f' check loss |improvement| <= eps * |loss_old|: {np.abs(loss_improvement)} <= {tiny_loss} {check}') if check: if sum_abs_grad_old < 0: sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1) sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1) check = sum_abs_grad < sum_abs_grad_old if is_verbose: print(f' check sum(|gradient|) < sum(|gradient_old|): {sum_abs_grad} < {sum_abs_grad_old} {check}') if check: break t *= beta else: warnings.warn(f'Line search of Newton solver {self.__class__.__name__} at iteration #{self.iteration} did no converge after 21 line search refinement iterations. It will now resort to lbfgs instead.', ConvergenceWarning) if self.verbose: print(' Line search did not converge and resorts to lbfgs instead.') self.use_fallback_lbfgs_solve = True return self.raw_prediction = raw </DeepExtract> if self.use_fallback_lbfgs_solve: break <DeepExtract> if self.verbose: print(' Check Convergence') check = np.max(np.abs(self.gradient)) if self.verbose: print(f' 1. max |gradient| {check} <= {self.tol}') if check > self.tol: return d2 = self.coef_newton @ self.hessian @ self.coef_newton if self.verbose: print(f' 2. 
Newton decrement {0.5 * d2} <= {self.tol}') if 0.5 * d2 > self.tol: return if self.verbose: loss_value = self.linear_loss.loss(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads) print(f' Solver did converge at loss = {loss_value}.') self.converged = True </DeepExtract> self.iteration += 1 if not self.converged: if self.use_fallback_lbfgs_solve: <DeepExtract> opt_res = scipy.optimize.minimize(self.linear_loss.loss_gradient, self.coef, method='L-BFGS-B', jac=True, options={'maxiter': self.max_iter, 'maxls': 50, 'iprint': self.verbose - 1, 'gtol': self.tol, 'ftol': 64 * np.finfo(np.float64).eps}, args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads)) self.n_iter_ = _check_optimize_result('lbfgs', opt_res) self.coef = opt_res.x self.converged = opt_res.status == 0 </DeepExtract> else: warnings.warn(f'Newton solver did not converge after {self.iteration - 1} iterations.', ConvergenceWarning) self.iteration -= 1 <DeepExtract> pass </DeepExtract> return self.coef
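The solve() routine above is a Newton solver with backtracking line search, convergence checks on the gradient and the Newton decrement, and an lbfgs fallback. In recent scikit-learn releases a solver of this kind backs the "newton-cholesky" option of the generalized linear models; the sketch below assumes such a release and is illustrative only:

# Illustrative sketch (assumes a scikit-learn version that accepts solver="newton-cholesky"):
# the GLM delegates to a Newton solver that falls back to lbfgs if the inner solve
# or line search fails, as in the routine above.
import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 3))
y = rng.poisson(lam=np.exp(X @ np.array([0.3, -0.2, 0.1])))
reg = PoissonRegressor(solver="newton-cholesky", max_iter=100, tol=1e-4)
reg.fit(X, y)
print(reg.coef_, reg.n_iter_)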
def _sparse_fit(X, strategy, missing_values, fill_value): """Fit the transformer on sparse data.""" missing_mask = _get_mask(X, missing_values) mask_data = missing_mask.data n_implicit_zeros = X.shape[0] - np.diff(X.indptr) statistics = np.empty(X.shape[1]) if strategy == 'constant': statistics.fill(fill_value) else: for i in range(X.shape[1]): column = X.data[X.indptr[i]:X.indptr[i + 1]] mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]] column = column[~mask_column] mask_zeros = _get_mask(column, 0) column = column[~mask_zeros] n_explicit_zeros = mask_zeros.sum() n_zeros = n_implicit_zeros[i] + n_explicit_zeros if len(column) == 0 and self.keep_empty_features: statistics[i] = 0 elif strategy == 'mean': s = column.size + n_zeros statistics[i] = np.nan if s == 0 else column.sum() / s elif strategy == 'median': statistics[i] = _get_median(column, n_zeros) elif strategy == 'most_frequent': if column.size > 0: if column.dtype == object: counter = Counter(column) most_frequent_count = counter.most_common(1)[0][1] most_frequent_value = min((value for (value, count) in counter.items() if count == most_frequent_count)) else: mode = _mode(column) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 if most_frequent_count == 0 and n_zeros == 0: statistics[i] = np.nan elif most_frequent_count < n_zeros: statistics[i] = 0 elif most_frequent_count > n_zeros: statistics[i] = most_frequent_value elif most_frequent_count == n_zeros: statistics[i] = min(most_frequent_value, 0) super()._fit_indicator(missing_mask) return statistics
def _sparse_fit(X, strategy, missing_values, fill_value): """Fit the transformer on sparse data.""" missing_mask = _get_mask(X, missing_values) mask_data = missing_mask.data n_implicit_zeros = X.shape[0] - np.diff(X.indptr) statistics = np.empty(X.shape[1]) if strategy == 'constant': statistics.fill(fill_value) else: for i in range(X.shape[1]): column = X.data[X.indptr[i]:X.indptr[i + 1]] mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]] column = column[~mask_column] mask_zeros = _get_mask(column, 0) column = column[~mask_zeros] n_explicit_zeros = mask_zeros.sum() n_zeros = n_implicit_zeros[i] + n_explicit_zeros if len(column) == 0 and self.keep_empty_features: statistics[i] = 0 elif strategy == 'mean': s = column.size + n_zeros statistics[i] = np.nan if s == 0 else column.sum() / s elif strategy == 'median': statistics[i] = _get_median(column, n_zeros) elif strategy == 'most_frequent': <DeepExtract> if column.size > 0: if column.dtype == object: counter = Counter(column) most_frequent_count = counter.most_common(1)[0][1] most_frequent_value = min((value for (value, count) in counter.items() if count == most_frequent_count)) else: mode = _mode(column) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 if most_frequent_count == 0 and n_zeros == 0: statistics[i] = np.nan elif most_frequent_count < n_zeros: statistics[i] = 0 elif most_frequent_count > n_zeros: statistics[i] = most_frequent_value elif most_frequent_count == n_zeros: statistics[i] = min(most_frequent_value, 0) </DeepExtract> super()._fit_indicator(missing_mask) return statistics
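_sparse_fit computes per-column statistics on sparse input while counting the implicit zeros that sparse storage leaves out. A minimal sketch of the public imputer path that reaches it, assuming SciPy and scikit-learn are available (illustrative only):

# Illustrative sketch: on sparse input the 'mean' strategy includes implicit zeros,
# exactly as the n_zeros bookkeeping above does; missing_values must not be 0 for sparse data.
import numpy as np
import scipy.sparse as sp
from sklearn.impute import SimpleImputer

X = sp.csc_matrix(np.array([[1.0, 2.0], [0.0, -1.0], [3.0, -1.0]]))
imp = SimpleImputer(missing_values=-1.0, strategy="mean")
print(imp.fit_transform(X).toarray())    # statistics_ holds the per-column means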
def test_warm_start_multitask_lasso(): random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) (X, y, X_test, y_test) = (X, y, X_test, y_test) Y = np.c_[y, y] clf = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True) ignore_warnings(clf.fit)(X, Y) ignore_warnings(clf.fit)(X, Y) clf2 = MultiTaskLasso(alpha=0.1, max_iter=10) ignore_warnings(clf2.fit)(X, Y) assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_warm_start_multitask_lasso(): <DeepExtract> random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) (X, y, X_test, y_test) = (X, y, X_test, y_test) </DeepExtract> Y = np.c_[y, y] clf = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True) ignore_warnings(clf.fit)(X, Y) ignore_warnings(clf.fit)(X, Y) clf2 = MultiTaskLasso(alpha=0.1, max_iter=10) ignore_warnings(clf2.fit)(X, Y) assert_array_almost_equal(clf2.coef_, clf.coef_)
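The test above checks that two warm-started fits of 5 iterations each reach the same coefficients as a single 10-iteration fit. A hedged standalone sketch of the same idea (random data, so treat the final comparison as indicative rather than guaranteed):

# Illustrative sketch: warm_start=True makes a second fit() continue from the
# previous coef_ instead of restarting from zeros. Expect ConvergenceWarning at max_iter=5.
import numpy as np
from sklearn.linear_model import MultiTaskLasso

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
Y = np.c_[X @ rng.randn(10), X @ rng.randn(10)]

warm = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True)
warm.fit(X, Y)                           # first 5 coordinate-descent passes
warm.fit(X, Y)                           # 5 more, starting from warm.coef_
cold = MultiTaskLasso(alpha=0.1, max_iter=10).fit(X, Y)
print(np.allclose(warm.coef_, cold.coef_, atol=1e-4))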
@pytest.mark.parametrize('dataset_constructor', [make_dense_dataset_32, make_dense_dataset_64, make_sparse_dataset_32, make_sparse_dataset_64]) def test_seq_dataset_basic_iteration(dataset_constructor): NUMBER_OF_RUNS = 5 dataset = dataset_constructor() for _ in range(NUMBER_OF_RUNS): (xi_, yi, swi, idx) = dataset._next_py() xi = sp.csr_matrix(xi_, shape=(1, X64.shape[1])) xi.eliminate_zeros() X_csr64[idx].eliminate_zeros() X_csr64[idx] = X_csr64[idx].astype(xi.dtype) assert xi.shape[0] == X_csr64[idx].shape[0] assert xi.shape[1] == X_csr64[idx].shape[1] assert_array_equal(xi.data, X_csr64[idx].data) assert_array_equal(xi.indices, X_csr64[idx].indices) assert_array_equal(xi.indptr, X_csr64[idx].indptr) assert yi == y64[idx] assert swi == sample_weight64[idx] (xi_, yi, swi, idx) = dataset._random_py() xi = sp.csr_matrix(xi_, shape=(1, X64.shape[1])) xi.eliminate_zeros() X_csr64[idx].eliminate_zeros() X_csr64[idx] = X_csr64[idx].astype(xi.dtype) assert xi.shape[0] == X_csr64[idx].shape[0] assert xi.shape[1] == X_csr64[idx].shape[1] assert_array_equal(xi.data, X_csr64[idx].data) assert_array_equal(xi.indices, X_csr64[idx].indices) assert_array_equal(xi.indptr, X_csr64[idx].indptr) assert yi == y64[idx] assert swi == sample_weight64[idx]
@pytest.mark.parametrize('dataset_constructor', [make_dense_dataset_32, make_dense_dataset_64, make_sparse_dataset_32, make_sparse_dataset_64]) def test_seq_dataset_basic_iteration(dataset_constructor): NUMBER_OF_RUNS = 5 dataset = dataset_constructor() for _ in range(NUMBER_OF_RUNS): (xi_, yi, swi, idx) = dataset._next_py() xi = sp.csr_matrix(xi_, shape=(1, X64.shape[1])) <DeepExtract> xi.eliminate_zeros() X_csr64[idx].eliminate_zeros() X_csr64[idx] = X_csr64[idx].astype(xi.dtype) assert xi.shape[0] == X_csr64[idx].shape[0] assert xi.shape[1] == X_csr64[idx].shape[1] assert_array_equal(xi.data, X_csr64[idx].data) assert_array_equal(xi.indices, X_csr64[idx].indices) assert_array_equal(xi.indptr, X_csr64[idx].indptr) </DeepExtract> assert yi == y64[idx] assert swi == sample_weight64[idx] (xi_, yi, swi, idx) = dataset._random_py() xi = sp.csr_matrix(xi_, shape=(1, X64.shape[1])) <DeepExtract> xi.eliminate_zeros() X_csr64[idx].eliminate_zeros() X_csr64[idx] = X_csr64[idx].astype(xi.dtype) assert xi.shape[0] == X_csr64[idx].shape[0] assert xi.shape[1] == X_csr64[idx].shape[1] assert_array_equal(xi.data, X_csr64[idx].data) assert_array_equal(xi.indices, X_csr64[idx].indices) assert_array_equal(xi.indptr, X_csr64[idx].indptr) </DeepExtract> assert yi == y64[idx] assert swi == sample_weight64[idx]
def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ return self.partial_fit(X, y)
def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ <DeepExtract> if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ </DeepExtract> return self.partial_fit(X, y)
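The fit above simply deletes any previously learned state and delegates to partial_fit; a minimal usage sketch of both entry points (illustrative, assuming scikit-learn and NumPy):

# Illustrative sketch: fit() = reset + one partial_fit; partial_fit can also be
# called directly on successive mini-batches for out-of-core scaling.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.array([[1.0, 10.0], [2.0, 0.0], [3.0, 5.0]])
scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
print(scaler.data_min_, scaler.data_max_)
print(scaler.transform(X))               # each column rescaled to [0, 1]

streaming = MinMaxScaler().partial_fit(X[:2]).partial_fit(X[2:])
print(np.allclose(scaler.data_max_, streaming.data_max_))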
def dict_learning_online(X, n_components=2, *, alpha=1, n_iter='deprecated', max_iter=None, return_code=True, dict_init=None, callback=None, batch_size=256, verbose=False, shuffle=True, n_jobs=None, method='lars', iter_offset='deprecated', random_state=None, return_inner_stats='deprecated', inner_stats='deprecated', return_n_iter='deprecated', positive_dict=False, positive_code=False, method_max_iter=1000, tol=0.001, max_no_improvement=10): """Solve a dictionary learning matrix factorization problem online. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm which is the sum of the absolute values of all the entries in the matrix. This is accomplished by repeatedly iterating over mini-batches by slicing the input data. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data matrix. n_components : int or None, default=2 Number of dictionary atoms to extract. If None, then ``n_components`` is set to ``n_features``. alpha : float, default=1 Sparsity controlling parameter. n_iter : int, default=100 Number of mini-batch iterations to perform. .. deprecated:: 1.1 `n_iter` is deprecated in 1.1 and will be removed in 1.4. Use `max_iter` instead. max_iter : int, default=None Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics. If ``max_iter`` is not None, ``n_iter`` is ignored. .. versionadded:: 1.1 return_code : bool, default=True Whether to also return the code U or just the dictionary `V`. dict_init : ndarray of shape (n_components, n_features), default=None Initial values for the dictionary for warm restart scenarios. If `None`, the initial values for the dictionary are created with an SVD decomposition of the data via :func:`~sklearn.utils.randomized_svd`. callback : callable, default=None A callable that gets invoked at the end of each iteration. batch_size : int, default=256 The number of samples to take in each batch. .. versionchanged:: 1.3 The default value of `batch_size` changed from 3 to 256 in version 1.3. verbose : bool, default=False To control the verbosity of the procedure. shuffle : bool, default=True Whether to shuffle the data before splitting it in batches. n_jobs : int, default=None Number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. method : {'lars', 'cd'}, default='lars' * `'lars'`: uses the least angle regression method to solve the lasso problem (`linear_model.lars_path`); * `'cd'`: uses the coordinate descent method to compute the Lasso solution (`linear_model.Lasso`). Lars will be faster if the estimated components are sparse. iter_offset : int, default=0 Number of previous iterations completed on the dictionary used for initialization. .. deprecated:: 1.1 `iter_offset` serves internal purpose only and will be removed in 1.4. random_state : int, RandomState instance or None, default=None Used for initializing the dictionary when ``dict_init`` is not specified, randomly shuffling the data when ``shuffle`` is set to ``True``, and updating the dictionary. 
Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. return_inner_stats : bool, default=False Return the inner statistics A (dictionary covariance) and B (data approximation). Useful to restart the algorithm in an online setting. If `return_inner_stats` is `True`, `return_code` is ignored. .. deprecated:: 1.1 `return_inner_stats` serves internal purpose only and will be removed in 1.4. inner_stats : tuple of (A, B) ndarrays, default=None Inner sufficient statistics that are kept by the algorithm. Passing them at initialization is useful in online settings, to avoid losing the history of the evolution. `A` `(n_components, n_components)` is the dictionary covariance matrix. `B` `(n_features, n_components)` is the data approximation matrix. .. deprecated:: 1.1 `inner_stats` serves internal purpose only and will be removed in 1.4. return_n_iter : bool, default=False Whether or not to return the number of iterations. .. deprecated:: 1.1 `return_n_iter` will be removed in 1.4 and n_iter will never be returned. positive_dict : bool, default=False Whether to enforce positivity when finding the dictionary. .. versionadded:: 0.20 positive_code : bool, default=False Whether to enforce positivity when finding the code. .. versionadded:: 0.20 method_max_iter : int, default=1000 Maximum number of iterations to perform when solving the lasso problem. .. versionadded:: 0.22 tol : float, default=1e-3 Control early stopping based on the norm of the differences in the dictionary between 2 steps. Used only if `max_iter` is not None. To disable early stopping based on changes in the dictionary, set `tol` to 0.0. .. versionadded:: 1.1 max_no_improvement : int, default=10 Control early stopping based on the consecutive number of mini batches that does not yield an improvement on the smoothed cost function. Used only if `max_iter` is not None. To disable convergence detection based on cost function, set `max_no_improvement` to None. .. versionadded:: 1.1 Returns ------- code : ndarray of shape (n_samples, n_components), The sparse code (only returned if `return_code=True`). dictionary : ndarray of shape (n_components, n_features), The solutions to the dictionary learning problem. n_iter : int Number of iterations run. Returned only if `return_n_iter` is set to `True`. See Also -------- dict_learning : Solve a dictionary learning matrix factorization problem. DictionaryLearning : Find a dictionary that sparsely encodes data. MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary learning algorithm. SparsePCA : Sparse Principal Components Analysis. MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. """ deps = (return_n_iter, return_inner_stats, iter_offset, inner_stats) if max_iter is not None and (not all((arg == 'deprecated' for arg in deps))): raise ValueError("The following arguments are incompatible with 'max_iter': return_n_iter, return_inner_stats, iter_offset, inner_stats") if iter_offset != 'deprecated': msg = f"'{'iter_offset'}' is deprecated in version 1.1 and will be removed in version 1.4." if additional_message: msg += f' {additional_message}' warnings.warn(msg, FutureWarning) iter_offset = iter_offset else: iter_offset = 0 if return_inner_stats != 'deprecated': msg = f"'{'return_inner_stats'}' is deprecated in version 1.1 and will be removed in version 1.4." 
if 'From 1.4 inner_stats will never be returned.': msg += f" {'From 1.4 inner_stats will never be returned.'}" warnings.warn(msg, FutureWarning) return_inner_stats = return_inner_stats else: return_inner_stats = False if inner_stats != 'deprecated': msg = f"'{'inner_stats'}' is deprecated in version 1.1 and will be removed in version 1.4." if additional_message: msg += f' {additional_message}' warnings.warn(msg, FutureWarning) inner_stats = inner_stats else: inner_stats = None if return_n_iter != 'deprecated': msg = f"'{'return_n_iter'}' is deprecated in version 1.1 and will be removed in version 1.4." if "From 1.4 'n_iter' will never be returned. Refer to the 'n_iter_' and 'n_steps_' attributes of the MiniBatchDictionaryLearning object instead.": msg += f""" {"From 1.4 'n_iter' will never be returned. Refer to the 'n_iter_' and 'n_steps_' attributes of the MiniBatchDictionaryLearning object instead."}""" warnings.warn(msg, FutureWarning) return_n_iter = return_n_iter else: return_n_iter = False if max_iter is not None: transform_algorithm = 'lasso_' + method est = MiniBatchDictionaryLearning(n_components=n_components, alpha=alpha, n_iter=n_iter, n_jobs=n_jobs, fit_algorithm=method, batch_size=batch_size, shuffle=shuffle, dict_init=dict_init, random_state=random_state, transform_algorithm=transform_algorithm, transform_alpha=alpha, positive_code=positive_code, positive_dict=positive_dict, transform_max_iter=method_max_iter, verbose=verbose, callback=callback, tol=tol, max_no_improvement=max_no_improvement).fit(X) if not return_code: return est.components_ else: code = est.transform(X) return (code, est.components_) if n_iter != 'deprecated': msg = f"'{'n_iter'}' is deprecated in version 1.1 and will be removed in version 1.4." if "Use 'max_iter' instead.": msg += f""" {"Use 'max_iter' instead."}""" warnings.warn(msg, FutureWarning) n_iter = n_iter else: n_iter = 100 if n_components is None: n_components = X.shape[1] if method not in ('lars', 'cd'): raise ValueError('Coding method not supported as a fit algorithm.') if positive_code and method in ['omp', 'lars']: raise ValueError("Positive constraint not supported for '{}' coding method.".format(method)) method = 'lasso_' + method t0 = time.time() (n_samples, n_features) = X.shape alpha = float(alpha) random_state = check_random_state(random_state) if dict_init is not None: dictionary = dict_init else: (_, S, dictionary) = randomized_svd(X, n_components, random_state=random_state) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: dictionary = dictionary[:n_components, :] else: dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]), dtype=dictionary.dtype)] if verbose == 1: print('[dict_learning]', end=' ') if shuffle: X_train = X.copy() random_state.shuffle(X_train) else: X_train = X X_train = check_array(X_train, order='C', dtype=[np.float64, np.float32], copy=False) dictionary = check_array(dictionary, order='F', dtype=X_train.dtype, copy=False) dictionary = np.require(dictionary, requirements='W') batches = gen_batches(n_samples, batch_size) batches = itertools.cycle(batches) if inner_stats is None: A = np.zeros((n_components, n_components), dtype=X_train.dtype) B = np.zeros((n_features, n_components), dtype=X_train.dtype) else: A = inner_stats[0].copy() B = inner_stats[1].copy() ii = iter_offset - 1 for (ii, batch) in zip(range(iter_offset, iter_offset + n_iter), batches): this_X = X_train[batch] dt = time.time() - t0 if verbose == 1: sys.stdout.write('.') sys.stdout.flush() 
elif verbose: if verbose > 10 or ii % ceil(100.0 / verbose) == 0: print('Iteration % 3i (elapsed time: % 3is, % 4.1fmn)' % (ii, dt, dt / 60)) if False: if method == 'lasso_cd': dictionary = check_array(dictionary, order='C', dtype=[np.float64, np.float32]) this_X = check_array(this_X, order='C', dtype=[np.float64, np.float32]) else: dictionary = check_array(dictionary) this_X = check_array(this_X) if dictionary.shape[1] != this_X.shape[1]: raise ValueError('Dictionary and X have different numbers of features:dictionary.shape: {} X.shape{}'.format(dictionary.shape, this_X.shape)) _check_positive_coding(method, positive_code) this_code = _sparse_encode(this_X, dictionary, gram=gram, cov=cov, algorithm=method, n_nonzero_coefs=n_nonzero_coefs, alpha=alpha, copy_cov=copy_cov, init=init, max_iter=method_max_iter, n_jobs=n_jobs, verbose=verbose, positive=positive_code) if ii < batch_size - 1: theta = float((ii + 1) * batch_size) else: theta = float(batch_size ** 2 + ii + 1 - batch_size) beta = (theta + 1 - batch_size) / (theta + 1) A *= beta A += np.dot(this_code.T, this_code) B *= beta B += np.dot(this_X.T, this_code) (n_samples, n_components) = this_code.shape random_state = check_random_state(random_state) if A is None: A = this_code.T @ this_code if B is None: B = this_X.T @ this_code n_unused = 0 for k in range(n_components): if A[k, k] > 1e-06: dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k] else: newd = this_X[random_state.choice(n_samples)] noise_level = 0.01 * (newd.std() or 1) noise = random_state.normal(0, noise_level, size=len(newd)) dictionary[k] = newd + noise this_code[:, k] = 0 n_unused += 1 if positive_dict: np.clip(dictionary[k], 0, None, out=dictionary[k]) dictionary[k] /= max(linalg.norm(dictionary[k]), 1) if verbose and n_unused > 0: print(f'{n_unused} unused atoms resampled.') if callback is not None: callback(locals()) if return_inner_stats: if return_n_iter: return (dictionary, (A, B), ii - iter_offset + 1) else: return (dictionary, (A, B)) if return_code: if verbose > 1: print('Learning code...', end=' ') elif verbose == 1: print('|', end=' ') if False: if method == 'lasso_cd': dictionary = check_array(dictionary, order='C', dtype=[np.float64, np.float32]) X = check_array(X, order='C', dtype=[np.float64, np.float32]) else: dictionary = check_array(dictionary) X = check_array(X) if dictionary.shape[1] != X.shape[1]: raise ValueError('Dictionary and X have different numbers of features:dictionary.shape: {} X.shape{}'.format(dictionary.shape, X.shape)) _check_positive_coding(method, positive_code) code = _sparse_encode(X, dictionary, gram=gram, cov=cov, algorithm=method, n_nonzero_coefs=n_nonzero_coefs, alpha=alpha, copy_cov=copy_cov, init=init, max_iter=method_max_iter, n_jobs=n_jobs, verbose=verbose, positive=positive_code) if verbose > 1: dt = time.time() - t0 print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60)) if return_n_iter: return (code, dictionary, ii - iter_offset + 1) else: return (code, dictionary) if return_n_iter: return (dictionary, ii - iter_offset + 1) else: return dictionary
def dict_learning_online(X, n_components=2, *, alpha=1, n_iter='deprecated', max_iter=None, return_code=True, dict_init=None, callback=None, batch_size=256, verbose=False, shuffle=True, n_jobs=None, method='lars', iter_offset='deprecated', random_state=None, return_inner_stats='deprecated', inner_stats='deprecated', return_n_iter='deprecated', positive_dict=False, positive_code=False, method_max_iter=1000, tol=0.001, max_no_improvement=10): """Solve a dictionary learning matrix factorization problem online. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. ||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm which is the sum of the absolute values of all the entries in the matrix. This is accomplished by repeatedly iterating over mini-batches by slicing the input data. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data matrix. n_components : int or None, default=2 Number of dictionary atoms to extract. If None, then ``n_components`` is set to ``n_features``. alpha : float, default=1 Sparsity controlling parameter. n_iter : int, default=100 Number of mini-batch iterations to perform. .. deprecated:: 1.1 `n_iter` is deprecated in 1.1 and will be removed in 1.4. Use `max_iter` instead. max_iter : int, default=None Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics. If ``max_iter`` is not None, ``n_iter`` is ignored. .. versionadded:: 1.1 return_code : bool, default=True Whether to also return the code U or just the dictionary `V`. dict_init : ndarray of shape (n_components, n_features), default=None Initial values for the dictionary for warm restart scenarios. If `None`, the initial values for the dictionary are created with an SVD decomposition of the data via :func:`~sklearn.utils.randomized_svd`. callback : callable, default=None A callable that gets invoked at the end of each iteration. batch_size : int, default=256 The number of samples to take in each batch. .. versionchanged:: 1.3 The default value of `batch_size` changed from 3 to 256 in version 1.3. verbose : bool, default=False To control the verbosity of the procedure. shuffle : bool, default=True Whether to shuffle the data before splitting it in batches. n_jobs : int, default=None Number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. method : {'lars', 'cd'}, default='lars' * `'lars'`: uses the least angle regression method to solve the lasso problem (`linear_model.lars_path`); * `'cd'`: uses the coordinate descent method to compute the Lasso solution (`linear_model.Lasso`). Lars will be faster if the estimated components are sparse. iter_offset : int, default=0 Number of previous iterations completed on the dictionary used for initialization. .. deprecated:: 1.1 `iter_offset` serves internal purpose only and will be removed in 1.4. random_state : int, RandomState instance or None, default=None Used for initializing the dictionary when ``dict_init`` is not specified, randomly shuffling the data when ``shuffle`` is set to ``True``, and updating the dictionary. 
Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. return_inner_stats : bool, default=False Return the inner statistics A (dictionary covariance) and B (data approximation). Useful to restart the algorithm in an online setting. If `return_inner_stats` is `True`, `return_code` is ignored. .. deprecated:: 1.1 `return_inner_stats` serves internal purpose only and will be removed in 1.4. inner_stats : tuple of (A, B) ndarrays, default=None Inner sufficient statistics that are kept by the algorithm. Passing them at initialization is useful in online settings, to avoid losing the history of the evolution. `A` `(n_components, n_components)` is the dictionary covariance matrix. `B` `(n_features, n_components)` is the data approximation matrix. .. deprecated:: 1.1 `inner_stats` serves internal purpose only and will be removed in 1.4. return_n_iter : bool, default=False Whether or not to return the number of iterations. .. deprecated:: 1.1 `return_n_iter` will be removed in 1.4 and n_iter will never be returned. positive_dict : bool, default=False Whether to enforce positivity when finding the dictionary. .. versionadded:: 0.20 positive_code : bool, default=False Whether to enforce positivity when finding the code. .. versionadded:: 0.20 method_max_iter : int, default=1000 Maximum number of iterations to perform when solving the lasso problem. .. versionadded:: 0.22 tol : float, default=1e-3 Control early stopping based on the norm of the differences in the dictionary between 2 steps. Used only if `max_iter` is not None. To disable early stopping based on changes in the dictionary, set `tol` to 0.0. .. versionadded:: 1.1 max_no_improvement : int, default=10 Control early stopping based on the consecutive number of mini batches that does not yield an improvement on the smoothed cost function. Used only if `max_iter` is not None. To disable convergence detection based on cost function, set `max_no_improvement` to None. .. versionadded:: 1.1 Returns ------- code : ndarray of shape (n_samples, n_components), The sparse code (only returned if `return_code=True`). dictionary : ndarray of shape (n_components, n_features), The solutions to the dictionary learning problem. n_iter : int Number of iterations run. Returned only if `return_n_iter` is set to `True`. See Also -------- dict_learning : Solve a dictionary learning matrix factorization problem. DictionaryLearning : Find a dictionary that sparsely encodes data. MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary learning algorithm. SparsePCA : Sparse Principal Components Analysis. MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis. """ deps = (return_n_iter, return_inner_stats, iter_offset, inner_stats) if max_iter is not None and (not all((arg == 'deprecated' for arg in deps))): raise ValueError("The following arguments are incompatible with 'max_iter': return_n_iter, return_inner_stats, iter_offset, inner_stats") <DeepExtract> if iter_offset != 'deprecated': msg = f"'{'iter_offset'}' is deprecated in version 1.1 and will be removed in version 1.4." if additional_message: msg += f' {additional_message}' warnings.warn(msg, FutureWarning) iter_offset = iter_offset else: iter_offset = 0 </DeepExtract> <DeepExtract> if return_inner_stats != 'deprecated': msg = f"'{'return_inner_stats'}' is deprecated in version 1.1 and will be removed in version 1.4." 
if 'From 1.4 inner_stats will never be returned.': msg += f" {'From 1.4 inner_stats will never be returned.'}" warnings.warn(msg, FutureWarning) return_inner_stats = return_inner_stats else: return_inner_stats = False </DeepExtract> <DeepExtract> if inner_stats != 'deprecated': msg = f"'{'inner_stats'}' is deprecated in version 1.1 and will be removed in version 1.4." if additional_message: msg += f' {additional_message}' warnings.warn(msg, FutureWarning) inner_stats = inner_stats else: inner_stats = None </DeepExtract> <DeepExtract> if return_n_iter != 'deprecated': msg = f"'{'return_n_iter'}' is deprecated in version 1.1 and will be removed in version 1.4." if "From 1.4 'n_iter' will never be returned. Refer to the 'n_iter_' and 'n_steps_' attributes of the MiniBatchDictionaryLearning object instead.": msg += f""" {"From 1.4 'n_iter' will never be returned. Refer to the 'n_iter_' and 'n_steps_' attributes of the MiniBatchDictionaryLearning object instead."}""" warnings.warn(msg, FutureWarning) return_n_iter = return_n_iter else: return_n_iter = False </DeepExtract> if max_iter is not None: transform_algorithm = 'lasso_' + method est = MiniBatchDictionaryLearning(n_components=n_components, alpha=alpha, n_iter=n_iter, n_jobs=n_jobs, fit_algorithm=method, batch_size=batch_size, shuffle=shuffle, dict_init=dict_init, random_state=random_state, transform_algorithm=transform_algorithm, transform_alpha=alpha, positive_code=positive_code, positive_dict=positive_dict, transform_max_iter=method_max_iter, verbose=verbose, callback=callback, tol=tol, max_no_improvement=max_no_improvement).fit(X) if not return_code: return est.components_ else: code = est.transform(X) return (code, est.components_) <DeepExtract> if n_iter != 'deprecated': msg = f"'{'n_iter'}' is deprecated in version 1.1 and will be removed in version 1.4." 
if "Use 'max_iter' instead.": msg += f""" {"Use 'max_iter' instead."}""" warnings.warn(msg, FutureWarning) n_iter = n_iter else: n_iter = 100 </DeepExtract> if n_components is None: n_components = X.shape[1] if method not in ('lars', 'cd'): raise ValueError('Coding method not supported as a fit algorithm.') <DeepExtract> if positive_code and method in ['omp', 'lars']: raise ValueError("Positive constraint not supported for '{}' coding method.".format(method)) </DeepExtract> method = 'lasso_' + method t0 = time.time() (n_samples, n_features) = X.shape alpha = float(alpha) random_state = check_random_state(random_state) if dict_init is not None: dictionary = dict_init else: (_, S, dictionary) = randomized_svd(X, n_components, random_state=random_state) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: dictionary = dictionary[:n_components, :] else: dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]), dtype=dictionary.dtype)] if verbose == 1: print('[dict_learning]', end=' ') if shuffle: X_train = X.copy() random_state.shuffle(X_train) else: X_train = X X_train = check_array(X_train, order='C', dtype=[np.float64, np.float32], copy=False) dictionary = check_array(dictionary, order='F', dtype=X_train.dtype, copy=False) dictionary = np.require(dictionary, requirements='W') batches = gen_batches(n_samples, batch_size) batches = itertools.cycle(batches) if inner_stats is None: A = np.zeros((n_components, n_components), dtype=X_train.dtype) B = np.zeros((n_features, n_components), dtype=X_train.dtype) else: A = inner_stats[0].copy() B = inner_stats[1].copy() ii = iter_offset - 1 for (ii, batch) in zip(range(iter_offset, iter_offset + n_iter), batches): this_X = X_train[batch] dt = time.time() - t0 if verbose == 1: sys.stdout.write('.') sys.stdout.flush() elif verbose: if verbose > 10 or ii % ceil(100.0 / verbose) == 0: print('Iteration % 3i (elapsed time: % 3is, % 4.1fmn)' % (ii, dt, dt / 60)) <DeepExtract> if False: if method == 'lasso_cd': dictionary = check_array(dictionary, order='C', dtype=[np.float64, np.float32]) this_X = check_array(this_X, order='C', dtype=[np.float64, np.float32]) else: dictionary = check_array(dictionary) this_X = check_array(this_X) if dictionary.shape[1] != this_X.shape[1]: raise ValueError('Dictionary and X have different numbers of features:dictionary.shape: {} X.shape{}'.format(dictionary.shape, this_X.shape)) _check_positive_coding(method, positive_code) this_code = _sparse_encode(this_X, dictionary, gram=gram, cov=cov, algorithm=method, n_nonzero_coefs=n_nonzero_coefs, alpha=alpha, copy_cov=copy_cov, init=init, max_iter=method_max_iter, n_jobs=n_jobs, verbose=verbose, positive=positive_code) </DeepExtract> if ii < batch_size - 1: theta = float((ii + 1) * batch_size) else: theta = float(batch_size ** 2 + ii + 1 - batch_size) beta = (theta + 1 - batch_size) / (theta + 1) A *= beta A += np.dot(this_code.T, this_code) B *= beta B += np.dot(this_X.T, this_code) <DeepExtract> (n_samples, n_components) = this_code.shape random_state = check_random_state(random_state) if A is None: A = this_code.T @ this_code if B is None: B = this_X.T @ this_code n_unused = 0 for k in range(n_components): if A[k, k] > 1e-06: dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k] else: newd = this_X[random_state.choice(n_samples)] noise_level = 0.01 * (newd.std() or 1) noise = random_state.normal(0, noise_level, size=len(newd)) dictionary[k] = newd + noise this_code[:, k] = 0 n_unused += 1 if positive_dict: 
np.clip(dictionary[k], 0, None, out=dictionary[k]) dictionary[k] /= max(linalg.norm(dictionary[k]), 1) if verbose and n_unused > 0: print(f'{n_unused} unused atoms resampled.') </DeepExtract> if callback is not None: callback(locals()) if return_inner_stats: if return_n_iter: return (dictionary, (A, B), ii - iter_offset + 1) else: return (dictionary, (A, B)) if return_code: if verbose > 1: print('Learning code...', end=' ') elif verbose == 1: print('|', end=' ') <DeepExtract> if False: if method == 'lasso_cd': dictionary = check_array(dictionary, order='C', dtype=[np.float64, np.float32]) X = check_array(X, order='C', dtype=[np.float64, np.float32]) else: dictionary = check_array(dictionary) X = check_array(X) if dictionary.shape[1] != X.shape[1]: raise ValueError('Dictionary and X have different numbers of features:dictionary.shape: {} X.shape{}'.format(dictionary.shape, X.shape)) _check_positive_coding(method, positive_code) code = _sparse_encode(X, dictionary, gram=gram, cov=cov, algorithm=method, n_nonzero_coefs=n_nonzero_coefs, alpha=alpha, copy_cov=copy_cov, init=init, max_iter=method_max_iter, n_jobs=n_jobs, verbose=verbose, positive=positive_code) </DeepExtract> if verbose > 1: dt = time.time() - t0 print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60)) if return_n_iter: return (code, dictionary, ii - iter_offset + 1) else: return (code, dictionary) if return_n_iter: return (dictionary, ii - iter_offset + 1) else: return dictionary
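dict_learning_online above keeps two code paths: the deprecated n_iter/inner_stats loop and the max_iter path that delegates to MiniBatchDictionaryLearning. A hedged usage sketch of the non-deprecated path (illustrative only):

# Illustrative sketch: with max_iter set, the function builds a MiniBatchDictionaryLearning
# estimator internally and returns (code, dictionary) since return_code defaults to True.
import numpy as np
from sklearn.decomposition import dict_learning_online

rng = np.random.RandomState(0)
X = rng.randn(100, 8)
code, dictionary = dict_learning_online(X, n_components=5, alpha=1.0,
                                        max_iter=20, batch_size=32, random_state=0)
print(code.shape, dictionary.shape)      # (100, 5) and (5, 8)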
def test_transform_target_regressor_2d_transformer_multioutput(): X = friedman[0] y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T transformer = StandardScaler() regr = TransformedTargetRegressor(regressor=LinearRegression(), transformer=transformer) y_pred = regr.fit(X, y).predict(X) assert y.shape == y_pred.shape y_tran = regr.transformer_.transform(y) y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) assert_allclose((y - y_mean) / y_std, y_tran) assert y.shape == y_pred.shape assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) lr = LinearRegression() transformer2 = clone(transformer) lr.fit(X, transformer2.fit_transform(y)) y_lr_pred = lr.predict(X) assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_2d_transformer_multioutput(): X = friedman[0] y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T transformer = StandardScaler() regr = TransformedTargetRegressor(regressor=LinearRegression(), transformer=transformer) y_pred = regr.fit(X, y).predict(X) assert y.shape == y_pred.shape y_tran = regr.transformer_.transform(y) <DeepExtract> y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) assert_allclose((y - y_mean) / y_std, y_tran) </DeepExtract> assert y.shape == y_pred.shape assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) lr = LinearRegression() transformer2 = clone(transformer) lr.fit(X, transformer2.fit_transform(y)) y_lr_pred = lr.predict(X) assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) assert_allclose(regr.regressor_.coef_, lr.coef_)
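The test above compares TransformedTargetRegressor against a hand-rolled scale-then-regress pipeline on a two-column target. A minimal standalone sketch of the estimator (illustrative):

# Illustrative sketch: the target is standardized before fitting LinearRegression and
# the transform is inverted at predict time, so predictions come back in the original scale.
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
Y = np.c_[X @ rng.randn(3), (X @ rng.randn(3)) ** 2 + 1.0]   # two output columns

regr = TransformedTargetRegressor(regressor=LinearRegression(),
                                  transformer=StandardScaler())
Y_pred = regr.fit(X, Y).predict(X)
print(Y_pred.shape)                      # same shape as Y: (100, 2)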
def predict(self, X): """Predict the first class seen in `classes_`. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- preds : ndarray of shape (n_samples,) Predictions of the first class seens in `classes_`. """ if self.methods_to_check == 'all' or 'predict' in self.methods_to_check: if should_be_fitted: check_is_fitted(self) if self.check_X is not None: params = {} if self.check_X_params is None else self.check_X_params checked_X = self.check_X(X, **params) if isinstance(checked_X, (bool, np.bool_)): assert checked_X else: X = checked_X if y is not None and self.check_y is not None: params = {} if self.check_y_params is None else self.check_y_params checked_y = self.check_y(y, **params) if isinstance(checked_y, (bool, np.bool_)): assert checked_y else: y = checked_y (X, y) = (X, y) return self.classes_[np.zeros(_num_samples(X), dtype=int)]
def predict(self, X): """Predict the first class seen in `classes_`. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- preds : ndarray of shape (n_samples,) Predictions of the first class seens in `classes_`. """ if self.methods_to_check == 'all' or 'predict' in self.methods_to_check: <DeepExtract> if should_be_fitted: check_is_fitted(self) if self.check_X is not None: params = {} if self.check_X_params is None else self.check_X_params checked_X = self.check_X(X, **params) if isinstance(checked_X, (bool, np.bool_)): assert checked_X else: X = checked_X if y is not None and self.check_y is not None: params = {} if self.check_y_params is None else self.check_y_params checked_y = self.check_y(y, **params) if isinstance(checked_y, (bool, np.bool_)): assert checked_y else: y = checked_y (X, y) = (X, y) </DeepExtract> return self.classes_[np.zeros(_num_samples(X), dtype=int)]
def fit(self, X, y=None): """Learn the idf vector (global term weights). Parameters ---------- X : sparse matrix of shape (n_samples, n_features) A matrix of term/token counts. y : None This parameter is not needed to compute tf-idf. Returns ------- self : object Fitted transformer. """ self._validate_params() X = self._validate_data(X, accept_sparse=('csr', 'csc'), accept_large_sparse=not _IS_32BIT) if not sp.issparse(X): X = sp.csr_matrix(X) dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64 if self.use_idf: (n_samples, n_features) = X.shape if sp.isspmatrix_csr(X): df = np.bincount(X.indices, minlength=X.shape[1]) else: df = np.diff(X.indptr) df = df.astype(dtype, copy=False) df += int(self.smooth_idf) n_samples += int(self.smooth_idf) idf = np.log(n_samples / df) + 1 self._idf_diag = sp.diags(idf, offsets=0, shape=(n_features, n_features), format='csr', dtype=dtype) return self
def fit(self, X, y=None): """Learn the idf vector (global term weights). Parameters ---------- X : sparse matrix of shape (n_samples, n_features) A matrix of term/token counts. y : None This parameter is not needed to compute tf-idf. Returns ------- self : object Fitted transformer. """ self._validate_params() X = self._validate_data(X, accept_sparse=('csr', 'csc'), accept_large_sparse=not _IS_32BIT) if not sp.issparse(X): X = sp.csr_matrix(X) dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64 if self.use_idf: (n_samples, n_features) = X.shape <DeepExtract> if sp.isspmatrix_csr(X): df = np.bincount(X.indices, minlength=X.shape[1]) else: df = np.diff(X.indptr) </DeepExtract> df = df.astype(dtype, copy=False) df += int(self.smooth_idf) n_samples += int(self.smooth_idf) idf = np.log(n_samples / df) + 1 self._idf_diag = sp.diags(idf, offsets=0, shape=(n_features, n_features), format='csr', dtype=dtype) return self
@ignore_warnings(category=FutureWarning) def check_transformers_unfitted_stateless(name, transformer): """Check that using transform without prior fitting doesn't raise a NotFittedError for stateless transformers. """ rng = np.random.RandomState(0) X = rng.uniform(size=(20, 5)) if '1darray' in _safe_tags(transformer, key='X_types'): X = X[:, 0] if _safe_tags(transformer, key='requires_positive_X'): X = X - X.min() if 'categorical' in _safe_tags(transformer, key='X_types'): X = (X - X.min()).astype(np.int32) if transformer.__class__.__name__ == 'SkewedChi2Sampler': X = X - X.min() if _is_pairwise_metric(transformer): X = pairwise_distances(X, metric='euclidean') elif _safe_tags(transformer, key='pairwise'): X = kernel(X, X) X = X transformer = clone(transformer) X_trans = transformer.transform(X) assert X_trans.shape[0] == X.shape[0]
@ignore_warnings(category=FutureWarning) def check_transformers_unfitted_stateless(name, transformer): """Check that using transform without prior fitting doesn't raise a NotFittedError for stateless transformers. """ rng = np.random.RandomState(0) X = rng.uniform(size=(20, 5)) <DeepExtract> if '1darray' in _safe_tags(transformer, key='X_types'): X = X[:, 0] if _safe_tags(transformer, key='requires_positive_X'): X = X - X.min() if 'categorical' in _safe_tags(transformer, key='X_types'): X = (X - X.min()).astype(np.int32) if transformer.__class__.__name__ == 'SkewedChi2Sampler': X = X - X.min() if _is_pairwise_metric(transformer): X = pairwise_distances(X, metric='euclidean') elif _safe_tags(transformer, key='pairwise'): X = kernel(X, X) X = X </DeepExtract> transformer = clone(transformer) X_trans = transformer.transform(X) assert X_trans.shape[0] == X.shape[0]
@ignore_warnings def check_fit_score_takes_y(name, estimator_orig): rnd = np.random.RandomState(0) n_samples = 30 X = rnd.uniform(size=(n_samples, 3)) if '1darray' in _safe_tags(estimator_orig, key='X_types'): X = X[:, 0] if _safe_tags(estimator_orig, key='requires_positive_X'): X = X - X.min() if 'categorical' in _safe_tags(estimator_orig, key='X_types'): X = (X - X.min()).astype(np.int32) if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler': X = X - X.min() if _is_pairwise_metric(estimator_orig): X = pairwise_distances(X, metric='euclidean') elif _safe_tags(estimator_orig, key='pairwise'): X = kernel(X, X) X = X y = np.arange(n_samples) % 3 estimator = clone(estimator_orig) if _safe_tags(estimator, key='requires_positive_y'): y += 1 + abs(y.min()) if _safe_tags(estimator, key='binary_only') and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) if _safe_tags(estimator, key='multioutput_only'): y = np.reshape(y, (-1, 1)) y = y set_random_state(estimator) funcs = ['fit', 'score', 'partial_fit', 'fit_predict', 'fit_transform'] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = [p.name for p in signature(func).parameters.values()] if args[0] == 'self': args = args[1:] assert args[1] in ['y', 'Y'], 'Expected y or Y as second argument for method %s of %s. Got arguments: %r.' % (func_name, type(estimator).__name__, args)
@ignore_warnings def check_fit_score_takes_y(name, estimator_orig): rnd = np.random.RandomState(0) n_samples = 30 X = rnd.uniform(size=(n_samples, 3)) <DeepExtract> if '1darray' in _safe_tags(estimator_orig, key='X_types'): X = X[:, 0] if _safe_tags(estimator_orig, key='requires_positive_X'): X = X - X.min() if 'categorical' in _safe_tags(estimator_orig, key='X_types'): X = (X - X.min()).astype(np.int32) if estimator_orig.__class__.__name__ == 'SkewedChi2Sampler': X = X - X.min() if _is_pairwise_metric(estimator_orig): X = pairwise_distances(X, metric='euclidean') elif _safe_tags(estimator_orig, key='pairwise'): X = kernel(X, X) X = X </DeepExtract> y = np.arange(n_samples) % 3 estimator = clone(estimator_orig) <DeepExtract> if _safe_tags(estimator, key='requires_positive_y'): y += 1 + abs(y.min()) if _safe_tags(estimator, key='binary_only') and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) if _safe_tags(estimator, key='multioutput_only'): y = np.reshape(y, (-1, 1)) y = y </DeepExtract> set_random_state(estimator) funcs = ['fit', 'score', 'partial_fit', 'fit_predict', 'fit_transform'] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = [p.name for p in signature(func).parameters.values()] if args[0] == 'self': args = args[1:] assert args[1] in ['y', 'Y'], 'Expected y or Y as second argument for method %s of %s. Got arguments: %r.' % (func_name, type(estimator).__name__, args)
@pytest.mark.filterwarnings('ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*') @pytest.mark.filterwarnings('ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*') @pytest.mark.filterwarnings('ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*') @pytest.mark.skipif(not pyamg_available, reason='PyAMG is required for the tests in this function.') @pytest.mark.parametrize('dtype', (np.float32, np.float64)) def test_spectral_embedding_amg_solver(dtype, seed=36): se_amg = SpectralEmbedding(n_components=2, affinity='nearest_neighbors', eigen_solver='amg', n_neighbors=5, random_state=np.random.RandomState(seed)) se_arpack = SpectralEmbedding(n_components=2, affinity='nearest_neighbors', eigen_solver='arpack', n_neighbors=5, random_state=np.random.RandomState(seed)) embed_amg = se_amg.fit_transform(S.astype(dtype)) embed_arpack = se_arpack.fit_transform(S.astype(dtype)) tol_squared = 1e-05 ** 2 for (A_col, B_col) in zip(embed_amg.T, embed_arpack.T): assert np.max((A_col - B_col) ** 2) <= tol_squared or np.max((A_col + B_col) ** 2) <= tol_squared row = [0, 0, 1, 2, 3, 3, 4] col = [1, 2, 2, 3, 4, 5, 5] val = [100, 100, 100, 1, 100, 100, 100] affinity = sparse.coo_matrix((val + val, (row + col, col + row)), shape=(6, 6)).toarray() se_amg.affinity = 'precomputed' se_arpack.affinity = 'precomputed' embed_amg = se_amg.fit_transform(affinity.astype(dtype)) embed_arpack = se_arpack.fit_transform(affinity.astype(dtype)) tol_squared = 1e-05 ** 2 for (A_col, B_col) in zip(embed_amg.T, embed_arpack.T): assert np.max((A_col - B_col) ** 2) <= tol_squared or np.max((A_col + B_col) ** 2) <= tol_squared
@pytest.mark.filterwarnings('ignore:scipy.rand is deprecated:DeprecationWarning:pyamg.*') @pytest.mark.filterwarnings('ignore:`np.float` is a deprecated alias:DeprecationWarning:pyamg.*') @pytest.mark.filterwarnings('ignore:scipy.linalg.pinv2 is deprecated:DeprecationWarning:pyamg.*') @pytest.mark.skipif(not pyamg_available, reason='PyAMG is required for the tests in this function.') @pytest.mark.parametrize('dtype', (np.float32, np.float64)) def test_spectral_embedding_amg_solver(dtype, seed=36): se_amg = SpectralEmbedding(n_components=2, affinity='nearest_neighbors', eigen_solver='amg', n_neighbors=5, random_state=np.random.RandomState(seed)) se_arpack = SpectralEmbedding(n_components=2, affinity='nearest_neighbors', eigen_solver='arpack', n_neighbors=5, random_state=np.random.RandomState(seed)) embed_amg = se_amg.fit_transform(S.astype(dtype)) embed_arpack = se_arpack.fit_transform(S.astype(dtype)) <DeepExtract> tol_squared = 1e-05 ** 2 for (A_col, B_col) in zip(embed_amg.T, embed_arpack.T): assert np.max((A_col - B_col) ** 2) <= tol_squared or np.max((A_col + B_col) ** 2) <= tol_squared </DeepExtract> row = [0, 0, 1, 2, 3, 3, 4] col = [1, 2, 2, 3, 4, 5, 5] val = [100, 100, 100, 1, 100, 100, 100] affinity = sparse.coo_matrix((val + val, (row + col, col + row)), shape=(6, 6)).toarray() se_amg.affinity = 'precomputed' se_arpack.affinity = 'precomputed' embed_amg = se_amg.fit_transform(affinity.astype(dtype)) embed_arpack = se_arpack.fit_transform(affinity.astype(dtype)) <DeepExtract> tol_squared = 1e-05 ** 2 for (A_col, B_col) in zip(embed_amg.T, embed_arpack.T): assert np.max((A_col - B_col) ** 2) <= tol_squared or np.max((A_col + B_col) ** 2) <= tol_squared </DeepExtract>
def decision_function(self, X): """Compute the decision function of the given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- decision : ndarray of shape (n_samples,) Decision function of the samples. It is equal to the shifted Mahalanobis distances. The threshold for being an outlier is 0, which ensures compatibility with other outlier detection algorithms. """ check_is_fitted(self) check_is_fitted(self) negative_mahal_dist = -self.mahalanobis(X) return negative_mahal_dist - self.offset_
def decision_function(self, X): """Compute the decision function of the given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- decision : ndarray of shape (n_samples,) Decision function of the samples. It is equal to the shifted Mahalanobis distances. The threshold for being an outlier is 0, which ensures compatibility with other outlier detection algorithms. """ check_is_fitted(self) <DeepExtract> check_is_fitted(self) negative_mahal_dist = -self.mahalanobis(X) </DeepExtract> return negative_mahal_dist - self.offset_
def _dense_fit(X, strategy, missing_values, fill_value): """Fit the transformer on dense data.""" missing_mask = _get_mask(X, missing_values) masked_X = ma.masked_array(X, mask=missing_mask) super()._fit_indicator(missing_mask) if strategy == 'mean': mean_masked = np.ma.mean(masked_X, axis=0) mean = np.ma.getdata(mean_masked) mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan return mean elif strategy == 'median': median_masked = np.ma.median(masked_X, axis=0) median = np.ma.getdata(median_masked) median[np.ma.getmaskarray(median_masked)] = 0 if self.keep_empty_features else np.nan return median elif strategy == 'most_frequent': X = X.transpose() mask = missing_mask.transpose() if X.dtype.kind == 'O': most_frequent = np.empty(X.shape[0], dtype=object) else: most_frequent = np.empty(X.shape[0]) for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])): row_mask = np.logical_not(row_mask).astype(bool) row = row[row_mask] if len(row) == 0 and self.keep_empty_features: most_frequent[i] = 0 else: if row.size > 0: if row.dtype == object: counter = Counter(row) most_frequent_count = counter.most_common(1)[0][1] most_frequent_value = min((value for (value, count) in counter.items() if count == most_frequent_count)) else: mode = _mode(row) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 if most_frequent_count == 0 and 0 == 0: most_frequent[i] = np.nan elif most_frequent_count < 0: most_frequent[i] = np.nan elif most_frequent_count > 0: most_frequent[i] = most_frequent_value elif most_frequent_count == 0: most_frequent[i] = min(most_frequent_value, np.nan) return most_frequent elif strategy == 'constant': return np.full(X.shape[1], fill_value, dtype=X.dtype)
def _dense_fit(X, strategy, missing_values, fill_value): """Fit the transformer on dense data.""" missing_mask = _get_mask(X, missing_values) masked_X = ma.masked_array(X, mask=missing_mask) super()._fit_indicator(missing_mask) if strategy == 'mean': mean_masked = np.ma.mean(masked_X, axis=0) mean = np.ma.getdata(mean_masked) mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan return mean elif strategy == 'median': median_masked = np.ma.median(masked_X, axis=0) median = np.ma.getdata(median_masked) median[np.ma.getmaskarray(median_masked)] = 0 if self.keep_empty_features else np.nan return median elif strategy == 'most_frequent': X = X.transpose() mask = missing_mask.transpose() if X.dtype.kind == 'O': most_frequent = np.empty(X.shape[0], dtype=object) else: most_frequent = np.empty(X.shape[0]) for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])): row_mask = np.logical_not(row_mask).astype(bool) row = row[row_mask] if len(row) == 0 and self.keep_empty_features: most_frequent[i] = 0 else: <DeepExtract> if row.size > 0: if row.dtype == object: counter = Counter(row) most_frequent_count = counter.most_common(1)[0][1] most_frequent_value = min((value for (value, count) in counter.items() if count == most_frequent_count)) else: mode = _mode(row) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 if most_frequent_count == 0 and 0 == 0: most_frequent[i] = np.nan elif most_frequent_count < 0: most_frequent[i] = np.nan elif most_frequent_count > 0: most_frequent[i] = most_frequent_value elif most_frequent_count == 0: most_frequent[i] = min(most_frequent_value, np.nan) </DeepExtract> return most_frequent elif strategy == 'constant': return np.full(X.shape[1], fill_value, dtype=X.dtype)
def predict_proba(self, X, check_input=True): """Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1 The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ check_is_fitted(self) if check_input: X = self._validate_data(X, dtype=DTYPE, accept_sparse='csr', reset=False) if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc): raise ValueError('No support for np.int64 index based sparse matrices') else: self._check_n_features(X, reset=False) X = X proba = self.tree_.predict(X) if self.n_outputs_ == 1: proba = proba[:, :self.n_classes_] normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba else: all_proba = [] for k in range(self.n_outputs_): proba_k = proba[:, k, :self.n_classes_[k]] normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer all_proba.append(proba_k) return all_proba
def predict_proba(self, X, check_input=True): """Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1 The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ check_is_fitted(self) <DeepExtract> if check_input: X = self._validate_data(X, dtype=DTYPE, accept_sparse='csr', reset=False) if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc): raise ValueError('No support for np.int64 index based sparse matrices') else: self._check_n_features(X, reset=False) X = X </DeepExtract> proba = self.tree_.predict(X) if self.n_outputs_ == 1: proba = proba[:, :self.n_classes_] normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba else: all_proba = [] for k in range(self.n_outputs_): proba_k = proba[:, k, :self.n_classes_[k]] normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer all_proba.append(proba_k) return all_proba
def _transform(self, T): """`_transform` is called by both `transform` and `predict` methods. Since `transform` is wrapped to output arrays of specific types (e.g. NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform` directly. The above behaviour could be changed in the future, if we decide to output other type of arrays when calling `predict`. """ if hasattr(self, 'X_thresholds_'): dtype = self.X_thresholds_.dtype else: dtype = np.float64 T = check_array(T, dtype=dtype, ensure_2d=False) if not (T.ndim == 1 or (T.ndim == 2 and T.shape[1] == 1)): msg = 'Isotonic regression input X should be a 1d array or 2d array with 1 feature' raise ValueError(msg) T = T.reshape(-1) if self.out_of_bounds == 'clip': T = np.clip(T, self.X_min_, self.X_max_) res = self.f_(T) res = res.astype(T.dtype) return res
def _transform(self, T): """`_transform` is called by both `transform` and `predict` methods. Since `transform` is wrapped to output arrays of specific types (e.g. NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform` directly. The above behaviour could be changed in the future, if we decide to output other type of arrays when calling `predict`. """ if hasattr(self, 'X_thresholds_'): dtype = self.X_thresholds_.dtype else: dtype = np.float64 T = check_array(T, dtype=dtype, ensure_2d=False) <DeepExtract> if not (T.ndim == 1 or (T.ndim == 2 and T.shape[1] == 1)): msg = 'Isotonic regression input X should be a 1d array or 2d array with 1 feature' raise ValueError(msg) </DeepExtract> T = T.reshape(-1) if self.out_of_bounds == 'clip': T = np.clip(T, self.X_min_, self.X_max_) res = self.f_(T) res = res.astype(T.dtype) return res
def check_classification_targets(y): """Ensure that target y is of a non-regression type. Only the following target types (as defined in type_of_target) are allowed: 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences' Parameters ---------- y : array-like Target values. """ (xp, is_array_api) = get_namespace(y) valid = (isinstance(y, Sequence) or issparse(y) or hasattr(y, '__array__')) and (not isinstance(y, str)) or is_array_api if not valid: raise ValueError('Expected array-like (array or non-string sequence), got %r' % y) sparse_pandas = y.__class__.__name__ in ['SparseSeries', 'SparseArray'] if sparse_pandas: raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'") if is_multilabel(y): y_type = 'multilabel-indicator' check_y_kwargs = dict(accept_sparse=True, allow_nd=True, force_all_finite=False, ensure_2d=False, ensure_min_samples=0, ensure_min_features=0) with warnings.catch_warnings(): warnings.simplefilter('error', np.VisibleDeprecationWarning) if not issparse(y): try: y = check_array(y, dtype=None, **check_y_kwargs) except (np.VisibleDeprecationWarning, ValueError) as e: if str(e).startswith('Complex data not supported'): raise y = check_array(y, dtype=object, **check_y_kwargs) try: if not hasattr(y[0], '__array__') and isinstance(y[0], Sequence) and (not isinstance(y[0], str)): raise ValueError('You appear to be using a legacy multi-label data representation. Sequence of sequences are no longer supported; use a binary array or sparse matrix instead - the MultiLabelBinarizer transformer can convert to this format.') except IndexError: pass if y.ndim not in (1, 2): y_type = 'unknown' if not min(y.shape): if y.ndim == 1: y_type = 'binary' y_type = 'unknown' if not issparse(y) and y.dtype == object and (not isinstance(y.flat[0], str)): y_type = 'unknown' if y.ndim == 2 and y.shape[1] > 1: suffix = '-multioutput' else: suffix = '' if y.dtype.kind == 'f': data = y.data if issparse(y) else y if xp.any(data != data.astype(int)): _assert_all_finite(data, input_name='y') y_type = 'continuous' + suffix first_row = y[0] if not issparse(y) else y.getrow(0).data if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1): y_type = 'multiclass' + suffix else: y_type = 'binary' if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences']: raise ValueError('Unknown label type: %r' % y_type)
def check_classification_targets(y): """Ensure that target y is of a non-regression type. Only the following target types (as defined in type_of_target) are allowed: 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences' Parameters ---------- y : array-like Target values. """ <DeepExtract> (xp, is_array_api) = get_namespace(y) valid = (isinstance(y, Sequence) or issparse(y) or hasattr(y, '__array__')) and (not isinstance(y, str)) or is_array_api if not valid: raise ValueError('Expected array-like (array or non-string sequence), got %r' % y) sparse_pandas = y.__class__.__name__ in ['SparseSeries', 'SparseArray'] if sparse_pandas: raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'") if is_multilabel(y): y_type = 'multilabel-indicator' check_y_kwargs = dict(accept_sparse=True, allow_nd=True, force_all_finite=False, ensure_2d=False, ensure_min_samples=0, ensure_min_features=0) with warnings.catch_warnings(): warnings.simplefilter('error', np.VisibleDeprecationWarning) if not issparse(y): try: y = check_array(y, dtype=None, **check_y_kwargs) except (np.VisibleDeprecationWarning, ValueError) as e: if str(e).startswith('Complex data not supported'): raise y = check_array(y, dtype=object, **check_y_kwargs) try: if not hasattr(y[0], '__array__') and isinstance(y[0], Sequence) and (not isinstance(y[0], str)): raise ValueError('You appear to be using a legacy multi-label data representation. Sequence of sequences are no longer supported; use a binary array or sparse matrix instead - the MultiLabelBinarizer transformer can convert to this format.') except IndexError: pass if y.ndim not in (1, 2): y_type = 'unknown' if not min(y.shape): if y.ndim == 1: y_type = 'binary' y_type = 'unknown' if not issparse(y) and y.dtype == object and (not isinstance(y.flat[0], str)): y_type = 'unknown' if y.ndim == 2 and y.shape[1] > 1: suffix = '-multioutput' else: suffix = '' if y.dtype.kind == 'f': data = y.data if issparse(y) else y if xp.any(data != data.astype(int)): _assert_all_finite(data, input_name='y') y_type = 'continuous' + suffix first_row = y[0] if not issparse(y) else y.getrow(0).data if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1): y_type = 'multiclass' + suffix else: y_type = 'binary' </DeepExtract> if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences']: raise ValueError('Unknown label type: %r' % y_type)