From 996ccf469caf1f7095839ad596ee88d13b10f8ec Mon Sep 17 00:00:00 2001
From: Philipp Weiler
Date: Thu, 11 Jul 2024 13:35:10 +0000
Subject: [PATCH] Update densifying sparse array

Switch from `.A` to `.toarray()`
---
scvelo/core/_anndata.py | 4 +--
scvelo/core/_linear_models.py | 2 +-
scvelo/inference/_metabolic_labeling.py | 2 +-
scvelo/plotting/heatmap.py | 2 +-
scvelo/plotting/utils.py | 4 +--
scvelo/plotting/velocity.py | 2 +-
scvelo/plotting/velocity_graph.py | 2 +-
scvelo/preprocessing/moments.py | 12 ++++----
scvelo/preprocessing/neighbors.py | 4 +--
scvelo/tools/optimization.py | 6 ++--
scvelo/tools/paga.py | 4 +--
scvelo/tools/run.py | 6 ++--
scvelo/tools/terminal_states.py | 4 +--
scvelo/tools/transition_matrix.py | 2 +-
scvelo/tools/utils.py | 10 +++---
scvelo/tools/velocity_embedding.py | 4 +--
scvelo/tools/velocity_graph.py | 6 ++--
tests/core/test_anndata.py | 4 +--
tests/preprocessing/test_moments.py | 18 ++++++------
tests/preprocessing/test_neighbors.py | 38 ++++++++++++-------------
tests/preprocessing/test_utils.py | 14 ++++-----
21 files changed, 75 insertions(+), 75 deletions(-)

diff --git a/scvelo/core/_anndata.py b/scvelo/core/_anndata.py
index 2a05666f..9a2021d5 100644
--- a/scvelo/core/_anndata.py
+++ b/scvelo/core/_anndata.py
@@ -284,7 +284,7 @@ def get_df(
df = data
if issparse(df):
- df = np.array(df.A)
+ df = np.array(df.toarray())
if columns is None and hasattr(df, "names"):
columns = df.names
@@ -426,7 +426,7 @@ def make_dense(
for modality in modalities:
count_data = get_modality(adata=adata, modality=modality)
if issparse(count_data):
- set_modality(adata=adata, modality=modality, new_value=count_data.A)
+ set_modality(adata=adata, modality=modality, new_value=count_data.toarray())
return adata if not inplace else None
diff --git a/scvelo/core/_linear_models.py b/scvelo/core/_linear_models.py
index 05d3c673..59bdc317 100644
--- a/scvelo/core/_linear_models.py
+++ b/scvelo/core/_linear_models.py
@@ -73,7 +73,7 @@ def _trim_data(self, data: List) -> List:
data = [data]
data = np.array(
- [data_mat.A if issparse(data_mat) else data_mat for data_mat in data]
+ [data_mat.toarray() if issparse(data_mat) else data_mat for data_mat in data]
)
# TODO: Add explanatory comment
diff --git a/scvelo/inference/_metabolic_labeling.py b/scvelo/inference/_metabolic_labeling.py
index 545cad22..9987404c 100644
--- a/scvelo/inference/_metabolic_labeling.py
+++ b/scvelo/inference/_metabolic_labeling.py
@@ -121,7 +121,7 @@ def _get_n_neighbors(
rep_X = rep_X[rows, cols]
if sparse_op:
- n_neighbors_to_use = np.cumsum(rep_X.A > 0, axis=1)
+ n_neighbors_to_use = np.cumsum(rep_X.toarray() > 0, axis=1)
else:
n_neighbors_to_use = np.cumsum(rep_X > 0, axis=1)
diff --git a/scvelo/plotting/heatmap.py b/scvelo/plotting/heatmap.py
index b24a7c10..69fd5f48 100644
--- a/scvelo/plotting/heatmap.py
+++ b/scvelo/plotting/heatmap.py
@@ -97,7 +97,7 @@ def heatmap(
else adata[:, var_names].X
)
if issparse(X):
- X = X.A
+ X = X.toarray()
df = pd.DataFrame(X[np.argsort(time)], columns=var_names)
if n_convolve is not None:
diff --git a/scvelo/plotting/utils.py b/scvelo/plotting/utils.py
index 2804f67c..e4ba2c95 100644
--- a/scvelo/plotting/utils.py
+++ b/scvelo/plotting/utils.py
@@ -36,7 +36,7 @@ def make_dense(X):
"""TODO."""
if issparse(X):
- XA = X.A if X.ndim == 2 else X.A1
+ XA = X.toarray() if X.ndim == 2 else X.A1
else:
XA = X.A1 if isinstance(X, np.matrix) else X
return np.array(XA)
@@ -799,7 +799,7 @@ def interpret_colorkey(adata, c=None, layer=None, perc=None, use_raw=None):
if adata.raw
is None and use_raw: raise ValueError("AnnData object does not have `raw` counts.") c = adata.raw.obs_vector(c) if use_raw else adata.obs_vector(c) - c = c.A.flatten() if issparse(c) else c + c = c.toarray().flatten() if issparse(c) else c elif c in adata.var.keys(): # color by observation key c = adata.var[c] elif np.any([var_key in c for var_key in adata.var.keys()]): diff --git a/scvelo/plotting/velocity.py b/scvelo/plotting/velocity.py index bca90168..9b79f231 100644 --- a/scvelo/plotting/velocity.py +++ b/scvelo/plotting/velocity.py @@ -169,7 +169,7 @@ def velocity( _adata = adata[:, var] s, u = _adata.layers[skey], _adata.layers[ukey] if issparse(s): - s, u = s.A, u.A + s, u = s.toarray(), u.toarray() # spliced/unspliced phase portrait with steady-state estimate ax = pl.subplot(gs[v * nplts]) diff --git a/scvelo/plotting/velocity_graph.py b/scvelo/plotting/velocity_graph.py index 9436a092..754bb28a 100644 --- a/scvelo/plotting/velocity_graph.py +++ b/scvelo/plotting/velocity_graph.py @@ -133,7 +133,7 @@ def velocity_graph( if groups is not None: if issparse(T): - T = T.A + T = T.toarray() T[~groups_to_bool(adata, groups, color)] = 0 T = csr_matrix(T) T.eliminate_zeros() diff --git a/scvelo/preprocessing/moments.py b/scvelo/preprocessing/moments.py index 92027681..498d801a 100644 --- a/scvelo/preprocessing/moments.py +++ b/scvelo/preprocessing/moments.py @@ -89,12 +89,12 @@ def moments( adata.layers["Ms"] = ( csr_matrix.dot(connectivities, csr_matrix(adata.layers["spliced"])) .astype(np.float32) - .A + .toarray() ) adata.layers["Mu"] = ( csr_matrix.dot(connectivities, csr_matrix(adata.layers["unspliced"])) .astype(np.float32) - .A + .toarray() ) # if renormalize: normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True) @@ -130,8 +130,8 @@ def second_order_moments(adata, adjusted=False): s, u = csr_matrix(adata.layers["spliced"]), csr_matrix(adata.layers["unspliced"]) if s.shape[0] == 1: s, u = s.T, u.T - Mss = csr_matrix.dot(connectivities, s.multiply(s)).astype(np.float32).A - Mus = csr_matrix.dot(connectivities, s.multiply(u)).astype(np.float32).A + Mss = csr_matrix.dot(connectivities, s.multiply(s)).astype(np.float32).toarray() + Mus = csr_matrix.dot(connectivities, s.multiply(u)).astype(np.float32).toarray() if adjusted: Mss = 2 * Mss - adata.layers["Ms"].reshape(Mss.shape) Mus = 2 * Mus - adata.layers["Mu"].reshape(Mus.shape) @@ -157,7 +157,7 @@ def second_order_moments_u(adata): connectivities = get_connectivities(adata) u = csr_matrix(adata.layers["unspliced"]) - Muu = csr_matrix.dot(connectivities, u.multiply(u)).astype(np.float32).A + Muu = csr_matrix.dot(connectivities, u.multiply(u)).astype(np.float32).toarray() return Muu @@ -222,5 +222,5 @@ def get_moments( else: Mx = csr_matrix.dot(connectivities, X) if issparse(X): - Mx = Mx.astype(np.float32).A + Mx = Mx.astype(np.float32).toarray() return Mx diff --git a/scvelo/preprocessing/neighbors.py b/scvelo/preprocessing/neighbors.py index fe43330f..da427fc7 100644 --- a/scvelo/preprocessing/neighbors.py +++ b/scvelo/preprocessing/neighbors.py @@ -326,7 +326,7 @@ def fit(self, X, metric="l2", M=16, ef=100, ef_construction=100, random_state=0) ef_c, ef = max(ef_construction, self.n_neighbors), max(self.n_neighbors, ef) metric = "l2" if metric == "euclidean" else metric - X = X.A if issparse(X) else X + X = X.toarray() if issparse(X) else X ns, dim = X.shape knn = hnswlib.Index(space=metric, dim=dim) @@ -564,7 +564,7 @@ def get_duplicate_cells(data): vals = [val for val, count in Counter(lst).items() if count > 1] idx_dup = 
np.where(pd.Series(lst).isin(vals))[0]
- X_new = np.array(X[idx_dup].A if issparse(X) else X[idx_dup])
+ X_new = np.array(X[idx_dup].toarray() if issparse(X) else X[idx_dup])
sorted_idx = np.lexsort(X_new.T)
sorted_data = X_new[sorted_idx, :]
diff --git a/scvelo/tools/optimization.py b/scvelo/tools/optimization.py
index 59d1c669..24a5781f 100644
--- a/scvelo/tools/optimization.py
+++ b/scvelo/tools/optimization.py
@@ -12,10 +12,10 @@
# TODO: Add docstrings
def get_weight(x, y=None, perc=95):
"""TODO."""
- xy_norm = np.array(x.A if issparse(x) else x)
+ xy_norm = np.array(x.toarray() if issparse(x) else x)
if y is not None:
if issparse(y):
- y = y.A
+ y = y.toarray()
xy_norm = xy_norm / np.clip(np.max(xy_norm, axis=0), 1e-3, None)
xy_norm += y / np.clip(np.max(y, axis=0), 1e-3, None)
if isinstance(perc, numbers.Number):
@@ -84,7 +84,7 @@ def optimize_NxN(x, y, fit_offset=False, perc=None):
perc = perc[1]
weights = get_weight(x, y, perc).astype(bool)
if issparse(weights):
- weights = weights.A
+ weights = weights.toarray()
else:
weights = None
diff --git a/scvelo/tools/paga.py b/scvelo/tools/paga.py
index fe1478c8..03ae7c09 100644
--- a/scvelo/tools/paga.py
+++ b/scvelo/tools/paga.py
@@ -150,7 +150,7 @@ def compute_transitions(self):
transitions_conf.eliminate_zeros()
# remove non-confident direct paths if more confident indirect path is found.
- T = transitions_conf.A
+ T = transitions_conf.toarray()
threshold = max(np.nanmin(np.nanmax(T / (T > 0), axis=0)) - 1e-6, 0.01)
T *= T > threshold
for i in range(len(T)):
@@ -169,7 +169,7 @@
T_tmp[np.where(T_num[:, i])[0][0], i] = T_max
from scipy.sparse.csgraph import minimum_spanning_tree
- T_tmp = np.abs(minimum_spanning_tree(-T_tmp).A) > 0
+ T_tmp = np.abs(minimum_spanning_tree(-T_tmp).toarray()) > 0
T = T_tmp * T
transitions_conf = csr_matrix(T)
diff --git a/scvelo/tools/run.py b/scvelo/tools/run.py
index 827733e9..6e72e33c 100644
--- a/scvelo/tools/run.py
+++ b/scvelo/tools/run.py
@@ -109,12 +109,12 @@ def __init__(self, adata, basis=None):
self.S = adata.layers["spliced"].T
self.U = adata.layers["unspliced"].T
self.S = (
- np.array(self.S.A, **kwargs)
+ np.array(self.S.toarray(), **kwargs)
if issparse(self.S)
else np.array(self.S, **kwargs)
)
self.U = (
- np.array(self.U.A, **kwargs)
+ np.array(self.U.toarray(), **kwargs)
if issparse(self.U)
else np.array(self.U, **kwargs)
)
@@ -146,7 +146,7 @@
if "ambiguous" in adata.layers.keys():
self.A = np.array(adata.layers["ambiguous"].T)
if issparse(self.A):
- self.A = self.A.A
+ self.A = self.A.toarray()
self.ca = {}
self.ra = {}
diff --git a/scvelo/tools/terminal_states.py b/scvelo/tools/terminal_states.py
index 4131fdcc..bc5f05f9 100644
--- a/scvelo/tools/terminal_states.py
+++ b/scvelo/tools/terminal_states.py
@@ -57,7 +57,7 @@ def cell_fate(
T = transition_matrix(_adata, self_transitions=self_transitions)
fate = np.linalg.inv(np.eye(_adata.n_obs) - T)
if issparse(T):
- fate = fate.A
+ fate = fate.toarray()
cell_fates =
np.array(_adata.obs[groupby][fate.argmax(1)]) if disconnected_groups is not None: idx = _adata.obs[groupby].isin(disconnected_groups) @@ -123,7 +123,7 @@ def cell_origin( T = transition_matrix(_adata, self_transitions=self_transitions, backward=True) fate = np.linalg.inv(np.eye(_adata.n_obs) - T) if issparse(T): - fate = fate.A + fate = fate.toarray() cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)]) if disconnected_groups is not None: idx = _adata.obs[groupby].isin(disconnected_groups) diff --git a/scvelo/tools/transition_matrix.py b/scvelo/tools/transition_matrix.py index bab7e2a6..5af048e7 100644 --- a/scvelo/tools/transition_matrix.py +++ b/scvelo/tools/transition_matrix.py @@ -100,7 +100,7 @@ def transition_matrix( graph = graph.multiply(basis_graph) if self_transitions: - confidence = graph.max(1).A.flatten() + confidence = graph.max(1).toarray().flatten() ub = np.percentile(confidence, 98) self_prob = np.clip(ub - confidence, 0, 1) graph.setdiag(self_prob) diff --git a/scvelo/tools/utils.py b/scvelo/tools/utils.py index 3f4ffce8..29c3c361 100644 --- a/scvelo/tools/utils.py +++ b/scvelo/tools/utils.py @@ -33,7 +33,7 @@ def mean(x, axis=0): # TODO: Add docstrings def make_dense(X): """TODO.""" - XA = X.A if issparse(X) and X.ndim == 2 else X.A1 if issparse(X) else X + XA = X.toarray() if issparse(X) and X.ndim == 2 else X.A1 if issparse(X) else X if XA.ndim == 2: XA = XA[0] if XA.shape[0] == 1 else XA[:, 0] if XA.shape[1] == 1 else XA return np.array(XA) @@ -327,8 +327,8 @@ def cutoff_small_velocities( x = adata.layers["spliced"] if use_raw else adata.layers["Ms"] y = adata.layers["unspliced"] if use_raw else adata.layers["Mu"] - x_max = x.max(0).A[0] if issparse(x) else x.max(0) - y_max = y.max(0).A[0] if issparse(y) else y.max(0) + x_max = x.max(0).toarray()[0] if issparse(x) else x.max(0) + y_max = y.max(0).toarray()[0] if issparse(y) else y.max(0) xy_norm = x / np.clip(x_max, 1e-3, None) + y / np.clip(y_max, 1e-3, None) W = xy_norm >= np.percentile(xy_norm, 98, axis=0) * frac_of_max @@ -449,9 +449,9 @@ def vcorrcoef(X, y, mode="pearsons", axis=-1): Which correlation metric to use. 
""" if issparse(X): - X = np.array(X.A) + X = np.array(X.toarray()) if issparse(y): - y = np.array(y.A) + y = np.array(y.toarray()) if axis == 0: if X.ndim > 1: X = np.array(X.T) diff --git a/scvelo/tools/velocity_embedding.py b/scvelo/tools/velocity_embedding.py index b0e4cef0..20a455b1 100644 --- a/scvelo/tools/velocity_embedding.py +++ b/scvelo/tools/velocity_embedding.py @@ -158,7 +158,7 @@ def velocity_embedding( T.eliminate_zeros() densify = adata.n_obs < 1e4 - TA = T.A if densify else None + TA = T.toarray() if densify else None with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -179,7 +179,7 @@ def velocity_embedding( ) delta = T.dot(X[:, vgenes]) - X[:, vgenes] if issparse(delta): - delta = delta.A + delta = delta.toarray() cos_proj = (V * delta).sum(1) / l2_norm(delta) V_emb *= np.clip(cos_proj[:, None] * 10, 0, 1) diff --git a/scvelo/tools/velocity_graph.py b/scvelo/tools/velocity_graph.py index 9c566144..faba33ad 100644 --- a/scvelo/tools/velocity_graph.py +++ b/scvelo/tools/velocity_graph.py @@ -69,12 +69,12 @@ def __init__( xkey = xkey if xkey in adata.layers.keys() else "spliced" X = np.array( - adata.layers[xkey].A[:, subset] + adata.layers[xkey].toarray()[:, subset] if issparse(adata.layers[xkey]) else adata.layers[xkey][:, subset] ) V = np.array( - adata.layers[vkey].A[:, subset] + adata.layers[vkey].toarray()[:, subset] if issparse(adata.layers[vkey]) else adata.layers[vkey][:, subset] ) @@ -209,7 +209,7 @@ def compute_cosines( ) self.uncertainties.eliminate_zeros() - confidence = self.graph.max(1).A.flatten() + confidence = self.graph.max(1).toarray().flatten() self.self_prob = np.clip(np.percentile(confidence, 98) - confidence, 0, 1) def _compute_cosines(self, obs_idx, queue): diff --git a/tests/core/test_anndata.py b/tests/core/test_anndata.py index c1c72e21..031667eb 100644 --- a/tests/core/test_anndata.py +++ b/tests/core/test_anndata.py @@ -126,9 +126,9 @@ def test_cleanup_all( if dense: if layer is None: - adata.X = adata.X.A + adata.X = adata.X.toarray() else: - adata.layers[layer] = adata.layers[layer].A + adata.layers[layer] = adata.layers[layer].toarray() returned_adata = cleanup(adata=adata, clean="all", inplace=inplace) if not inplace: diff --git a/tests/preprocessing/test_moments.py b/tests/preprocessing/test_moments.py index b9bdfd58..72489be6 100644 --- a/tests/preprocessing/test_moments.py +++ b/tests/preprocessing/test_moments.py @@ -42,9 +42,9 @@ def test_first_moments( if dense: if layer is None: - adata.X = adata.X.A + adata.X = adata.X.toarray() else: - adata.layers[layer] = adata.layers[layer].A + adata.layers[layer] = adata.layers[layer].toarray() first_order_moment = get_moments(adata=adata, layer=layer, mode=mode) assert isinstance(first_order_moment, np.ndarray) @@ -69,9 +69,9 @@ def test_second_moments( if dense: if layer is None: - adata.X = adata.X.A + adata.X = adata.X.toarray() else: - adata.layers[layer] = adata.layers[layer].A + adata.layers[layer] = adata.layers[layer].toarray() second_order_moment = get_moments( adata=adata, layer=layer, mode=mode, second_order=True, centered=False @@ -98,9 +98,9 @@ def test_passing_array_for_layer( if dense: if layer is None: - adata.X = adata.X.A + adata.X = adata.X.toarray() else: - adata.layers[layer] = adata.layers[layer].A + adata.layers[layer] = adata.layers[layer].toarray() if layer is None: first_order_moment = get_moments(adata=adata, layer=adata.X, mode=mode) @@ -181,13 +181,13 @@ def _compare_adatas(self, adata_1, adata_2): assert set(adata_1.obsp) == {"distances", 
"connectivities"} assert issparse(adata_1.obsp["connectivities"]) np.testing.assert_almost_equal( - adata_1.obsp["connectivities"].A, - adata_2.obsp["connectivities"].A, + adata_1.obsp["connectivities"].toarray(), + adata_2.obsp["connectivities"].toarray(), decimal=4, ) assert issparse(adata_1.obsp["distances"]) np.testing.assert_almost_equal( - adata_1.obsp["distances"].A, adata_2.obsp["distances"].A, decimal=3 + adata_1.obsp["distances"].toarray(), adata_2.obsp["distances"].toarray(), decimal=3 ) # Check `.uns` is unchanged diff --git a/tests/preprocessing/test_neighbors.py b/tests/preprocessing/test_neighbors.py index 59b3c8f7..bb14fbd6 100644 --- a/tests/preprocessing/test_neighbors.py +++ b/tests/preprocessing/test_neighbors.py @@ -165,7 +165,7 @@ def test_recursed_neighbors_real_data( ) assert issparse(connectivities) - np.testing.assert_almost_equal(connectivities.A, ground_truth.A) + np.testing.assert_almost_equal(connectivities.toarray(), ground_truth.toarray()) class TestGetCsrFromIndices: @@ -378,7 +378,7 @@ def test_neighbors_with_X_pca( assert issparse(neighbors.distances) assert (neighbors.distances.getnnz(axis=1) == n_neighbors - 1).all() np.testing.assert_almost_equal( - neighbors.distances.A, ground_truth_distances.A, decimal=4 + neighbors.distances.toarray(), ground_truth_distances.toarray(), decimal=4 ) assert hasattr(neighbors, "connectivities") @@ -386,7 +386,7 @@ def test_neighbors_with_X_pca( assert (neighbors.connectivities.getnnz(axis=1) >= n_neighbors - 1).all() assert (neighbors.connectivities != neighbors.connectivities.T).getnnz() == 0 np.testing.assert_almost_equal( - neighbors.connectivities.A, ground_truth_connectivities.A, decimal=4 + neighbors.connectivities.toarray(), ground_truth_connectivities.toarray(), decimal=4 ) assert hasattr(neighbors, "knn_indices") @@ -434,7 +434,7 @@ def test_neighbors_with_X( assert issparse(neighbors.distances) assert (neighbors.distances.getnnz(axis=1) == n_neighbors - 1).all() np.testing.assert_almost_equal( - neighbors.distances.A, ground_truth_distances.A, decimal=4 + neighbors.distances.toarray(), ground_truth_distances.toarray(), decimal=4 ) assert hasattr(neighbors, "connectivities") @@ -442,7 +442,7 @@ def test_neighbors_with_X( assert (neighbors.connectivities.getnnz(axis=1) >= n_neighbors - 1).all() assert (neighbors.connectivities != neighbors.connectivities.T).getnnz() == 0 np.testing.assert_almost_equal( - neighbors.connectivities.A, ground_truth_connectivities.A, decimal=4 + neighbors.connectivities.toarray(), ground_truth_connectivities.toarray(), decimal=4 ) assert hasattr(neighbors, "knn_indices") @@ -745,7 +745,7 @@ def test_neighbors_with_X_pca( assert issparse(neighbors.distances) assert (neighbors.distances.getnnz(axis=1) == n_neighbors - 1).all() np.testing.assert_almost_equal( - neighbors.distances.A, ground_truth_distances.A, decimal=4 + neighbors.distances.toarray(), ground_truth_distances.toarray(), decimal=4 ) assert hasattr(neighbors, "connectivities") @@ -753,7 +753,7 @@ def test_neighbors_with_X_pca( assert (neighbors.connectivities.getnnz(axis=1) >= n_neighbors - 1).all() assert (neighbors.connectivities != neighbors.connectivities.T).getnnz() == 0 np.testing.assert_almost_equal( - neighbors.connectivities.A, ground_truth_connectivities.A, decimal=4 + neighbors.connectivities.toarray(), ground_truth_connectivities.toarray(), decimal=4 ) @pytest.mark.parametrize("dataset", ["pancreas", "dentategyrus"]) @@ -793,7 +793,7 @@ def test_neighbors_with_X( assert issparse(neighbors.distances) assert 
(neighbors.distances.getnnz(axis=1) == n_neighbors - 1).all()
np.testing.assert_almost_equal(
- neighbors.distances.A, ground_truth_distances.A, decimal=4
+ neighbors.distances.toarray(), ground_truth_distances.toarray(), decimal=4
)
assert hasattr(neighbors, "connectivities")
@@ -801,7 +801,7 @@ def test_neighbors_with_X(
assert (neighbors.connectivities.getnnz(axis=1) >= n_neighbors - 1).all()
assert (neighbors.connectivities != neighbors.connectivities.T).getnnz() == 0
np.testing.assert_almost_equal(
- neighbors.connectivities.A, ground_truth_connectivities.A, decimal=4
+ neighbors.connectivities.toarray(), ground_truth_connectivities.toarray(), decimal=4
)
@@ -847,7 +847,7 @@ def test_neighbors_with_X_pca(
assert issparse(neighbors.distances)
assert (neighbors.distances.getnnz(axis=1) == n_neighbors - 1).all()
np.testing.assert_almost_equal(
- neighbors.distances.A, ground_truth_distances.A, decimal=4
+ neighbors.distances.toarray(), ground_truth_distances.toarray(), decimal=4
)
assert hasattr(neighbors, "connectivities")
@@ -855,7 +855,7 @@ def test_neighbors_with_X_pca(
assert (neighbors.connectivities.getnnz(axis=1) >= n_neighbors - 1).all()
assert (neighbors.connectivities != neighbors.connectivities.T).getnnz() == 0
np.testing.assert_almost_equal(
- neighbors.connectivities.A, ground_truth_connectivities.A, decimal=4
+ neighbors.connectivities.toarray(), ground_truth_connectivities.toarray(), decimal=4
)
assert hasattr(neighbors, "knn_indices")
@@ -901,7 +901,7 @@ def test_neighbors_with_X(
assert issparse(neighbors.distances)
assert (neighbors.distances.getnnz(axis=1) == n_neighbors - 1).all()
np.testing.assert_almost_equal(
- neighbors.distances.A, ground_truth_distances.A, decimal=4
+ neighbors.distances.toarray(), ground_truth_distances.toarray(), decimal=4
)
assert hasattr(neighbors, "connectivities")
@@ -909,7 +909,7 @@ def test_neighbors_with_X(
assert (neighbors.connectivities.getnnz(axis=1) >= n_neighbors - 1).all()
assert (neighbors.connectivities != neighbors.connectivities.T).getnnz() == 0
np.testing.assert_almost_equal(
- neighbors.connectivities.A, ground_truth_connectivities.A, decimal=4
+ neighbors.connectivities.toarray(), ground_truth_connectivities.toarray(), decimal=4
)
assert hasattr(neighbors, "knn_indices")
@@ -1227,7 +1227,7 @@ def test_with_pca_present(
if sparse_format:
assert issparse(adata.X)
- np.testing.assert_almost_equal(adata.X.A, X_without_duplicates)
+ np.testing.assert_almost_equal(adata.X.toarray(), X_without_duplicates)
else:
np.testing.assert_almost_equal(adata.X, X_without_duplicates)
np.testing.assert_almost_equal(adata.obsm["X_pca"], X_pca_without_duplicates)
@@ -1296,7 +1296,7 @@ def test_without_pca_present(
if sparse_format:
assert issparse(adata.X)
- np.testing.assert_almost_equal(adata.X.A, X_without_duplicates)
+ np.testing.assert_almost_equal(adata.X.toarray(), X_without_duplicates)
else:
np.testing.assert_almost_equal(adata.X, X_without_duplicates)
@@ -1373,7 +1373,7 @@ def test_neighbors_recalculated(
if sparse_format:
assert issparse(adata.X)
- np.testing.assert_almost_equal(adata.X.A, X_without_duplicates)
+ np.testing.assert_almost_equal(adata.X.toarray(), X_without_duplicates)
else:
np.testing.assert_almost_equal(adata.X, X_without_duplicates)
@@ -1811,11 +1811,11 @@ def test_pca_not_yet_calculated(
assert (adata.obs.columns == cleaned_adata.obs.columns).all()
pd.testing.assert_frame_equal(adata.obs, cleaned_adata.obs)
pd.testing.assert_frame_equal(adata.var, cleaned_adata.var)
-
np.testing.assert_almost_equal(adata.X.A, cleaned_adata.X.A) + np.testing.assert_almost_equal(adata.X.toarray(), cleaned_adata.X.toarray()) for layer in adata.layers: np.testing.assert_almost_equal( - adata.layers[layer].A, - cleaned_adata.layers[layer].A, + adata.layers[layer].toarray(), + cleaned_adata.layers[layer].toarray(), ) if n_pcs is None: diff --git a/tests/preprocessing/test_utils.py b/tests/preprocessing/test_utils.py index 718aa2bd..cf0c4219 100644 --- a/tests/preprocessing/test_utils.py +++ b/tests/preprocessing/test_utils.py @@ -275,7 +275,7 @@ def test_dense_arrays(self, X: np.ndarray, y: np.ndarray): def test_sparse_arrays(self, X: spmatrix, y: np.ndarray): pearsonr = csr_vcorrcoef(X=X, y=y) - X_dense = X.A.squeeze() + X_dense = X.toarray().squeeze() if X_dense.ndim == 1: np.testing.assert_almost_equal(np.corrcoef(X_dense, y)[0, 1], pearsonr) @@ -427,7 +427,7 @@ def test_percentile_dense_input( def test_percentile_sparse_input( self, X, percentile, lower_percentile, upper_percentile ): - clipped_array = X.A.copy().astype(float) + clipped_array = X.toarray().copy().astype(float) clipped_array[ (clipped_array <= lower_percentile) & (clipped_array != 0) ] = lower_percentile @@ -2143,7 +2143,7 @@ def test_counts_per_cell_size(self, X, obs, counts_per_cell, normed_counts): normalize_per_cell(adata, counts_per_cell=counts_per_cell) if issparse(adata.X): assert issparse(adata.X) - np.testing.assert_almost_equal(adata.X.A, normed_counts) + np.testing.assert_almost_equal(adata.X.toarray(), normed_counts) else: np.testing.assert_almost_equal(adata.X, normed_counts) @@ -2169,7 +2169,7 @@ def test_use_initial_size(self, X, obs, use_initial_size, normed_counts): normalize_per_cell(adata, use_initial_size=use_initial_size) if issparse(adata.X): assert issparse(adata.X) - np.testing.assert_almost_equal(adata.X.A, normed_counts) + np.testing.assert_almost_equal(adata.X.toarray(), normed_counts) else: np.testing.assert_almost_equal(adata.X, normed_counts) @@ -2214,7 +2214,7 @@ def test_layers(self, X, layers, layers_to_normalize): if issparse(X): assert issparse(adata.X) - np.testing.assert_almost_equal(adata.X.A, np.diag([1, 0.5])) + np.testing.assert_almost_equal(adata.X.toarray(), np.diag([1, 0.5])) else: np.testing.assert_almost_equal(adata.X, np.diag([1, 0.5])) @@ -2231,7 +2231,7 @@ def test_layers(self, X, layers, layers_to_normalize): if issparse(layers[layer]): assert issparse(adata.layers[layer]) np.testing.assert_almost_equal( - adata.layers[layer].A, normalized_layer + adata.layers[layer].toarray(), normalized_layer ) else: np.testing.assert_almost_equal( @@ -2241,7 +2241,7 @@ def test_layers(self, X, layers, layers_to_normalize): if issparse(layers[layer]): assert issparse(adata.layers[layer]) np.testing.assert_almost_equal( - adata.layers[layer].A, layers[layer].A + adata.layers[layer].toarray(), layers[layer].toarray() ) else: np.testing.assert_almost_equal(adata.layers[layer], layers[layer])
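Why `.toarray()` rather than `.A`: the `.A` property is a `numpy.matrix`-style shorthand that, as of recent SciPy releases, is only defined on the legacy `spmatrix` classes; the newer sparse-array classes (`csr_array` and friends) provide `.toarray()` but drop the `.A` shorthand. A minimal sketch of the equivalence this patch relies on; `demo` and `demo_arr` are made-up example names, not objects from the scvelo code base:

    import numpy as np
    from scipy import sparse

    # Legacy matrix interface: both spellings densify to the same ndarray.
    demo = sparse.csr_matrix(np.eye(3))
    np.testing.assert_array_equal(demo.A, demo.toarray())

    # Newer array interface: only `.toarray()` is available (assumes a SciPy
    # version that ships the sparse-array classes).
    demo_arr = sparse.csr_array(np.eye(3))
    dense = demo_arr.toarray()
    assert not hasattr(demo_arr, "A")

Going through `.toarray()` everywhere therefore keeps densification working regardless of which sparse container AnnData hands back.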