diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4e9f74162ae78..afd65b3c009ab 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9175,7 +9175,6 @@ def _where(
                 errors=errors,
                 try_cast=try_cast,
                 axis=block_axis,
-                transpose=self._AXIS_REVERSED,
             )

             return self._constructor(new_data).__finalize__(self)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 022d855d9a15b..bf6ebf1abe760 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -143,7 +143,7 @@ def _check_ndim(self, values, ndim):
             ndim = values.ndim

         if self._validate_ndim and values.ndim != ndim:
-            msg = "Wrong number of dimensions. values.ndim != ndim " "[{} != {}]"
+            msg = "Wrong number of dimensions. values.ndim != ndim [{} != {}]"
             raise ValueError(msg.format(values.ndim, ndim))

         return ndim
@@ -259,7 +259,7 @@ def make_block_same_class(self, values, placement=None, ndim=None, dtype=None):
         if dtype is not None:
             # issue 19431 fastparquet is passing this
             warnings.warn(
-                "dtype argument is deprecated, will be removed " "in a future release.",
+                "dtype argument is deprecated, will be removed in a future release.",
                 FutureWarning,
             )
         if placement is None:
@@ -399,7 +399,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
                 raise ValueError("Limit must be greater than 0")
             if self.ndim > 2:
                 raise NotImplementedError(
-                    "number of dimensions for 'fillna' " "is currently limited to 2"
+                    "number of dimensions for 'fillna' is currently limited to 2"
                 )
             mask[mask.cumsum(self.ndim - 1) > limit] = False

@@ -533,7 +533,7 @@ def downcast(self, dtypes=None):

         if not (dtypes == "infer" or isinstance(dtypes, dict)):
             raise ValueError(
-                "downcast must have a dictionary or 'infer' as " "its argument"
+                "downcast must have a dictionary or 'infer' as its argument"
             )

         # operate column-by-column
@@ -1025,7 +1025,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False)
                     or mask[mask].shape[-1] == len(new)
                     or len(new) == 1
                 ):
-                    raise ValueError("cannot assign mismatch " "length to masked array")
+                    raise ValueError("cannot assign mismatch length to masked array")

             np.putmask(new_values, mask, new)

@@ -1381,16 +1381,7 @@ def shift(self, periods, axis=0, fill_value=None):

         return [self.make_block(new_values)]

-    def where(
-        self,
-        other,
-        cond,
-        align=True,
-        errors="raise",
-        try_cast=False,
-        axis=0,
-        transpose=False,
-    ):
+    def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
         """
         evaluate the block; return result block(s) from the result

@@ -1402,10 +1393,7 @@ def where(
         errors : str, {'raise', 'ignore'}, default 'raise'
             - ``raise`` : allow exceptions to be raised
             - ``ignore`` : suppress exceptions. On error return original object
-        axis : int
-        transpose : boolean
-            Set to True if self is stored with axes reversed

         Returns
         -------
@@ -1414,6 +1402,7 @@ def where(
         import pandas.core.computation.expressions as expressions

         assert errors in ["raise", "ignore"]
+        transpose = self.ndim == 2

         values = self.values
         orig_other = other
@@ -1432,7 +1421,7 @@ def where(
             cond = cond.T

         if not hasattr(cond, "shape"):
-            raise ValueError("where must have a condition that is ndarray " "like")
+            raise ValueError("where must have a condition that is ndarray like")

         # our where function
         def func(cond, values, other):
@@ -1473,7 +1462,6 @@ def func(cond, values, other):
                 errors=errors,
                 try_cast=try_cast,
                 axis=axis,
-                transpose=transpose,
             )
             return self._maybe_downcast(blocks, "infer")

@@ -1917,7 +1905,7 @@ def _slice(self, slicer):

         if isinstance(slicer, tuple) and len(slicer) == 2:
             if not com.is_null_slice(slicer[0]):
-                raise AssertionError("invalid slicing for a 1-ndim " "categorical")
+                raise AssertionError("invalid slicing for a 1-ndim categorical")
             slicer = slicer[1]

         return self.values[slicer]
@@ -2004,16 +1992,7 @@ def shift(
             )
         ]

-    def where(
-        self,
-        other,
-        cond,
-        align=True,
-        errors="raise",
-        try_cast=False,
-        axis=0,
-        transpose=False,
-    ):
+    def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
         if isinstance(other, ABCDataFrame):
             # ExtensionArrays are 1-D, so if we get here then
             # `other` should be a DataFrame with a single column.
@@ -2321,9 +2300,7 @@ def _try_coerce_args(self, other):
         elif isinstance(other, (datetime, np.datetime64, date)):
             other = self._box_func(other)
             if getattr(other, "tz") is not None:
-                raise TypeError(
-                    "cannot coerce a Timestamp with a tz on a " "naive Block"
-                )
+                raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")
             other = other.asm8.view("i8")
         elif hasattr(other, "dtype") and is_datetime64_dtype(other):
             other = other.astype("i8", copy=False).view("i8")
@@ -2997,7 +2974,7 @@ def _replace_single(
         # only one will survive
         if to_rep_re and regex_re:
             raise AssertionError(
-                "only one of to_replace and regex can be " "regex compilable"
+                "only one of to_replace and regex can be regex compilable"
             )

         # if regex was passed as something that can be a regex (rather than a
@@ -3181,16 +3158,7 @@ def concat_same_type(self, to_concat, placement=None):
             values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
         )

-    def where(
-        self,
-        other,
-        cond,
-        align=True,
-        errors="raise",
-        try_cast=False,
-        axis=0,
-        transpose=False,
-    ):
+    def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
         # TODO(CategoricalBlock.where):
         # This can all be deleted in favor of ExtensionBlock.where once
         # we enforce the deprecation.
@@ -3205,19 +3173,11 @@ def where(
             )
         try:
             # Attempt to do preserve categorical dtype.
-            result = super().where(
-                other, cond, align, errors, try_cast, axis, transpose
-            )
+            result = super().where(other, cond, align, errors, try_cast, axis)
         except (TypeError, ValueError):
             warnings.warn(object_msg, FutureWarning, stacklevel=6)
             result = self.astype(object).where(
-                other,
-                cond,
-                align=align,
-                errors=errors,
-                try_cast=try_cast,
-                axis=axis,
-                transpose=transpose,
+                other, cond, align=align, errors=errors, try_cast=try_cast, axis=axis
             )
         return result

@@ -3286,7 +3246,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=No
     if fastpath is not None:
         # GH#19265 pyarrow is passing this
         warnings.warn(
-            "fastpath argument is deprecated, will be removed " "in a future release.",
+            "fastpath argument is deprecated, will be removed in a future release.",
             FutureWarning,
         )
     if klass is None:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index c5254aaa4af5f..b3c74aaaa5701 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -936,7 +936,7 @@ def _consolidate_inplace(self):
             self._known_consolidated = True
             self._rebuild_blknos_and_blklocs()

-    def get(self, item, fastpath=True):
+    def get(self, item):
         """
         Return values for selected item (ndarray or BlockManager).
         """
@@ -954,7 +954,7 @@ def get(self, item):
                     else:
                         raise ValueError("cannot label index with a null key")

-            return self.iget(loc, fastpath=fastpath)
+            return self.iget(loc)
         else:

             if isna(item):
@@ -965,18 +965,18 @@ def get(self, item):
                 new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True
             )

-    def iget(self, i, fastpath=True):
+    def iget(self, i):
         """
-        Return the data as a SingleBlockManager if fastpath=True and possible
+        Return the data as a SingleBlockManager if possible

         Otherwise return as a ndarray
         """
         block = self.blocks[self._blknos[i]]
         values = block.iget(self._blklocs[i])
-        if not fastpath or values.ndim != 1:
+        if values.ndim != 1:
             return values

-        # fastpath shortcut for select a single-dim from a 2-dim BM
+        # shortcut for select a single-dim from a 2-dim BM
         return SingleBlockManager(
             [
                 block.make_block_same_class(
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 9ce1062a6ec26..6beb847da3eb4 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -418,9 +418,6 @@ def test_get(self):
         block = make_block(values=values.copy(), placement=np.arange(3))
         mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])

-        assert_almost_equal(mgr.get("a", fastpath=False), values[0])
-        assert_almost_equal(mgr.get("b", fastpath=False), values[1])
-        assert_almost_equal(mgr.get("c", fastpath=False), values[2])
         assert_almost_equal(mgr.get("a").internal_values(), values[0])
         assert_almost_equal(mgr.get("b").internal_values(), values[1])
         assert_almost_equal(mgr.get("c").internal_values(), values[2])
@@ -701,6 +698,7 @@ def test_consolidate_ordering_issues(self, mgr):
         )

     def test_reindex_index(self):
+        # TODO: should this be pytest.skip?
         pass

     def test_reindex_items(self):
@@ -710,18 +708,6 @@ def test_reindex_items(self):
         reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
         assert reindexed.nblocks == 2
         tm.assert_index_equal(reindexed.items, pd.Index(["g", "c", "a", "d"]))
-        assert_almost_equal(
-            mgr.get("g", fastpath=False), reindexed.get("g", fastpath=False)
-        )
-        assert_almost_equal(
-            mgr.get("c", fastpath=False), reindexed.get("c", fastpath=False)
-        )
-        assert_almost_equal(
-            mgr.get("a", fastpath=False), reindexed.get("a", fastpath=False)
-        )
-        assert_almost_equal(
-            mgr.get("d", fastpath=False), reindexed.get("d", fastpath=False)
-        )
         assert_almost_equal(
             mgr.get("g").internal_values(), reindexed.get("g").internal_values()
         )
@@ -747,18 +733,12 @@ def test_get_numeric_data(self):
         tm.assert_index_equal(
             numeric.items, pd.Index(["int", "float", "complex", "bool"])
         )
-        assert_almost_equal(
-            mgr.get("float", fastpath=False), numeric.get("float", fastpath=False)
-        )
         assert_almost_equal(
             mgr.get("float").internal_values(), numeric.get("float").internal_values()
         )

         # Check sharing
         numeric.set("float", np.array([100.0, 200.0, 300.0]))
-        assert_almost_equal(
-            mgr.get("float", fastpath=False), np.array([100.0, 200.0, 300.0])
-        )
         assert_almost_equal(
             mgr.get("float").internal_values(), np.array([100.0, 200.0, 300.0])
         )
@@ -768,9 +748,6 @@ def test_get_numeric_data(self):
             numeric.items, pd.Index(["int", "float", "complex", "bool"])
         )
         numeric2.set("float", np.array([1000.0, 2000.0, 3000.0]))
-        assert_almost_equal(
-            mgr.get("float", fastpath=False), np.array([100.0, 200.0, 300.0])
-        )
         assert_almost_equal(
             mgr.get("float").internal_values(), np.array([100.0, 200.0, 300.0])
         )
@@ -785,17 +762,11 @@ def test_get_bool_data(self):

         bools = mgr.get_bool_data()
         tm.assert_index_equal(bools.items, pd.Index(["bool"]))
-        assert_almost_equal(
-            mgr.get("bool", fastpath=False), bools.get("bool", fastpath=False)
-        )
         assert_almost_equal(
             mgr.get("bool").internal_values(), bools.get("bool").internal_values()
         )

         bools.set("bool", np.array([True, False, True]))
-        tm.assert_numpy_array_equal(
-            mgr.get("bool", fastpath=False), np.array([True, False, True])
-        )
         tm.assert_numpy_array_equal(
             mgr.get("bool").internal_values(), np.array([True, False, True])
         )
@@ -803,9 +774,6 @@ def test_get_bool_data(self):
         # Check sharing
         bools2 = mgr.get_bool_data(copy=True)
         bools2.set("bool", np.array([False, True, False]))
-        tm.assert_numpy_array_equal(
-            mgr.get("bool", fastpath=False), np.array([True, False, True])
-        )
         tm.assert_numpy_array_equal(
             mgr.get("bool").internal_values(), np.array([True, False, True])
         )
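
Illustration (not part of the patch): a minimal sketch of the access pattern the updated tests exercise, assuming a pandas build at the state of this branch where DataFrame._data exposes the BlockManager. With the fastpath keyword gone, get()/iget() always return a SingleBlockManager for a single column, and the raw ndarray is reached via internal_values().

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
    mgr = df._data  # BlockManager (internal API)

    single = mgr.get("a")            # SingleBlockManager; no fastpath kwarg anymore
    arr = single.internal_values()   # the underlying ndarray
    assert isinstance(arr, np.ndarray)
    np.testing.assert_array_equal(arr, np.array([1.0, 2.0, 3.0]))

Block.where similarly no longer takes a transpose argument; the block derives it internally from its own dimensionality (transpose = self.ndim == 2), so callers such as NDFrame._where pass one less piece of state.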