diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 8b6116d3abd60..c283baeb9d412 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -550,7 +550,6 @@ def _select_options(pat: str) -> List[str]: if pat=="all", returns all registered options """ - # short-circuit for exact key if pat in _registered_options: return [pat] @@ -573,7 +572,6 @@ def _get_root(key: str) -> Tuple[Dict[str, Any], str]: def _is_deprecated(key: str) -> bool: """ Returns True if the given option has been deprecated """ - key = key.lower() return key in _deprecated_options @@ -586,7 +584,6 @@ def _get_deprecated_option(key: str): ------- DeprecatedOption (namedtuple) if key is deprecated, None otherwise """ - try: d = _deprecated_options[key] except KeyError: @@ -611,7 +608,6 @@ def _translate_key(key: str) -> str: if key id deprecated and a replacement key defined, will return the replacement key, otherwise returns `key` as - is """ - d = _get_deprecated_option(key) if d: return d.rkey or key @@ -627,7 +623,6 @@ def _warn_if_deprecated(key: str) -> bool: ------- bool - True if `key` is deprecated, False otherwise. """ - d = _get_deprecated_option(key) if d: if d.msg: @@ -649,7 +644,6 @@ def _warn_if_deprecated(key: str) -> bool: def _build_option_description(k: str) -> str: """ Builds a formatted description of a registered option and prints it """ - o = _get_registered_option(k) d = _get_deprecated_option(k) @@ -674,7 +668,6 @@ def _build_option_description(k: str) -> str: def pp_options_list(keys: Iterable[str], width=80, _print: bool = False): """ Builds a concise listing of available options, grouped by prefix """ - from textwrap import wrap from itertools import groupby @@ -738,7 +731,6 @@ def config_prefix(prefix): will register options "display.font.color", "display.font.size", set the value of "display.font.size"... and so on. """ - # Note: reset_option relies on set_option, and on key directly # it does not fit in to this monkey-patching scheme @@ -801,7 +793,6 @@ def is_instance_factory(_type) -> Callable[[Any], None]: ValueError if x is not an instance of `_type` """ - if isinstance(_type, (tuple, list)): _type = tuple(_type) type_repr = "|".join(map(str, _type)) @@ -848,7 +839,6 @@ def is_nonnegative_int(value: Optional[int]) -> None: ValueError When the value is not None or is a negative integer """ - if value is None: return diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py index 0d68e78372d8a..66865e1afb952 100644 --- a/pandas/_config/localization.py +++ b/pandas/_config/localization.py @@ -61,7 +61,6 @@ def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool: bool Whether the passed locale can be set """ - try: with set_locale(lc, lc_var=lc_var): pass diff --git a/pandas/_testing.py b/pandas/_testing.py index 9e71524263a18..46ed65c87e8dd 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1508,7 +1508,6 @@ def assert_sp_array_equal( create a new BlockIndex for that array, with consolidated block indices. """ - _check_isinstance(left, right, pd.arrays.SparseArray) assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype) @@ -1876,7 +1875,6 @@ def makeCustomIndex( if unspecified, string labels will be generated. 
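The config.py hunks above touch the option-registry helpers (_select_options, _is_deprecated, _warn_if_deprecated, ...). For context, the public surface they back is the pd.get_option/set_option family; a minimal usage sketch, outside the patch itself:

```python
import pandas as pd

# "display.max_rows" is a built-in key that _select_options resolves
# as an exact match.
pd.get_option("display.max_rows")       # 60 by default
pd.set_option("display.max_rows", 100)

# Prefix patterns go through the same lookup; a pattern matching
# several keys is fine for describe_option but ambiguous for get/set.
pd.describe_option("display.max_")

pd.reset_option("display.max_rows")
```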
""" - if ndupe_l is None: ndupe_l = [1] * nlevels assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels @@ -2025,7 +2023,6 @@ def makeCustomDataframe( >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) """ - assert c_idx_nlevels > 0 assert r_idx_nlevels > 0 assert r_idx_type is None or ( @@ -2229,7 +2226,6 @@ def can_connect(url, error_classes=None): Return True if no IOError (unable to connect) or URLError (bad url) was raised """ - if error_classes is None: error_classes = _get_default_network_errors() @@ -2603,7 +2599,6 @@ def test_parallel(num_threads=2, kwargs_list=None): https://github.com/scikit-image/scikit-image/pull/1519 """ - assert num_threads > 0 has_kwargs_list = kwargs_list is not None if has_kwargs_list: @@ -2685,7 +2680,6 @@ def set_timezone(tz: str): ... 'EDT' """ - import os import time diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 05ecccc67daef..ccc970fb453c2 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -99,7 +99,6 @@ def validate_argmin_with_skipna(skipna, args, kwargs): 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean """ - skipna, args = process_skipna(skipna, args) validate_argmin(args, kwargs) return skipna @@ -113,7 +112,6 @@ def validate_argmax_with_skipna(skipna, args, kwargs): 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean """ - skipna, args = process_skipna(skipna, args) validate_argmax(args, kwargs) return skipna @@ -151,7 +149,6 @@ def validate_argsort_with_ascending(ascending, args, kwargs): either integer type or is None, since 'ascending' itself should be a boolean """ - if is_integer(ascending) or ascending is None: args = (ascending,) + args ascending = True @@ -173,7 +170,6 @@ def validate_clip_with_axis(axis, args, kwargs): so check if the 'axis' parameter is an instance of ndarray, since 'axis' itself should either be an integer or None """ - if isinstance(axis, ndarray): args = (axis,) + args axis = None @@ -298,7 +294,6 @@ def validate_take_with_convert(convert, args, kwargs): ndarray or 'None', so check if the 'convert' parameter is either an instance of ndarray or is None """ - if isinstance(convert, ndarray) or convert is None: args = (convert,) + args convert = True diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 0a1a1376bfc8d..3f4acca8bce18 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -229,7 +229,6 @@ def load(fh, encoding: Optional[str] = None, is_verbose: bool = False): encoding : an optional encoding is_verbose : show exception output """ - try: fh.seek(0) if encoding is not None: diff --git a/pandas/conftest.py b/pandas/conftest.py index 7463b2b579c0c..dd329c1b00dbb 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -118,7 +118,6 @@ def ip(): Will raise a skip if IPython is not installed. 
""" - pytest.importorskip("IPython", minversion="6.0.0") from IPython.core.interactiveshell import InteractiveShell @@ -679,7 +678,6 @@ def any_nullable_int_dtype(request): * 'UInt64' * 'Int64' """ - return request.param diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 886b0a3c5fec1..af06a559ded69 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -85,7 +85,6 @@ def _ensure_data(values, dtype=None): values : ndarray pandas_dtype : str or dtype """ - # we check some simple dtypes first if is_object_dtype(dtype): return ensure_object(np.asarray(values)), "object" @@ -182,7 +181,6 @@ def _reconstruct_data(values, dtype, original): ------- Index for extension types, otherwise ndarray casted to dtype """ - if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) elif is_bool_dtype(dtype): @@ -368,7 +366,6 @@ def unique(values): >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ - values = _ensure_arraylike(values) if is_extension_array_dtype(values): @@ -796,7 +793,6 @@ def duplicated(values, keep="first") -> np.ndarray: ------- duplicated : ndarray """ - values, _ = _ensure_data(values) ndtype = values.dtype.name f = getattr(htable, f"duplicated_{ndtype}") diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 81e1d84880f60..70e0a129c055f 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -35,7 +35,6 @@ def frame_apply( kwds=None, ): """ construct and return a row or column based frame apply object """ - axis = obj._get_axis_number(axis) klass: Type[FrameApply] if axis == 0: @@ -144,7 +143,6 @@ def agg_axis(self) -> "Index": def get_result(self): """ compute the results """ - # dispatch to agg if is_list_like(self.f) or is_dict_like(self.f): return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds) @@ -193,7 +191,6 @@ def apply_empty_result(self): we will try to apply the function to an empty series in order to see if this is a reduction function """ - # we are not asked to reduce or infer reduction # so just return a copy of the existing object if self.result_type not in ["reduce", None]: @@ -396,7 +393,6 @@ def wrap_results_for_axis( self, results: ResType, res_index: "Index" ) -> "DataFrame": """ return the results for the rows """ - result = self.obj._constructor(data=results) if not isinstance(results[0], ABCSeries): @@ -457,7 +453,6 @@ def wrap_results_for_axis( def infer_to_same_shape(self, results: ResType, res_index: "Index") -> "DataFrame": """ infer the results to the same shape as the input object """ - result = self.obj._constructor(data=results) result = result.T diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index d26ff7490e714..a0b9402fd97cc 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -695,7 +695,6 @@ def _set_categories(self, categories, fastpath=False): [a, c] Categories (2, object): [a, c] """ - if fastpath: new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) else: @@ -1221,7 +1220,6 @@ def shape(self): ------- shape : tuple """ - return tuple([len(self._codes)]) def shift(self, periods, fill_value=None): @@ -1378,7 +1376,6 @@ def isna(self): Categorical.notna : Boolean inverse of Categorical.isna. """ - ret = self._codes == -1 return ret @@ -1928,7 +1925,6 @@ def _repr_categories_info(self) -> str: """ Returns a string representation of the footer. 
""" - category_strs = self._repr_categories() dtype = str(self.categories.dtype) levheader = f"Categories ({len(self.categories)}, {dtype}): " @@ -2254,7 +2250,6 @@ def unique(self): Series.unique """ - # unlike np.unique, unique1d does not sort unique_codes = unique1d(self.codes) cat = self.copy() @@ -2314,7 +2309,6 @@ def is_dtype_equal(self, other): ------- bool """ - try: return hash(self.dtype) == hash(other.dtype) except (AttributeError, TypeError): diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 03c8e48c6e699..07aa8d49338c8 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -500,7 +500,6 @@ def __getitem__(self, key): This getitem defers to the underlying array, which by-definition can only handle list-likes, slices, and integer scalars """ - is_int = lib.is_integer(key) if lib.is_scalar(key) and not is_int: raise IndexError( @@ -892,7 +891,6 @@ def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): This is an internal routine. """ - if self._hasnans: if convert: result = result.astype(convert) diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 4bfd5f5770b69..6cd3a41dd957a 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -147,7 +147,6 @@ def safe_cast(values, dtype, copy: bool): ints. """ - try: return values.astype(dtype, casting="safe", copy=copy) except TypeError: @@ -601,7 +600,6 @@ def _maybe_mask_result(self, result, mask, other, op_name): other : scalar or array-like op_name : str """ - # if we have a float operand we are by-definition # a float result # or our op is a divide diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 8383b783d90e7..8141e2c78a7e2 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -624,7 +624,6 @@ def _addsub_int_array( ------- result : PeriodArray """ - assert op in [operator.add, operator.sub] if op is operator.sub: other = -other diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 8008805ddcf87..b17a4647ffc9f 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1503,7 +1503,6 @@ def make_sparse(arr, kind="block", fill_value=None, dtype=None, copy=False): ------- (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar) """ - arr = com.values_from_object(arr) if arr.ndim > 1: diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index b67f2c9f52c76..eff9c03386a38 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -31,7 +31,6 @@ def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False): def get_indexers(levels): """ Return sparse coords and dense labels for subset levels """ - # TODO: how to do this better? cleanly slice nonnull_labels given the # coord values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index] @@ -90,7 +89,6 @@ def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=F levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. 
""" - import scipy.sparse if ss.index.nlevels < 2: diff --git a/pandas/core/common.py b/pandas/core/common.py index 00c7a41477017..550ce74de5357 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -337,7 +337,6 @@ def apply_if_callable(maybe_callable, obj, **kwargs): obj : NDFrame **kwargs """ - if callable(maybe_callable): return maybe_callable(obj, **kwargs) @@ -412,7 +411,6 @@ def random_state(state=None): ------- np.random.RandomState """ - if is_integer(state): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index c26208d3b4465..c59952bea8dc0 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -599,7 +599,6 @@ def visit_Assign(self, node, **kwargs): might or might not exist in the resolvers """ - if len(node.targets) != 1: raise SyntaxError("can only assign a single expression") if not isinstance(node.targets[0], ast.Name): diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 9f209cccd5be6..19f151846a080 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -95,7 +95,6 @@ def _disallow_scalar_only_bool_ops(self): def prune(self, klass): def pr(left, right): """ create and return a new specialized BinOp from myself """ - if left is None: return right elif right is None: @@ -476,7 +475,6 @@ def _validate_where(w): ------ TypeError : An invalid data type was passed in for w (e.g. dict). """ - if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)): raise TypeError( "where must be passed as a string, PyTablesExpr, " @@ -574,7 +572,6 @@ def __repr__(self) -> str: def evaluate(self): """ create and return the numexpr condition and filter """ - try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 6120bc92adbfc..011c09c9ca1ef 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -78,7 +78,6 @@ def maybe_convert_platform(values): """ try to do platform conversion, allow ndarray or list here """ - if isinstance(values, (list, tuple, range)): values = construct_1d_object_array_from_listlike(values) if getattr(values, "dtype", None) == np.object_: @@ -97,7 +96,6 @@ def is_nested_object(obj) -> bool: This may not be necessarily be performant. """ - if isinstance(obj, ABCSeries) and is_object_dtype(obj): if any(isinstance(v, ABCSeries) for v in obj.values): @@ -525,7 +523,6 @@ def _ensure_dtype_type(value, dtype): ------- object """ - # Start with exceptions in which we do _not_ cast to numpy types if is_extension_array_dtype(dtype): return value @@ -566,7 +563,6 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False): If False, scalar belongs to pandas extension types is inferred as object """ - dtype = np.object_ # a 1-element ndarray @@ -823,7 +819,6 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False): ValueError The dtype was a datetime64/timedelta64 dtype, but it had no unit. 
""" - # dispatch on extension dtype if needed if is_extension_array_dtype(dtype): return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) @@ -965,7 +960,6 @@ def soft_convert_objects( copy: bool = True, ): """ if we have an object dtype, try to coerce dates and/or numbers """ - validate_bool_kwarg(datetime, "datetime") validate_bool_kwarg(numeric, "numeric") validate_bool_kwarg(timedelta, "timedelta") @@ -1053,7 +1047,6 @@ def convert_dtypes( dtype new dtype """ - if convert_string or convert_integer or convert_boolean: try: inferred_dtype = lib.infer_dtype(input_array) @@ -1133,7 +1126,6 @@ def maybe_infer_to_datetimelike(value, convert_dates: bool = False): leave inferred dtype 'date' alone """ - # TODO: why not timedelta? if isinstance( value, (ABCDatetimeIndex, ABCPeriodIndex, ABCDatetimeArray, ABCPeriodArray) @@ -1373,7 +1365,6 @@ def find_common_type(types): numpy.find_common_type """ - if len(types) == 0: raise ValueError("no types given") @@ -1420,7 +1411,6 @@ def cast_scalar_to_array(shape, value, dtype=None): ndarray of shape, filled with value, of specified / inferred dtype """ - if dtype is None: dtype, fill_value = infer_dtype_from_scalar(value) else: diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index f8e14d1cbc9e9..c0420244f671e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -92,7 +92,6 @@ def ensure_float(arr): float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned. """ - if issubclass(arr.dtype.type, (np.integer, np.bool_)): arr = arr.astype(float) return arr @@ -132,7 +131,6 @@ def ensure_categorical(arr): cat_arr : The original array cast as a Categorical. If it already is a Categorical, we return as is. 
""" - if not is_categorical(arr): from pandas import Categorical @@ -325,7 +323,6 @@ def is_scipy_sparse(arr) -> bool: >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3])) False """ - global _is_scipy_sparse if _is_scipy_sparse is None: @@ -367,7 +364,6 @@ def is_categorical(arr) -> bool: >>> is_categorical(pd.CategoricalIndex([1, 2, 3])) True """ - return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) @@ -398,7 +394,6 @@ def is_datetime64_dtype(arr_or_dtype) -> bool: >>> is_datetime64_dtype([1, 2, 3]) False """ - return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) @@ -434,7 +429,6 @@ def is_datetime64tz_dtype(arr_or_dtype) -> bool: >>> is_datetime64tz_dtype(s) True """ - if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) @@ -467,7 +461,6 @@ def is_timedelta64_dtype(arr_or_dtype) -> bool: >>> is_timedelta64_dtype('0 days') False """ - return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) @@ -498,7 +491,6 @@ def is_period_dtype(arr_or_dtype) -> bool: >>> is_period_dtype(pd.PeriodIndex([], freq="A")) True """ - # TODO: Consider making Period an instance of PeriodDtype if arr_or_dtype is None: return False @@ -534,7 +526,6 @@ def is_interval_dtype(arr_or_dtype) -> bool: >>> is_interval_dtype(pd.IntervalIndex([interval])) True """ - # TODO: Consider making Interval an instance of IntervalDtype if arr_or_dtype is None: return False @@ -568,7 +559,6 @@ def is_categorical_dtype(arr_or_dtype) -> bool: >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) True """ - if arr_or_dtype is None: return False return CategoricalDtype.is_dtype(arr_or_dtype) @@ -602,7 +592,6 @@ def is_string_dtype(arr_or_dtype) -> bool: >>> is_string_dtype(pd.Series([1, 2])) False """ - # TODO: gh-15585: consider making the checks stricter. 
def condition(dtype) -> bool: return dtype.kind in ("O", "S", "U") and not is_excluded_dtype(dtype) @@ -641,7 +630,6 @@ def is_period_arraylike(arr) -> bool: >>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D")) True """ - if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)): return True elif isinstance(arr, (np.ndarray, ABCSeries)): @@ -673,7 +661,6 @@ def is_datetime_arraylike(arr) -> bool: >>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3])) True """ - if isinstance(arr, ABCDatetimeIndex): return True elif isinstance(arr, (np.ndarray, ABCSeries)): @@ -711,7 +698,6 @@ def is_dtype_equal(source, target) -> bool: >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") False """ - try: source = _get_dtype(source) target = _get_dtype(target) @@ -770,7 +756,6 @@ def is_any_int_dtype(arr_or_dtype) -> bool: >>> is_any_int_dtype(pd.Index([1, 2.])) # float False """ - return _is_dtype_type(arr_or_dtype, classes(np.integer, np.timedelta64)) @@ -825,7 +810,6 @@ def is_integer_dtype(arr_or_dtype) -> bool: >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ - return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.integer)) @@ -882,7 +866,6 @@ def is_signed_integer_dtype(arr_or_dtype) -> bool: >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False """ - return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.signedinteger)) @@ -982,7 +965,6 @@ def is_int64_dtype(arr_or_dtype) -> bool: >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False """ - return _is_dtype_type(arr_or_dtype, classes(np.int64)) @@ -1137,7 +1119,6 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64)) True """ - return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64)) @@ -1198,7 +1179,6 @@ def is_numeric_v_string_like(a, b): >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"])) False """ - is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) @@ -1260,7 +1240,6 @@ def is_datetimelike_v_numeric(a, b): >>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt])) False """ - if not hasattr(a, "dtype"): a = np.asarray(a) if not hasattr(b, "dtype"): @@ -1311,7 +1290,6 @@ def needs_i8_conversion(arr_or_dtype) -> bool: >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """ - if arr_or_dtype is None: return False return ( @@ -1358,7 +1336,6 @@ def is_numeric_dtype(arr_or_dtype) -> bool: >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ - return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) ) @@ -1392,7 +1369,6 @@ def is_string_like_dtype(arr_or_dtype) -> bool: >>> is_string_like_dtype(pd.Series([1, 2])) False """ - return _is_dtype(arr_or_dtype, lambda dtype: dtype.kind in ("S", "U")) @@ -1638,7 +1614,6 @@ def is_complex_dtype(arr_or_dtype) -> bool: >>> is_complex_dtype(np.array([1 + 1j, 5])) True """ - return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) @@ -1657,7 +1632,6 @@ def _is_dtype(arr_or_dtype, condition) -> bool: bool """ - if arr_or_dtype is None: return False try: @@ -1686,7 +1660,6 @@ def _get_dtype(arr_or_dtype) -> DtypeObj: ------ TypeError : The passed in object is None. 
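All of the is_*_dtype predicates above bottom out in _is_dtype_type plus the tiny classes/classes_and_not_datetimelike closures. A sketch of that pattern, with the helper bodies assumed from how they are called here:

```python
import numpy as np

def classes(*klasses):
    # Predicate: is dtype.type a subclass of any of *klasses*?
    return lambda tipo: issubclass(tipo, klasses)

def classes_and_not_datetimelike(*klasses):
    # Same, but reject datetime64/timedelta64 -- this is why
    # is_integer_dtype(np.array([], dtype="m8[ns]")) is False even
    # though timedelta64 subclasses np.signedinteger.
    return lambda tipo: issubclass(tipo, klasses) and not issubclass(
        tipo, (np.datetime64, np.timedelta64)
    )

assert classes(np.integer)(np.int64)
assert not classes_and_not_datetimelike(np.integer)(np.timedelta64)
```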
""" - if arr_or_dtype is None: raise TypeError("Cannot deduce dtype from null object") @@ -1717,7 +1690,6 @@ def _is_dtype_type(arr_or_dtype, condition) -> bool: ------- bool : if the condition is satisfied for the arr_or_dtype """ - if arr_or_dtype is None: return condition(type(None)) @@ -1767,7 +1739,6 @@ def infer_dtype_from_object(dtype): ------- dtype_object : The extracted numpy dtype.type-style object. """ - if isinstance(dtype, type) and issubclass(dtype, np.generic): # Type object from a dtype return dtype @@ -1827,7 +1798,6 @@ def _validate_date_like_dtype(dtype) -> None: ValueError : The dtype is an illegal date-like dtype (e.g. the the frequency provided is too specific) """ - try: typ = np.datetime_data(dtype)[0] except ValueError as e: diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index fdc2eeb34b4ed..e53eb3b4d8e71 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -38,7 +38,6 @@ def get_dtype_kinds(l): ------- a set of kinds that exist in this list of arrays """ - typs = set() for arr in l: @@ -85,7 +84,6 @@ def concat_compat(to_concat, axis: int = 0): ------- a single array, preserving the combined dtypes """ - # filter empty arrays # 1-d dtypes always are included here def is_nonempty(x) -> bool: @@ -153,7 +151,6 @@ def concat_categorical(to_concat, axis: int = 0): Categorical A single array, preserving the combined dtypes """ - # we could have object blocks and categoricals here # if we only have a single categoricals then combine everything # else its a non-compat categorical @@ -381,7 +378,6 @@ def concat_datetime(to_concat, axis=0, typs=None): ------- a single array, preserving the combined dtypes """ - if typs is None: typs = get_dtype_kinds(to_concat) @@ -466,7 +462,6 @@ def _concat_sparse(to_concat, axis=0, typs=None): ------- a single array, preserving the combined dtypes """ - from pandas.core.arrays import SparseArray fill_values = [x.fill_value for x in to_concat if isinstance(x, SparseArray)] diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8aaebe89871b6..d93ad973ff02d 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -831,7 +831,6 @@ def __new__(cls, freq=None): ---------- freq : frequency """ - if isinstance(freq, PeriodDtype): return freq @@ -930,7 +929,6 @@ def is_dtype(cls, dtype) -> bool: Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """ - if isinstance(dtype, str): # PeriodDtype can be instantiated from freq string like "U", # but doesn't regard freq str like "U" as dtype. 
@@ -1139,7 +1137,6 @@ def is_dtype(cls, dtype) -> bool: Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """ - if isinstance(dtype, str): if dtype.lower().startswith("interval"): try: diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index a9cd696633273..56b880dca1241 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -65,7 +65,6 @@ def is_number(obj) -> bool: >>> pd.api.types.is_number("5") False """ - return isinstance(obj, (Number, np.number)) @@ -91,7 +90,6 @@ def _iterable_not_string(obj) -> bool: >>> _iterable_not_string(1) False """ - return isinstance(obj, abc.Iterable) and not isinstance(obj, str) @@ -124,7 +122,6 @@ def is_file_like(obj) -> bool: >>> is_file_like([1, 2, 3]) False """ - if not (hasattr(obj, "read") or hasattr(obj, "write")): return False @@ -177,7 +174,6 @@ def is_re_compilable(obj) -> bool: >>> is_re_compilable(1) False """ - try: re.compile(obj) except TypeError: @@ -215,7 +211,6 @@ def is_array_like(obj) -> bool: >>> is_array_like(("a", "b")) False """ - return is_list_like(obj) and hasattr(obj, "dtype") @@ -321,7 +316,6 @@ def is_named_tuple(obj) -> bool: >>> is_named_tuple((1, 2)) False """ - return isinstance(obj, tuple) and hasattr(obj, "_fields") @@ -386,7 +380,6 @@ def is_sequence(obj) -> bool: >>> is_sequence(iter(l)) False """ - try: iter(obj) # Can iterate over it. len(obj) # Has a length associated with it. diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 0bc754b3e8fb3..ee74b02af9516 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -430,7 +430,6 @@ def array_equivalent(left, right, strict_nan: bool = False) -> bool: ... np.array([1, 2, np.nan])) False """ - left, right = np.asarray(left), np.asarray(right) # shape compat @@ -504,7 +503,6 @@ def _infer_fill_value(val): scalar/ndarray/list-like if we are a NaT, return the correct dtyped element to provide proper block construction """ - if not is_list_like(val): val = [val] val = np.array(val, copy=False) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0fca02f110031..99568d47b777a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5013,7 +5013,6 @@ def sort_index( sorted_obj : DataFrame or None DataFrame with sorted index if inplace=False, None otherwise. """ - # TODO: this can be combined with Series.sort_index impl as # almost identical @@ -7040,7 +7039,6 @@ def applymap(self, func) -> "DataFrame": 0 1.000000 4.494400 1 11.262736 20.857489 """ - # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index dfafb1057a543..480b03a956356 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -259,7 +259,6 @@ def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None: def _validate_dtype(self, dtype): """ validate the passed dtype """ - if dtype is not None: dtype = pandas_dtype(dtype) @@ -351,7 +350,6 @@ def _construct_axes_from_arguments( supplied; useful to distinguish when a user explicitly passes None in scenarios where None has special meaning. 
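The inference.py predicates above are all duck-type checks; a few quick examples via their public re-exports:

```python
import io
from collections import namedtuple
from pandas.api.types import is_file_like, is_named_tuple, is_re_compilable

is_file_like(io.StringIO("data"))   # True: read/write plus __iter__
is_file_like([1, 2, 3])             # False

is_re_compilable(r"\d+")            # True
is_re_compilable(1)                 # False: re.compile raises TypeError

Point = namedtuple("Point", ["x", "y"])
is_named_tuple(Point(1, 2))         # True: a tuple with ``_fields``
```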
""" - # construct the args args = list(args) for a in self._AXIS_ORDERS: @@ -2246,7 +2244,6 @@ def to_json( "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """ - from pandas.io import json if date_format is None and orient == "table": @@ -3082,7 +3079,6 @@ def to_csv( >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP """ - df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.csvs import CSVFormatter @@ -3161,7 +3157,6 @@ def _maybe_update_cacher( verify_is_copy : bool, default True Provide is_copy checks. """ - cacher = getattr(self, "_cacher", None) if cacher is not None: ref = cacher[1]() @@ -3575,7 +3570,6 @@ def _check_setitem_copy(self, stacklevel=4, t="setting", force=False): df.iloc[0:5]['group'] = 'a' """ - # return early if the check is not needed if not (force or self._is_copy): return @@ -4417,7 +4411,6 @@ def _reindex_with_indexers( allow_dups: bool_t = False, ) -> FrameOrSeries: """allow_dups indicates an internal call here """ - # reindex doing multiple operations on different axes if indicated new_data = self._data for axis in sorted(reindexers.keys()): @@ -4613,7 +4606,6 @@ def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: 4 monkey 5 parrot """ - return self.iloc[:n] def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: @@ -4686,7 +4678,6 @@ def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries: 7 whale 8 zebra """ - if n == 0: return self.iloc[0:0] return self.iloc[-n:] @@ -4801,7 +4792,6 @@ def sample( falcon 2 2 10 fish 0 0 8 """ - if axis is None: axis = self._stat_axis_number @@ -5087,7 +5077,6 @@ def __getattr__(self, name: str): """After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ - # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). @@ -5106,7 +5095,6 @@ def __setattr__(self, name: str, value) -> None: """After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ - # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. @@ -5209,7 +5197,6 @@ def _is_numeric_mixed_type(self): def _check_inplace_setting(self, value) -> bool_t: """ check whether we allow in-place setting with this type of value """ - if self._is_mixed_type: if not self._is_numeric_mixed_type: @@ -7916,7 +7903,6 @@ def resample( 2000-01-03 32 150 2000-01-04 36 90 """ - from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) @@ -8930,7 +8916,6 @@ def tshift( attributes of the index. If neither of those attributes exist, a ValueError is thrown """ - index = self._get_axis(axis) if freq is None: freq = getattr(index, "freq", None) @@ -9919,7 +9904,6 @@ def _add_numeric_operations(cls): """ Add the operations to the cls; evaluate the doc strings again """ - axis_descr, name, name2 = _doc_parms(cls) cls.any = _make_logical_function( @@ -10157,7 +10141,6 @@ def _add_series_or_dataframe_operations(cls): Add the series or dataframe only operations to the cls; evaluate the doc strings again. 
""" - from pandas.core.window import EWM, Expanding, Rolling, Window @Appender(Rolling.__doc__) @@ -10271,7 +10254,6 @@ def _find_valid_index(self, how: str): ------- idx_first_valid : type of index """ - idxpos = find_valid_index(self._values, how) if idxpos is None: return None diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index 399ed9ddc9ba1..c71ebee397bbd 100644 --- a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -41,7 +41,6 @@ def recode_for_groupby(c: Categorical, sort: bool, observed: bool): Categorical or None If we are observed, return the original categorical, otherwise None """ - # we only care about observed values if observed: unique_codes = unique1d(c.codes) @@ -90,7 +89,6 @@ def recode_from_groupby(c: Categorical, sort: bool, ci): ------- CategoricalIndex """ - # we re-order to the original category orderings if sort: return ci.set_categories(c.categories) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f194c774cf329..37b6429167646 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1571,7 +1571,6 @@ def filter(self, func, dropna=True, *args, **kwargs): 3 bar 4 1.0 5 bar 6 9.0 """ - indices = [] obj = self._selected_obj @@ -1626,7 +1625,6 @@ def _gotitem(self, key, ndim: int, subset=None): subset : object, default None subset to act on """ - if ndim == 2: if subset is None: subset = self.obj @@ -1844,7 +1842,6 @@ def nunique(self, dropna: bool = True): 4 ham 5 x 5 ham 5 y """ - obj = self._selected_obj def groupby_series(obj, col=None): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 153bf386d4f33..426b3b47d9530 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1174,7 +1174,6 @@ def count(self): Series or DataFrame Count of values within each group. """ - # defined here for API doc raise NotImplementedError @@ -1277,7 +1276,6 @@ def std(self, ddof: int = 1): Series or DataFrame Standard deviation of values within each group. """ - # TODO: implement at Cython level? return np.sqrt(self.var(ddof=ddof)) @@ -1458,7 +1456,6 @@ def ohlc(self) -> DataFrame: DataFrame Open, high, low and close values within each group. """ - return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc")) @Appender(DataFrame.describe.__doc__) @@ -1764,7 +1761,6 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra 1 1 2.0 4 2 5.0 """ - valid_containers = (set, list, tuple) if not isinstance(n, (valid_containers, int)): raise TypeError("n needs to be an int or a list/set/tuple of ints") @@ -2034,7 +2030,6 @@ def ngroup(self, ascending: bool = True): 5 0 dtype: int64 """ - with _group_selection_context(self): index = self._selected_obj.index result = Series(self.grouper.group_info[0], index) @@ -2095,7 +2090,6 @@ def cumcount(self, ascending: bool = True): 5 0 dtype: int64 """ - with _group_selection_context(self): index = self._selected_obj.index cumcounts = self._cumcount_array(ascending=ascending) @@ -2348,7 +2342,6 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None): Series or DataFrame Object shifted within each group. 
""" - if freq is not None or axis != 0 or not isna(fill_value): return self.apply(lambda x: x.shift(periods, freq, axis, fill_value)) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index f0c6eedf5cee4..8a42a8fa297cd 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -130,7 +130,6 @@ def _get_grouper(self, obj, validate: bool = True): ------- a tuple of binner, grouper, obj (possibly sorted) """ - self._set_grouper(obj) self.grouper, _, self.obj = get_grouper( self.obj, diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 4e593ce543ea6..63087672d1365 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -433,7 +433,6 @@ def _cython_operation( Names is only useful when dealing with 2D results, like ohlc (see self._name_functions). """ - assert kind in ["transform", "aggregate"] orig_values = values @@ -748,7 +747,6 @@ def __init__( @cache_readonly def groups(self): """ dict {group name -> group labels} """ - # this is mainly for compat # GH 3881 result = { diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 719bf13cbd313..f3bae63aa7e03 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -912,7 +912,6 @@ def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ - # do we want to justify (only do so for non-objects) is_justify = True @@ -1003,7 +1002,6 @@ def to_native_types(self, slicer=None, **kwargs): numpy.ndarray Formatted values. """ - values = self if slicer is not None: values = values[slicer] @@ -1092,7 +1090,6 @@ def to_series(self, index=None, name=None): Series The dtype will be based on the type of the Index values. """ - from pandas import Series if index is None: @@ -1153,7 +1150,6 @@ def to_frame(self, index: bool = True, name=None): 1 Bear 2 Cow """ - from pandas import DataFrame if name is None: @@ -1294,7 +1290,6 @@ def set_names(self, names, level=None, inplace: bool = False): ( 'cobra', 2019)], names=['species', 'year']) """ - if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") @@ -2548,7 +2543,6 @@ def _union(self, other, sort): ------- Index """ - if not len(other) or self.equals(other): return self._get_reconciled_name_object(other) @@ -3306,7 +3300,6 @@ def _can_reindex(self, indexer): ------ ValueError if its a duplicate axis """ - # trying to reindex on an axis with duplicates if not self.is_unique and len(indexer): raise ValueError("cannot reindex from a duplicate axis") @@ -3391,7 +3384,6 @@ def _reindex_non_unique(self, target): Indices of output values in original index. """ - target = ensure_index(target) indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 @@ -4182,7 +4174,6 @@ def append(self, other): ------- appended : Index """ - to_concat = [self] if isinstance(other, (list, tuple)): @@ -4725,7 +4716,6 @@ def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: dict {group name -> group labels} """ - # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): @@ -4757,7 +4747,6 @@ def map(self, mapper, na_action=None): If the function returns a tuple with more than one element a MultiIndex will be returned. 
""" - from pandas.core.indexes.multi import MultiIndex new_values = super()._map_values(mapper, na_action=na_action) @@ -4923,7 +4912,6 @@ def _maybe_cast_indexer(self, key): If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ - if not self.is_floating(): return com.cast_scalar_indexer(key) return key @@ -5740,7 +5728,6 @@ def _try_convert_to_int_array( ------ ValueError if the conversion was not successful. """ - if not is_unsigned_integer_dtype(dtype): # skip int64 conversion attempt if uint-like dtype is passed, as # this could return Int64Index when UInt64Index is what's desired diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 7373f41daefa4..bb62d500311df 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -215,7 +215,6 @@ def _create_from_codes(self, codes, dtype=None, name=None): ------- CategoricalIndex """ - if dtype is None: dtype = self.dtype if name is None: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 941b6c876bb36..d505778d18c52 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -386,7 +386,6 @@ def _convert_scalar_indexer(self, key, kind: str): key : label of the slice bound kind : {'loc', 'getitem'} """ - assert kind in ["loc", "getitem"] if not is_scalar(key): @@ -556,7 +555,6 @@ def _concat_same_dtype(self, to_concat, name): """ Concatenate to_concat which has the same class. """ - new_data = type(self._data)._concat_same_type(to_concat) return self._simple_new(new_data, name=name) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b67d0dcea0ac6..e303e487b1a7d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -939,7 +939,6 @@ def date_range( DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') """ - if freq is None and com.any_none(periods, start, end): freq = "D" diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 04b4b275bf90a..daccb35864e98 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -39,7 +39,6 @@ def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = Fal ------- attribute, method, property, or cache_readonly """ - attr = getattr(delegate, name) if isinstance(attr, property): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 9c4cd6cf72d35..6ea4250e4acf4 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -550,7 +550,6 @@ def _can_reindex(self, indexer: np.ndarray) -> None: ------ ValueError if its a duplicate axis """ - # trying to reindex on an axis with duplicates if self.is_overlapping and len(indexer): raise ValueError("cannot reindex from an overlapping axis") diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index ac151daac951a..e560cdb150a1b 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -139,7 +139,6 @@ def _codes_to_ints(self, codes): int, or 1-dimensional array of dtype object Integer(s) representing one combination (each). """ - # Shift the representation of each level by the pre-calculated number # of bits. 
Since this can overflow uint64, first make sure we are # working with Python integers: @@ -1115,7 +1114,6 @@ def _nbytes(self, deep: bool = False) -> int: *this is in internal routine* """ - # for implementations with no useful getsizeof (PyPy) objsize = 24 @@ -1405,7 +1403,6 @@ def is_monotonic_increasing(self) -> bool: return if the index is monotonic increasing (only equal or increasing) values. """ - if all(x.is_monotonic for x in self.levels): # If each level is sorted, we can operate on the codes directly. GH27495 return libalgos.is_lexsorted( @@ -1466,7 +1463,6 @@ def _hashed_indexing_key(self, key): ----- we need to stringify if we have mixed levels """ - if not isinstance(key, tuple): return hash_tuples(key) @@ -1526,7 +1522,6 @@ def _get_level_values(self, level, unique=False): ------- values : ndarray """ - lev = self.levels[level] level_codes = self.codes[level] name = self._names[level] @@ -1609,7 +1604,6 @@ def to_frame(self, index=True, name=None): -------- DataFrame """ - from pandas import DataFrame if name is not None: @@ -1736,7 +1730,6 @@ def _sort_levels_monotonic(self): ('b', 'bb')], ) """ - if self.is_lexsorted() and self.is_monotonic: return self @@ -1805,7 +1798,6 @@ def remove_unused_levels(self): >>> mi2.levels FrozenList([[1], ['a', 'b']]) """ - new_levels = [] new_codes = [] @@ -1870,7 +1862,6 @@ def __reduce__(self): def __setstate__(self, state): """Necessary for making this object picklable""" - if isinstance(state, dict): levels = state.get("levels") codes = state.get("codes") @@ -2486,7 +2477,6 @@ def get_slice_bound( MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ - if not isinstance(label, tuple): label = (label,) return self._partial_tup_index(label, side=side) @@ -2596,7 +2586,6 @@ def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int: -------- Index.get_loc : The get_loc method for (single-level) index. """ - if is_scalar(key) and isna(key): return -1 else: @@ -2751,7 +2740,6 @@ def get_loc_level(self, key, level=0, drop_level: bool = True): >>> mi.get_loc_level(['b', 'e']) (1, None) """ - # different name to distinguish from maybe_droplevels def maybe_mi_droplevels(indexer, levels, drop_level: bool): if not drop_level: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 536aa53c95fba..8e0f96a1dac7b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -462,7 +462,6 @@ def split_and_operate(self, mask, f, inplace: bool): ------- list of blocks """ - if mask is None: mask = np.broadcast_to(True, shape=self.shape) @@ -519,7 +518,6 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ - # turn it off completely if dtypes is False: return self @@ -663,7 +661,6 @@ def convert( of the block (if copy = True) by definition we are not an ObjectBlock here! """ - return self.copy() if copy else self def _can_hold_element(self, element: Any) -> bool: @@ -709,7 +706,6 @@ def replace( blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. 
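remove_unused_levels (hunk above) is the counterpart to the fact that slicing a MultiIndex keeps its original levels:

```python
import pandas as pd

mi = pd.MultiIndex.from_product([range(2), list("ab")])
mi2 = mi[2:]

mi2.levels                          # FrozenList([[0, 1], ['a', 'b']])
mi2.remove_unused_levels().levels   # FrozenList([[1], ['a', 'b']])
```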
""" - inplace = validate_bool_kwarg(inplace, "inplace") original_to_replace = to_replace @@ -945,7 +941,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) ------- a list of new blocks, the result of the putmask """ - new_values = self.values if inplace else self.values.copy() new = getattr(new, "values", new) @@ -1055,7 +1050,6 @@ def coerce_to_target_dtype(self, other): we can also safely try to coerce to the same dtype and will receive the same block """ - # if we cannot then coerce to object dtype, _ = infer_dtype_from(other, pandas_dtype=True) @@ -1188,7 +1182,6 @@ def _interpolate_with_fill( downcast=None, ): """ fillna but using the interpolate machinery """ - inplace = validate_bool_kwarg(inplace, "inplace") # if we are coercing, then don't force the conversion @@ -1232,7 +1225,6 @@ def _interpolate( **kwargs, ): """ interpolate using scipy wrappers """ - inplace = validate_bool_kwarg(inplace, "inplace") data = self.values if inplace else self.values.copy() @@ -1280,7 +1272,6 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): Take values according to indexer and return them as a block.bb """ - # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock # so need to preserve types # sparse is treated like an ndarray, but needs .get_values() shaping @@ -1319,7 +1310,6 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]: def shift(self, periods, axis=0, fill_value=None): """ shift the block by periods, possibly upcast """ - # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also new_values, fill_value = maybe_upcast(self.values, fill_value) @@ -1579,7 +1569,6 @@ def _replace_coerce( ------- A new block if there is anything to replace or the original block. """ - if mask.any(): if not regex: self = self.coerce_to_target_dtype(value) @@ -1861,7 +1850,6 @@ def _can_hold_element(self, element: Any) -> bool: def _slice(self, slicer): """ return a slice of my values """ - # slice the category # return same dims as we currently have @@ -2064,7 +2052,6 @@ def to_native_types( **kwargs, ): """ convert to our native types format, slicing if desired """ - values = self.values if slicer is not None: values = values[:, slicer] @@ -2251,7 +2238,6 @@ def to_native_types( self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs ): """ convert to our native types format, slicing if desired """ - values = self.values i8values = self.values.view("i8") @@ -2529,7 +2515,6 @@ def should_store(self, value): def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ - values = self.values if slicer is not None: values = values[:, slicer] @@ -2622,7 +2607,6 @@ def convert( can return multiple blocks! 
""" - # operate column-by-column def f(mask, val, idx): shape = val.shape @@ -2924,7 +2908,6 @@ def to_dense(self): def to_native_types(self, slicer=None, na_rep="", quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ - values = self.values if slicer is not None: # Categorical is always one dimension @@ -3060,7 +3043,6 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None): def _extend_blocks(result, blocks=None): """ return a new extended blocks, given the result """ - if blocks is None: blocks = [] if isinstance(result, list): @@ -3156,7 +3138,6 @@ def _putmask_smart(v, mask, n): -------- ndarray.putmask """ - # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index c75373b82305c..fdb57562e46ad 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -409,7 +409,6 @@ def _trim_join_unit(join_unit, length): Extra items that didn't fit are returned as a separate block. """ - if 0 not in join_unit.indexers: extra_indexers = join_unit.indexers diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 798386825d802..9dd4312a39525 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -78,7 +78,6 @@ def masked_rec_array_to_mgr(data, index, columns, dtype, copy: bool): """ Extract from a masked rec array and create the manager. """ - # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) @@ -555,7 +554,6 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): tuple arrays, columns """ - if columns is None: gen = (list(x.keys()) for x in data) sort = not any(isinstance(d, dict) for d in data) @@ -603,7 +601,6 @@ def sanitize_index(data, index: Index): Sanitize an index type to return an ndarray of the underlying, pass through a non-Index. 
""" - if len(data) != len(index): raise ValueError("Length of values does not match length of index") diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 08ae0b02169d4..7f8fdc886313e 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -357,7 +357,6 @@ def apply(self, f, filter=None, **kwargs): ------- BlockManager """ - result_blocks = [] # filter kwarg is used in replace-* family of methods @@ -453,7 +452,6 @@ def quantile( ------- Block Manager (new object) """ - # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 @@ -569,7 +567,6 @@ def replace(self, value, **kwargs): def replace_list(self, src_list, dest_list, inplace=False, regex=False): """ do a list replace """ - inplace = validate_bool_kwarg(inplace, "inplace") # figure out our mask a-priori to avoid repeated replacements @@ -1246,7 +1243,6 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): ------- new_blocks : list of Block """ - allow_fill = fill_tuple is not None sl_type, slobj, sllen = _preprocess_slice_or_indexer( @@ -1777,7 +1773,6 @@ def _simple_blockify(tuples, dtype): def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ - # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) @@ -1843,7 +1838,6 @@ def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ - # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 2bf2be082f639..422afd061762b 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -224,7 +224,6 @@ def _maybe_get_mask( ------- Optional[np.ndarray] """ - if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): # Boolean data cannot contain nulls, so signal via mask being None @@ -279,7 +278,6 @@ def _get_values( fill_value : Any fill value used """ - # In _get_values is only called from within nanops, and in all cases # with scalar fill_value. 
This guarantee is important for the # maybe_upcast_putmask call below @@ -338,7 +336,6 @@ def _na_ok_dtype(dtype) -> bool: def _wrap_results(result, dtype: Dtype, fill_value=None): """ wrap our results if needed """ - if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if fill_value is None: # GH#24293 @@ -833,7 +830,6 @@ def nansem( >>> nanops.nansem(s) 0.5773502691896258 """ - # This checks if non-numeric-like data is passed with numeric_only=False # and raises a TypeError otherwise nanvar(values, axis, skipna, ddof=ddof, mask=mask) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 0312c11a6d590..f3c1a609d50a1 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -254,7 +254,6 @@ def _get_opstr(op): ------- op_str : string or None """ - return { operator.add: "+", radd: "+", @@ -430,7 +429,6 @@ def column_op(a, b): def _align_method_SERIES(left, right, align_asobject=False): """ align lhs and rhs Series """ - # ToDo: Different from _align_method_FRAME, list, tuple and ndarray # are not coerced here # because Series has inconsistencies described in #13637 diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 3302ed9c219e6..5d53856729d0c 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -175,7 +175,6 @@ def arithmetic_op( ndarrray or ExtensionArray Or a 2-tuple of these in the case of divmod or rdivmod. """ - from pandas.core.ops import maybe_upcast_for_op # NB: We assume that extract_array has already been called @@ -218,7 +217,6 @@ def comparison_op( ------- ndarrray or ExtensionArray """ - # NB: We assume extract_array has already been called on left and right lvalues = left rvalues = right @@ -322,7 +320,6 @@ def logical_op( ------- ndarrray or ExtensionArray """ - fill_int = lambda x: x def fill_bool(x, left=None): diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py index f4b16cf4a0cf2..5c83591b0e71e 100644 --- a/pandas/core/ops/common.py +++ b/pandas/core/ops/common.py @@ -43,7 +43,6 @@ def _unpack_zerodim_and_defer(method, name: str): ------- method """ - is_cmp = name.strip("__") in {"eq", "ne", "lt", "le", "gt", "ge"} @wraps(method) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 94ff1f0056663..98910a9baf962 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -183,7 +183,6 @@ def _get_binner(self): Create the BinGrouper, assume that self.set_grouper(obj) has already been called. """ - binner, bins, binlabels = self._get_binner_for_time() assert len(bins) == len(binlabels) bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer) @@ -345,7 +344,6 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): """ Re-evaluate the obj with a groupby aggregation. 
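_align_method_SERIES (above) aligns both operands on the union of their indexes before the arithmetic op runs; labels present on only one side come back as NaN:

```python
import pandas as pd

a = pd.Series([1, 2], index=["a", "b"])
b = pd.Series([10, 20], index=["b", "c"])

a + b
# a     NaN
# b    12.0
# c     NaN
```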
""" - if grouper is None: self._set_binner() grouper = self.grouper @@ -397,7 +395,6 @@ def _apply_loffset(self, result): result : Series or DataFrame the result of resample """ - needs_offset = ( isinstance(self.loffset, (DateOffset, timedelta, np.timedelta64)) and isinstance(result.index, DatetimeIndex) @@ -1158,7 +1155,6 @@ def _downsample(self, how, **kwargs): how : string / cython mapped function **kwargs : kw args passed to how function """ - # we may need to actually resample as if we are timestamps if self.kind == "timestamp": return super()._downsample(how, **kwargs) @@ -1202,7 +1198,6 @@ def _upsample(self, method, limit=None, fill_value=None): .fillna """ - # we may need to actually resample as if we are timestamps if self.kind == "timestamp": return super()._upsample(method, limit=limit, fill_value=fill_value) @@ -1277,7 +1272,6 @@ def get_resampler_for_grouping( """ Return our appropriate resampler when grouping as well. """ - # .resample uses 'on' similar to how .groupby uses 'key' kwargs["key"] = kwargs.pop("on", None) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 480c5279ad3f6..49ac1b6cfa52b 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -108,7 +108,6 @@ def _groupby_and_merge( check_duplicates: bool, default True should we check & clean duplicates """ - pieces = [] if not isinstance(by, (list, tuple)): by = [by] diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index b047e163c5565..b04e4e1ac4d48 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -567,7 +567,6 @@ def crosstab( b 0 1 0 c 0 0 0 """ - index = com.maybe_make_list(index) columns = com.maybe_make_list(columns) diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index a18b45a077be0..e499158a13b0c 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -513,7 +513,6 @@ def _format_labels( bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None ): """ based on the dtype, return our labels """ - closed = "right" if right else "left" if is_datetime64tz_dtype(dtype): @@ -544,7 +543,6 @@ def _preprocess_for_cut(x): input to array, strip the index information and store it separately """ - # Check that the passed array is a Pandas or Numpy object # We don't want to strip away a Pandas data-type here (e.g. datetimetz) ndim = getattr(x, "ndim", None) diff --git a/pandas/core/series.py b/pandas/core/series.py index 7d74d32bf5e14..1f2ea9990c90f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -396,7 +396,6 @@ def _set_axis(self, axis, labels, fastpath: bool = False) -> None: """ Override generic, we want to set the _typ here. """ - if not fastpath: labels = ensure_index(labels) @@ -540,7 +539,6 @@ def _internal_get_values(self): numpy.ndarray Data of the Series. """ - return self._data.get_values() # ops @@ -1402,7 +1400,6 @@ def to_string( str or None String representation of Series if ``buf=None``, otherwise None. 
""" - formatter = fmt.SeriesFormatter( self, name=name, @@ -2171,7 +2168,6 @@ def quantile(self, q=0.5, interpolation="linear"): 0.75 3.25 dtype: float64 """ - validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry @@ -2583,7 +2579,6 @@ def _binop(self, other, func, level=None, fill_value=None): ------- Series """ - if not isinstance(other, Series): raise AssertionError("Other operand must be Series") diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 51c154aa47518..5496eca46b992 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -376,7 +376,6 @@ def compress_group_index(group_index, sort: bool = True): space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). """ - size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT) table = hashtable.Int64HashTable(size_hint) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index a4648186477d6..3a7e3fdab5dca 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -687,7 +687,6 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): 2 NaN dtype: object """ - # Check whether repl is valid (GH 13438, GH 15055) if not (isinstance(repl, str) or callable(repl)): raise TypeError("repl must be a string or callable") @@ -1085,7 +1084,6 @@ def str_extractall(arr, pat, flags=0): B 0 b 1 C 0 NaN 1 """ - regex = re.compile(pat, flags=flags) # the regex must contain capture groups. if regex.groups == 0: @@ -1358,7 +1356,6 @@ def str_find(arr, sub, start=0, end=None, side="left"): Series or Index Indexes where substring is found. """ - if not isinstance(sub, str): msg = f"expected a string object, not {type(sub).__name__}" raise TypeError(msg) @@ -1930,7 +1927,6 @@ def forbid_nonstring_types(forbidden, name=None): TypeError If the inferred type of the underlying data is in `forbidden`. 
""" - # deal with None forbidden = [] if forbidden is None else forbidden diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 3f0cfce39f6f9..1d933cf431b4b 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -111,7 +111,6 @@ def to_timedelta(arg, unit="ns", errors="raise"): def _coerce_scalar_to_timedelta_type(r, unit="ns", errors="raise"): """Convert string 'r' to a timedelta object.""" - try: result = Timedelta(r, unit) except ValueError: @@ -128,7 +127,6 @@ def _coerce_scalar_to_timedelta_type(r, unit="ns", errors="raise"): def _convert_listlike(arg, unit="ns", errors="raise", name=None): """Convert a list of objects to a timedelta index object.""" - if isinstance(arg, (list, tuple)) or not hasattr(arg, "dtype"): # This is needed only to ensure that in the case where we end up # returning arg (errors == "ignore"), and where the input is a diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 3366f10b92604..160d328ec16ec 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -269,7 +269,6 @@ def hash_array( ------- 1d uint64 numpy array of hash values, same length as the vals """ - if not hasattr(vals, "dtype"): raise TypeError("must pass a ndarray-like") dtype = vals.dtype @@ -340,7 +339,6 @@ def _hash_scalar( ------- 1d uint64 numpy array of hash value, of length 1 """ - if isna(val): # this is to be consistent with the _hash_categorical implementation return np.array([np.iinfo(np.uint64).max], dtype="u8") diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py index 127957943d2ff..d6e8194c861fa 100644 --- a/pandas/core/window/numba_.py +++ b/pandas/core/window/numba_.py @@ -110,7 +110,6 @@ def generate_numba_apply_func( ------- Numba function """ - if engine_kwargs is None: engine_kwargs = {} diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 5c18796deb07a..f29cd428b7bad 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -149,7 +149,6 @@ def _create_blocks(self): """ Split data into blocks & return conformed data. """ - obj = self._selected_obj # filter out the on from the object @@ -172,7 +171,6 @@ def _gotitem(self, key, ndim, subset=None): subset : object, default None subset to act on """ - # create a new object to prevent aliasing if subset is None: subset = self.obj @@ -238,7 +236,6 @@ def __repr__(self) -> str: """ Provide a nice str repr of our rolling object. """ - attrs_list = ( f"{attr_name}={getattr(self, attr_name)}" for attr_name in self._attributes @@ -284,7 +281,6 @@ def _wrap_result(self, result, block=None, obj=None): """ Wrap a single result. """ - if obj is None: obj = self._selected_obj index = obj.index @@ -310,7 +306,6 @@ def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries: obj : conformed data (may be resampled) exclude: list of columns to exclude, default to None """ - from pandas import Series, concat final = [] @@ -1021,7 +1016,6 @@ def _get_window( window : ndarray the window, weights """ - window = self.window if isinstance(window, (list, tuple, np.ndarray)): return com.asarray_tuplesafe(window).astype(float) diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index 6d76d7de407b1..f4bd14ad5c679 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -500,7 +500,6 @@ def determine_clipboard(): Determine the OS/platform and set the copy() and paste() functions accordingly. 
""" - global Foundation, AppKit, qtpy, PyQt4, PyQt5 # Setup for the CYGWIN platform: diff --git a/pandas/io/common.py b/pandas/io/common.py index c4772895afd1e..beb6c9d97aff3 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -296,7 +296,6 @@ def infer_compression( ------ ValueError on invalid compression specified. """ - # No compression has been explicitly specified if compression is None: return None diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index be52523e486af..ab2d97e6026d1 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -51,7 +51,6 @@ def _convert_to_style(cls, style_dict): ---------- style_dict : style dictionary to convert """ - from openpyxl.style import Style xls_style = Style() @@ -92,7 +91,6 @@ def _convert_to_style_kwargs(cls, style_dict): value has been replaced with a native openpyxl style object of the appropriate class. """ - _style_key_map = {"borders": "border"} style_kwargs = {} @@ -128,7 +126,6 @@ def _convert_to_color(cls, color_spec): ------- color : openpyxl.styles.Color """ - from openpyxl.styles import Color if isinstance(color_spec, str): @@ -164,7 +161,6 @@ def _convert_to_font(cls, font_dict): ------- font : openpyxl.styles.Font """ - from openpyxl.styles import Font _font_key_map = { @@ -202,7 +198,6 @@ def _convert_to_stop(cls, stop_seq): ------- stop : list of openpyxl.styles.Color """ - return map(cls._convert_to_color, stop_seq) @classmethod @@ -230,7 +225,6 @@ def _convert_to_fill(cls, fill_dict): ------- fill : openpyxl.styles.Fill """ - from openpyxl.styles import PatternFill, GradientFill _pattern_fill_key_map = { @@ -286,7 +280,6 @@ def _convert_to_side(cls, side_spec): ------- side : openpyxl.styles.Side """ - from openpyxl.styles import Side _side_key_map = {"border_style": "style"} @@ -329,7 +322,6 @@ def _convert_to_border(cls, border_dict): ------- border : openpyxl.styles.Border """ - from openpyxl.styles import Border _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"} @@ -365,7 +357,6 @@ def _convert_to_alignment(cls, alignment_dict): ------- alignment : openpyxl.styles.Alignment """ - from openpyxl.styles import Alignment return Alignment(**alignment_dict) @@ -399,7 +390,6 @@ def _convert_to_protection(cls, protection_dict): Returns ------- """ - from openpyxl.styles import Protection return Protection(**protection_dict) diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index e7a132b73e076..16f800a6de2c9 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -60,7 +60,6 @@ def _parse_cell(cell_contents, cell_typ): """ converts the contents of the cell into a pandas appropriate object """ - if cell_typ == XL_CELL_DATE: # Use the newer xlrd datetime handling. diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index 6d9ff9be5249a..85a1bb031f457 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -85,7 +85,6 @@ def convert(cls, style_dict, num_format_str=None): style_dict : style dictionary to convert num_format_str : optional number format string """ - # Create a XlsxWriter format object. props = {} @@ -191,7 +190,6 @@ def save(self): """ Save workbook to disk. 
""" - return self.book.close() def write_cells( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 35a6870c1194b..55d534f975b68 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -932,7 +932,6 @@ def to_latex( """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ - from pandas.io.formats.latex import LatexFormatter return LatexFormatter( @@ -1135,7 +1134,6 @@ def format_array( ------- List[str] """ - fmt_klass: Type[GenericArrayFormatter] if is_datetime64_dtype(values.dtype): fmt_klass = Datetime64Formatter @@ -1296,9 +1294,7 @@ def _value_formatter( float_format: Optional[float_format_type] = None, threshold: Optional[Union[float, int]] = None, ) -> Callable: - """Returns a function to be applied on each value to format it - """ - + """Returns a function to be applied on each value to format it""" # the float_format parameter supersedes self.float_format if float_format is None: float_format = self.float_format @@ -1346,7 +1342,6 @@ def get_result_as_array(self) -> np.ndarray: Returns the float values converted into strings using the parameters given at initialisation, as a numpy array """ - if self.formatter is not None: return np.array([self.formatter(x) for x in self.values]) @@ -1461,7 +1456,6 @@ def __init__( def _format_strings(self) -> List[str]: """ we by definition have DO NOT have a TZ """ - values = self.values if not isinstance(values, DatetimeIndex): @@ -1541,7 +1535,6 @@ def format_percentiles( >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] """ - percentiles = np.asarray(percentiles) # It checks for np.NaN as well @@ -1642,7 +1635,6 @@ def _get_format_datetime64_from_values( values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str] ) -> Optional[str]: """ given values and a date_format, return a string format """ - if isinstance(values, np.ndarray) and values.ndim > 1: # We don't actually care about the order of values, and DatetimeIndex # only accepts 1D values @@ -1657,7 +1649,6 @@ def _get_format_datetime64_from_values( class Datetime64TZFormatter(Datetime64Formatter): def _format_strings(self) -> List[str]: """ we by definition have a TZ """ - values = self.values.astype(object) is_dates_only = _is_dates_only(values) formatter = self.formatter or _get_format_datetime64( @@ -1698,7 +1689,6 @@ def _get_format_timedelta64( If box, then show the return in quotes """ - values_int = values.astype(np.int64) consider_values = values_int != iNaT @@ -1913,7 +1903,6 @@ def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> Non See also EngFormatter. """ - set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) set_option("display.column_space", max(12, accuracy + 9)) diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 8ab56437d5c05..935762598f78a 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -56,7 +56,6 @@ def write_result(self, buf: IO[str]) -> None: Render a DataFrame to a LaTeX tabular, longtable, or table/tabular environment output. 
""" - # string representation of the columns if len(self.frame.columns) == 0 or len(self.frame.index) == 0: info_line = "Empty {name}\nColumns: {col}\nIndex: {idx}".format( diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 565752e269d79..eca5a3fb18e60 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -802,7 +802,6 @@ def where( -------- Styler.applymap """ - if other is None: other = "" diff --git a/pandas/io/html.py b/pandas/io/html.py index c676bfb1f0c74..ee8e96b4b3344 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -395,7 +395,6 @@ def _parse_thead_tbody_tfoot(self, table_html): - Move rows from bottom of body to footer only if all elements inside row are