Skip to content

⬆️ UPGRADE: Autoupdate pre-commit config #39521

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/asottile/pyupgrade
rev: v2.7.4
rev: v2.8.0
hooks:
- id: pyupgrade
args: [--py37-plus]
Expand Down Expand Up @@ -192,6 +192,6 @@ repos:
files: ^pandas/
exclude: ^pandas/tests/
- repo: https://github.com/MarcoGorelli/no-string-hints
rev: v0.1.6
rev: v0.1.7
hooks:
- id: no-string-hints
32 changes: 16 additions & 16 deletions pandas/_testing/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,24 +107,24 @@
_N = 30
_K = 4

UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES

FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]

DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]

BOOL_DTYPES: List[Dtype] = [bool, "bool"]
BYTES_DTYPES: List[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: List[Dtype] = [object, "object"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]

ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
Expand Down Expand Up @@ -417,7 +417,7 @@ def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
----------
k: length of each of the index instances
"""
make_index_funcs: List[Callable[..., Index]] = [
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
Expand Down Expand Up @@ -865,7 +865,7 @@ def skipna_wrapper(x):
return skipna_wrapper


def convert_rows_list_to_csv_str(rows_list: List[str]):
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.

Expand All @@ -885,7 +885,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]):
return sep.join(rows_list) + sep


def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.

Expand Down
2 changes: 1 addition & 1 deletion pandas/compat/pickle_compat.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ def load_newobj_ex(self):
pass


def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
def load(fh, encoding: str | None = None, is_verbose: bool = False):
"""
Load a pickle, with a provided encoding,

Expand Down
26 changes: 13 additions & 13 deletions pandas/core/aggregation.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,8 @@


def reconstruct_func(
func: Optional[AggFuncType], **kwargs
) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType | None, list[str] | None, list[int] | None]:
"""
This is the internal function to reconstruct func given if there is relabeling
or not and also normalize the keyword to get new order of columns.
Expand Down Expand Up @@ -86,8 +86,8 @@ def reconstruct_func(
(False, 'min', None, None)
"""
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
columns: Optional[List[str]] = None
order: Optional[List[int]] = None
columns: list[str] | None = None
order: list[int] | None = None

if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
Expand Down Expand Up @@ -134,7 +134,7 @@ def is_multi_agg_with_relabel(**kwargs) -> bool:
)


def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
def normalize_keyword_aggregation(kwargs: dict) -> tuple[dict, list[str], list[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
Expand Down Expand Up @@ -190,8 +190,8 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i


def _make_unique_kwarg_list(
seq: Sequence[Tuple[Any, Any]]
) -> Sequence[Tuple[Any, Any]]:
seq: Sequence[tuple[Any, Any]]
) -> Sequence[tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list

Expand Down Expand Up @@ -295,10 +295,10 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:

def relabel_result(
result: FrameOrSeries,
func: Dict[str, List[Union[Callable, str]]],
func: dict[str, list[Callable | str]],
columns: Iterable[Hashable],
order: Iterable[int],
) -> Dict[Hashable, Series]:
) -> dict[Hashable, Series]:
"""
Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
Expand All @@ -325,7 +325,7 @@ def relabel_result(
reordered_indexes = [
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
]
reordered_result_in_dict: Dict[Hashable, Series] = {}
reordered_result_in_dict: dict[Hashable, Series] = {}
idx = 0

reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
Expand Down Expand Up @@ -369,7 +369,7 @@ def relabel_result(

def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
) -> tuple[list[str], list[str | Callable[..., Any]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Expand Down Expand Up @@ -495,7 +495,7 @@ def transform_dict_like(
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")

results: Dict[Hashable, FrameOrSeriesUnion] = {}
results: dict[Hashable, FrameOrSeriesUnion] = {}
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
Expand Down Expand Up @@ -536,7 +536,7 @@ def transform_str_or_callable(

def agg_list_like(
obj: AggObjType,
arg: List[AggFuncTypeBase],
arg: list[AggFuncTypeBase],
_axis: int,
) -> FrameOrSeriesUnion:
"""
Expand Down
16 changes: 8 additions & 8 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,15 +70,15 @@
from pandas import Categorical, DataFrame, Index, Series
from pandas.core.arrays import DatetimeArray, TimedeltaArray

_shared_docs: Dict[str, str] = {}
_shared_docs: dict[str, str] = {}


# --------------- #
# dtype access #
# --------------- #
def _ensure_data(
values: ArrayLike, dtype: Optional[DtypeObj] = None
) -> Tuple[np.ndarray, DtypeObj]:
values: ArrayLike, dtype: DtypeObj | None = None
) -> tuple[np.ndarray, DtypeObj]:
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
Expand Down Expand Up @@ -495,7 +495,7 @@ def f(c, v):

def factorize_array(
values: np.ndarray, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
) -> Tuple[np.ndarray, np.ndarray]:
) -> tuple[np.ndarray, np.ndarray]:
"""
Factorize an array-like to codes and uniques.

Expand Down Expand Up @@ -558,9 +558,9 @@ def factorize_array(
def factorize(
values,
sort: bool = False,
na_sentinel: Optional[int] = -1,
size_hint: Optional[int] = None,
) -> Tuple[np.ndarray, Union[np.ndarray, Index]]:
na_sentinel: int | None = -1,
size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
"""
Encode the object as an enumerated type or categorical variable.

Expand Down Expand Up @@ -2052,7 +2052,7 @@ def safe_sort(
na_sentinel: int = -1,
assume_unique: bool = False,
verify: bool = True,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
"""
Sort ``values`` and reorder corresponding ``codes``.

Expand Down
22 changes: 11 additions & 11 deletions pandas/core/apply.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,13 +58,13 @@ def frame_apply(
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type: Optional[str] = None,
result_type: str | None = None,
args=None,
kwds=None,
) -> FrameApply:
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
klass: Type[FrameApply]
klass: type[FrameApply]
if axis == 0:
klass = FrameRowApply
elif axis == 1:
Expand Down Expand Up @@ -104,7 +104,7 @@ def __init__(
obj: AggObjType,
func,
raw: bool,
result_type: Optional[str],
result_type: str | None,
args,
kwds,
):
Expand Down Expand Up @@ -144,7 +144,7 @@ def index(self) -> Index:
def apply(self) -> FrameOrSeriesUnion:
pass

def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:
def agg(self) -> tuple[FrameOrSeriesUnion | None, bool | None]:
"""
Provide an implementation for the aggregators.

Expand Down Expand Up @@ -188,7 +188,7 @@ def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:
# caller can react
return result, True

def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:
def maybe_apply_str(self) -> FrameOrSeriesUnion | None:
"""
Compute apply in case of a string.

Expand All @@ -212,7 +212,7 @@ def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:
raise ValueError(f"Operation {f} does not support axis=1")
return self.obj._try_aggregate_string_function(f, *self.args, **self.kwds)

def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]:
def maybe_apply_multiple(self) -> FrameOrSeriesUnion | None:
"""
Compute apply in case of a list-like or dict-like.

Expand Down Expand Up @@ -411,7 +411,7 @@ def apply_standard(self):
# wrap results
return self.wrap_results(results, res_index)

def apply_series_generator(self) -> Tuple[ResType, Index]:
def apply_series_generator(self) -> tuple[ResType, Index]:
assert callable(self.f)

series_gen = self.series_generator
Expand Down Expand Up @@ -664,11 +664,11 @@ def apply_standard(self) -> FrameOrSeriesUnion:


class GroupByApply(Apply):
obj: Union[SeriesGroupBy, DataFrameGroupBy]
obj: SeriesGroupBy | DataFrameGroupBy

def __init__(
self,
obj: Union[SeriesGroupBy, DataFrameGroupBy],
obj: SeriesGroupBy | DataFrameGroupBy,
func: AggFuncType,
args,
kwds,
Expand All @@ -690,11 +690,11 @@ def apply(self):

class ResamplerWindowApply(Apply):
axis = 0
obj: Union[Resampler, BaseWindow]
obj: Resampler | BaseWindow

def __init__(
self,
obj: Union[Resampler, BaseWindow],
obj: Resampler | BaseWindow,
func: AggFuncType,
args,
kwds,
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/arrays/_mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def unique(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
@classmethod
@doc(ExtensionArray._concat_same_type)
def _concat_same_type(
cls: Type[NDArrayBackedExtensionArrayT],
cls: type[NDArrayBackedExtensionArrayT],
to_concat: Sequence[NDArrayBackedExtensionArrayT],
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
Expand Down Expand Up @@ -236,8 +236,8 @@ def _validate_setitem_value(self, value):
return value

def __getitem__(
self: NDArrayBackedExtensionArrayT, key: Union[int, slice, np.ndarray]
) -> Union[NDArrayBackedExtensionArrayT, Any]:
self: NDArrayBackedExtensionArrayT, key: int | slice | np.ndarray
) -> NDArrayBackedExtensionArrayT | Any:
if lib.is_integer(key):
# fast-path
result = self._ndarray[key]
Expand Down Expand Up @@ -296,7 +296,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
raise TypeError(msg)

def _wrap_reduction_result(self, axis: Optional[int], result):
def _wrap_reduction_result(self, axis: int | None, result):
if axis is None or self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
Expand Down
Loading