diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 1a8329d77f61e..4f618940cf9b2 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1686,7 +1686,7 @@ def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
 
     def _wrap_aggregated_output(
         self,
-        output: Mapping[base.OutputKey, Series | np.ndarray],
+        output: Mapping[base.OutputKey, Series | ArrayLike],
     ) -> DataFrame:
         """
         Wraps the output of DataFrameGroupBy aggregations into the expected result.
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ce7f0de616e18..2436391580dcb 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1056,7 +1056,7 @@ def _set_result_index_ordered(
 
         return result
 
-    def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
+    def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, ArrayLike]):
         raise AbstractMethodError(self)
 
     def _wrap_transformed_output(self, output: Mapping[base.OutputKey, ArrayLike]):
@@ -1222,7 +1222,7 @@ def _python_agg_general(self, func, *args, **kwargs):
         f = lambda x: func(x, *args, **kwargs)
 
         # iterate through "columns" ex exclusions to populate output dict
-        output: dict[base.OutputKey, np.ndarray] = {}
+        output: dict[base.OutputKey, ArrayLike] = {}
 
         for idx, obj in enumerate(self._iterate_slices()):
             name = obj.name
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 1dcb2b97476a3..96f5b2cfb9d81 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -923,7 +923,8 @@ def _cython_operation(
             **kwargs,
         )
 
-    def agg_series(self, obj: Series, func: F):
+    @final
+    def agg_series(self, obj: Series, func: F) -> tuple[ArrayLike, np.ndarray]:
         # Caller is responsible for checking ngroups != 0
         assert self.ngroups != 0
 
@@ -952,8 +953,9 @@ def agg_series(self, obj: Series, func: F):
                 raise
         return self._aggregate_series_pure_python(obj, func)
 
-    @final
-    def _aggregate_series_fast(self, obj: Series, func: F):
+    def _aggregate_series_fast(
+        self, obj: Series, func: F
+    ) -> tuple[ArrayLike, np.ndarray]:
         # At this point we have already checked that
         #  - obj.index is not a MultiIndex
         #  - obj is backed by an ndarray, not ExtensionArray
@@ -1157,18 +1159,14 @@ def groupings(self) -> list[grouper.Grouping]:
             for lvl, name in zip(self.levels, self.names)
         ]
 
-    def agg_series(self, obj: Series, func: F):
-        # Caller is responsible for checking ngroups != 0
-        assert self.ngroups != 0
-        assert len(self.bins) > 0  # otherwise we'd get IndexError in get_result
-
-        if is_extension_array_dtype(obj.dtype):
-            # preempt SeriesBinGrouper from raising TypeError
-            return self._aggregate_series_pure_python(obj, func)
-
-        elif obj.index._has_complex_internals:
-            return self._aggregate_series_pure_python(obj, func)
-
+    def _aggregate_series_fast(
+        self, obj: Series, func: F
+    ) -> tuple[ArrayLike, np.ndarray]:
+        # At this point we have already checked that
+        #  - obj.index is not a MultiIndex
+        #  - obj is backed by an ndarray, not ExtensionArray
+        #  - ngroups != 0
+        #  - len(self.bins) > 0
         grouper = libreduction.SeriesBinGrouper(obj, func, self.bins)
         return grouper.get_result()
 