TST: fix pyarrow xfails for date/time dtypes #51281

Merged: 18 commits, Feb 21, 2023
8 changes: 8 additions & 0 deletions pandas/_libs/missing.pyx
@@ -338,6 +338,14 @@ def _create_binary_propagating_op(name, is_divmod=False):
         elif is_cmp and isinstance(other, (date, time, timedelta)):
             return NA

+        elif isinstance(other, date):
+            if name in ["__sub__", "__rsub__"]:
+                return NA
+
+        elif isinstance(other, timedelta):
+            if name in ["__sub__", "__rsub__", "__add__", "__radd__"]:
+                return NA
+
         return NotImplemented

     method.__name__ = name
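The new branches make `pd.NA` propagate through arithmetic with plain `datetime.date` and `datetime.timedelta` objects instead of falling through to `NotImplemented`. A minimal sketch of the resulting behavior, assuming a pandas build that includes this patch:

```python
import datetime

import pandas as pd

# Assumes a pandas build that includes this patch.
# date only gets __sub__/__rsub__ propagation; timedelta also covers add/radd.
print(pd.NA - datetime.date(2023, 2, 21))    # <NA>
print(datetime.date(2023, 2, 21) - pd.NA)    # <NA>
print(pd.NA + datetime.timedelta(days=1))    # <NA>
```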
45 changes: 24 additions & 21 deletions pandas/tests/extension/test_arrow.py
@@ -384,7 +384,7 @@ def test_accumulate_series(self, data, all_numeric_accumulations, skipna, request):
             # renders the exception messages even when not showing them
             pytest.skip(f"{all_numeric_accumulations} not implemented for pyarrow < 9")

-        elif all_numeric_accumulations == "cumsum" and (pa.types.is_boolean(pa_type)):
+        elif all_numeric_accumulations == "cumsum" and pa.types.is_boolean(pa_type):
             request.node.add_marker(
                 pytest.mark.xfail(
                     reason=f"{all_numeric_accumulations} not implemented for {pa_type}",
@@ -859,17 +859,7 @@ def test_factorize(self, data_for_grouping, request):

     def test_combine_add(self, data_repeated, request):
         pa_dtype = next(data_repeated(1)).dtype.pyarrow_dtype
-        if pa.types.is_duration(pa_dtype):
-            # TODO: this fails on the scalar addition constructing 'expected'
-            # but not in the actual 'combine' call, so may be salvage-able
-            mark = pytest.mark.xfail(
-                raises=TypeError,
-                reason=f"{pa_dtype} cannot be added to {pa_dtype}",
-            )
-            request.node.add_marker(mark)
-            super().test_combine_add(data_repeated)
-
-        elif pa.types.is_temporal(pa_dtype):
+        if pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype):
             # analogous to datetime64, these cannot be added
             orig_data1, orig_data2 = data_repeated(2)
             s1 = pd.Series(orig_data1)
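The removed duration-specific xfail and the tightened condition reflect that non-duration temporal values, like plain Python dates, have no defined addition, so the test combines them with subtraction instead. A small standard-library illustration (not part of the diff):

```python
import datetime

# Dates cannot be added, mirroring datetime64 + datetime64 in numpy;
# only their difference (a timedelta) is defined.
try:
    datetime.date(2023, 2, 21) + datetime.date(2023, 2, 22)
except TypeError as err:
    print(err)  # unsupported operand type(s) for +: ...
print(datetime.date(2023, 2, 22) - datetime.date(2023, 2, 21))  # 1 day, 0:00:00
```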
@@ -915,14 +905,24 @@ def _patch_combine(self, obj, other, op):
         pa_expected = pa.array(expected_data._values)

         if pa.types.is_duration(pa_expected.type):
-            # pyarrow sees sequence of datetime/timedelta objects and defaults
-            # to "us" but the non-pointwise op retains unit
-            unit = original_dtype.pyarrow_dtype.unit
-            if type(other) in [datetime, timedelta] and unit in ["s", "ms"]:
-                # pydatetime/pytimedelta objects have microsecond reso, so we
-                # take the higher reso of the original and microsecond. Note
-                # this matches what we would do with DatetimeArray/TimedeltaArray
-                unit = "us"
+            orig_pa_type = original_dtype.pyarrow_dtype
+            if pa.types.is_date(orig_pa_type):
+                if pa.types.is_date64(orig_pa_type):
+                    # TODO: why is this different vs date32?
+                    unit = "ms"
+                else:
+                    unit = "s"
+            else:
+                # pyarrow sees sequence of datetime/timedelta objects and defaults
+                # to "us" but the non-pointwise op retains unit
+                # timestamp or duration
+                unit = orig_pa_type.unit
+                if type(other) in [datetime, timedelta] and unit in ["s", "ms"]:
+                    # pydatetime/pytimedelta objects have microsecond reso, so we
+                    # take the higher reso of the original and microsecond. Note
+                    # this matches what we would do with DatetimeArray/TimedeltaArray
+                    unit = "us"
+
             pa_expected = pa_expected.cast(f"duration[{unit}]")
         else:
             pa_expected = pa_expected.cast(original_dtype.pyarrow_dtype)
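The comment about pyarrow defaulting to "us" can be checked directly: building an array from Python timedelta objects infers microsecond resolution, which is why the expected result is cast back to the original unit. A quick illustration (not part of the diff):

```python
import datetime

import pyarrow as pa

# pa.array infers duration[us] from Python timedelta objects, regardless of
# the unit of the pyarrow dtype the test data originally used.
arr = pa.array([datetime.timedelta(seconds=1), datetime.timedelta(seconds=2)])
print(arr.type)  # duration[us]
```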
@@ -979,7 +979,7 @@ def _get_arith_xfail_marker(self, opname, pa_dtype):
f"for {pa_dtype}"
)
)
elif arrow_temporal_supported:
elif arrow_temporal_supported and pa.types.is_time(pa_dtype):
mark = pytest.mark.xfail(
raises=TypeError,
reason=(
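With the extra `pa.types.is_time(pa_dtype)` check, the xfail marker is applied only to pyarrow time dtypes rather than to every dtype for which temporal arithmetic is supported. For reference, the predicate distinguishes time from other temporal types (illustration only, not part of the diff):

```python
import pyarrow as pa

print(pa.types.is_time(pa.time64("us")))     # True
print(pa.types.is_time(pa.timestamp("us")))  # False
print(pa.types.is_time(pa.duration("us")))   # False
```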
@@ -1024,6 +1024,7 @@ def test_arith_series_with_scalar(
             )
             or pa.types.is_duration(pa_dtype)
             or pa.types.is_timestamp(pa_dtype)
+            or pa.types.is_date(pa_dtype)
         ):
             # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does
             # not upcast
@@ -1055,6 +1056,7 @@ def test_arith_frame_with_scalar(
             )
             or pa.types.is_duration(pa_dtype)
             or pa.types.is_timestamp(pa_dtype)
+            or pa.types.is_date(pa_dtype)
         ):
             # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does
             # not upcast
@@ -1107,6 +1109,7 @@ def test_arith_series_with_array(
             )
             or pa.types.is_duration(pa_dtype)
             or pa.types.is_timestamp(pa_dtype)
+            or pa.types.is_date(pa_dtype)
         ):
             monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
         self.check_opname(ser, op_name, other, exc=self.series_array_exc)
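These three arithmetic tests now route date dtypes through `_patch_combine` as well, since the base class's `_combine` produces int64 results that `ArrowExtensionArray` does not upcast. The predicate added to each condition covers both date widths (illustration only, not part of the diff):

```python
import pyarrow as pa

print(pa.types.is_date(pa.date32()))        # True
print(pa.types.is_date(pa.date64()))        # True
print(pa.types.is_date(pa.timestamp("s")))  # False
```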