-
-
Notifications
You must be signed in to change notification settings - Fork 18.6k
TYP: misc return types #57430
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
TYP: misc return types #57430
Changes from all commits
Commits
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -217,6 +217,8 @@ | |
FormattersType, | ||
Frequency, | ||
FromDictOrient, | ||
HashableT, | ||
HashableT2, | ||
IgnoreRaise, | ||
IndexKeyFunc, | ||
IndexLabel, | ||
|
@@ -239,6 +241,7 @@ | |
SortKind, | ||
StorageOptions, | ||
Suffixes, | ||
T, | ||
ToStataByteorder, | ||
ToTimestampHow, | ||
UpdateJoin, | ||
|
@@ -643,10 +646,10 @@ class DataFrame(NDFrame, OpsMixin): | |
__pandas_priority__ = 4000 | ||
|
||
@property | ||
def _constructor(self) -> Callable[..., DataFrame]: | ||
def _constructor(self) -> type[DataFrame]: | ||
return DataFrame | ||
|
||
def _constructor_from_mgr(self, mgr, axes): | ||
def _constructor_from_mgr(self, mgr, axes) -> DataFrame: | ||
if self._constructor is DataFrame: | ||
# we are pandas.DataFrame (or a subclass that doesn't override _constructor) | ||
return DataFrame._from_mgr(mgr, axes=axes) | ||
|
@@ -659,7 +662,7 @@ def _constructor_from_mgr(self, mgr, axes): | |
def _sliced_from_mgr(self, mgr, axes) -> Series: | ||
return Series._from_mgr(mgr, axes) | ||
|
||
def _constructor_sliced_from_mgr(self, mgr, axes): | ||
def _constructor_sliced_from_mgr(self, mgr, axes) -> Series: | ||
if self._constructor_sliced is Series: | ||
ser = self._sliced_from_mgr(mgr, axes) | ||
ser._name = None # caller is responsible for setting real name | ||
|
@@ -1353,7 +1356,7 @@ def _get_values_for_csv( | |
decimal: str, | ||
na_rep: str, | ||
quoting, # int csv.QUOTE_FOO from stdlib | ||
) -> Self: | ||
) -> DataFrame: | ||
# helper used by to_csv | ||
mgr = self._mgr.get_values_for_csv( | ||
float_format=float_format, | ||
|
@@ -1831,7 +1834,7 @@ def from_dict( | |
a b 1 3 | ||
c 2 4 | ||
""" | ||
index = None | ||
index: list | Index | None = None | ||
orient = orient.lower() # type: ignore[assignment] | ||
if orient == "index": | ||
if len(data) > 0: | ||
|
@@ -1857,7 +1860,7 @@ def from_dict( | |
else: | ||
realdata = data["data"] | ||
|
||
def create_index(indexlist, namelist): | ||
def create_index(indexlist, namelist) -> Index: | ||
index: Index | ||
if len(namelist) > 1: | ||
index = MultiIndex.from_tuples(indexlist, names=namelist) | ||
|
@@ -2700,6 +2703,42 @@ def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: | |
|
||
to_feather(self, path, **kwargs) | ||
|
||
@overload | ||
def to_markdown( | ||
self, | ||
buf: None = ..., | ||
*, | ||
mode: str = ..., | ||
index: bool = ..., | ||
storage_options: StorageOptions | None = ..., | ||
**kwargs, | ||
) -> str: | ||
... | ||
|
||
@overload | ||
def to_markdown( | ||
self, | ||
buf: FilePath | WriteBuffer[str], | ||
*, | ||
mode: str = ..., | ||
index: bool = ..., | ||
storage_options: StorageOptions | None = ..., | ||
**kwargs, | ||
) -> None: | ||
... | ||
|
||
@overload | ||
def to_markdown( | ||
self, | ||
buf: FilePath | WriteBuffer[str] | None, | ||
*, | ||
mode: str = ..., | ||
index: bool = ..., | ||
storage_options: StorageOptions | None = ..., | ||
**kwargs, | ||
) -> str | None: | ||
... | ||
|
||
@doc( | ||
Series.to_markdown, | ||
klass=_shared_doc_kwargs["klass"], | ||
|
@@ -2881,6 +2920,39 @@ def to_parquet( | |
**kwargs, | ||
) | ||
|
||
@overload | ||
def to_orc( | ||
self, | ||
path: None = ..., | ||
*, | ||
engine: Literal["pyarrow"] = ..., | ||
index: bool | None = ..., | ||
engine_kwargs: dict[str, Any] | None = ..., | ||
) -> bytes: | ||
... | ||
|
||
@overload | ||
def to_orc( | ||
self, | ||
path: FilePath | WriteBuffer[bytes], | ||
*, | ||
engine: Literal["pyarrow"] = ..., | ||
index: bool | None = ..., | ||
engine_kwargs: dict[str, Any] | None = ..., | ||
) -> None: | ||
... | ||
|
||
@overload | ||
def to_orc( | ||
self, | ||
path: FilePath | WriteBuffer[bytes] | None, | ||
*, | ||
engine: Literal["pyarrow"] = ..., | ||
index: bool | None = ..., | ||
engine_kwargs: dict[str, Any] | None = ..., | ||
) -> bytes | None: | ||
... | ||
|
||
def to_orc( | ||
self, | ||
path: FilePath | WriteBuffer[bytes] | None = None, | ||
|
@@ -4027,7 +4099,7 @@ def _setitem_slice(self, key: slice, value) -> None: | |
# backwards-compat, xref GH#31469 | ||
self.iloc[key] = value | ||
|
||
def _setitem_array(self, key, value): | ||
def _setitem_array(self, key, value) -> None: | ||
# also raises Exception if object array with NA values | ||
if com.is_bool_indexer(key): | ||
# bool indexer is indexing along rows | ||
|
@@ -4061,7 +4133,7 @@ def _setitem_array(self, key, value): | |
elif np.ndim(value) > 1: | ||
# list of lists | ||
value = DataFrame(value).values | ||
return self._setitem_array(key, value) | ||
self._setitem_array(key, value) | ||
|
||
else: | ||
self._iset_not_inplace(key, value) | ||
|
@@ -4595,7 +4667,7 @@ def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: | |
|
||
return _eval(expr, inplace=inplace, **kwargs) | ||
|
||
def select_dtypes(self, include=None, exclude=None) -> Self: | ||
def select_dtypes(self, include=None, exclude=None) -> DataFrame: | ||
""" | ||
Return a subset of the DataFrame's columns based on the column dtypes. | ||
|
||
|
@@ -5474,9 +5546,21 @@ def pop(self, item: Hashable) -> Series: | |
""" | ||
return super().pop(item=item) | ||
|
||
@overload | ||
def _replace_columnwise( | ||
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[True], regex | ||
) -> None: | ||
... | ||
|
||
@overload | ||
def _replace_columnwise( | ||
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[False], regex | ||
) -> Self: | ||
... | ||
|
||
def _replace_columnwise( | ||
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex | ||
): | ||
) -> Self | None: | ||
""" | ||
Dispatch to Series.replace column-wise. | ||
|
||
|
@@ -5505,7 +5589,7 @@ def _replace_columnwise( | |
res._iset_item(i, newobj, inplace=inplace) | ||
|
||
if inplace: | ||
return | ||
return None | ||
return res.__finalize__(self) | ||
|
||
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) | ||
|
@@ -11815,19 +11899,19 @@ def kurt( | |
product = prod | ||
|
||
@doc(make_doc("cummin", ndim=2)) | ||
def cummin(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): | ||
def cummin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Self: | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
return NDFrame.cummin(self, axis, skipna, *args, **kwargs) | ||
|
||
@doc(make_doc("cummax", ndim=2)) | ||
def cummax(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): | ||
def cummax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Self: | ||
return NDFrame.cummax(self, axis, skipna, *args, **kwargs) | ||
|
||
@doc(make_doc("cumsum", ndim=2)) | ||
def cumsum(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): | ||
def cumsum(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Self: | ||
return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) | ||
|
||
@doc(make_doc("cumprod", 2)) | ||
def cumprod(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): | ||
def cumprod(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Self: | ||
return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) | ||
|
||
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series: | ||
|
@@ -12710,8 +12794,12 @@ def values(self) -> np.ndarray: | |
return self._mgr.as_array() | ||
|
||
|
||
def _from_nested_dict(data) -> collections.defaultdict: | ||
new_data: collections.defaultdict = collections.defaultdict(dict) | ||
def _from_nested_dict( | ||
data: Mapping[HashableT, Mapping[HashableT2, T]], | ||
) -> collections.defaultdict[HashableT2, dict[HashableT, T]]: | ||
new_data: collections.defaultdict[ | ||
HashableT2, dict[HashableT, T] | ||
] = collections.defaultdict(dict) | ||
for index, s in data.items(): | ||
for col, v in s.items(): | ||
new_data[col][index] = v | ||
|
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Curious why we need a second one?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
needed for `_from_nested_dict`. When using the same TypeVar twice in a function signature, type checkers bind both occurrences to the same type. When we have a function that needs to bind two different sets of types independently, we need a second TypeVar:
def (x: HashableT, y: HashableT2) -> dict[HashableT, dict[HashableT2, Any]]: ...
(pandas-stubs uses the name HashableT2)