STY: de-privatize funcs imported cross-module #36107

Merged
merged 1 commit on Sep 4, 2020
2 changes: 1 addition & 1 deletion pandas/_libs/indexing.pyx
@@ -1,4 +1,4 @@
cdef class _NDFrameIndexerBase:
cdef class NDFrameIndexerBase:
"""
A base class for _NDFrameIndexer for fast instantiation and attribute access.
"""
4 changes: 2 additions & 2 deletions pandas/_libs/tslibs/parsing.pyx
@@ -771,7 +771,7 @@ class _timelex:
_DATEUTIL_LEXER_SPLIT = _timelex.split


def _format_is_iso(f) -> bint:
def format_is_iso(f: str) -> bint:
"""
Does format match the iso8601 set that can be handled by the C parser?
Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
@@ -789,7 +789,7 @@ def _format_is_iso(f) -> bint:
return False


def _guess_datetime_format(
def guess_datetime_format(
dt_str,
bint dayfirst=False,
dt_str_parse=du_parse,
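A quick, illustrative sketch of how call sites look once these two parsing helpers lose their leading underscore. The import path and names are taken from the hunk above; the printed results are expected values, not outputs verified against this commit:

from pandas._libs.tslibs.parsing import format_is_iso, guess_datetime_format

# ISO-8601-style format strings are the ones the C parser can handle directly
print(format_is_iso("%Y-%m-%dT%H:%M:%S"))  # True (expected)
print(format_is_iso("%d/%m/%Y"))           # False (expected)

# guess_datetime_format infers a strftime-style format from a sample string
print(guess_datetime_format("2020-09-04 12:30:00"))        # "%Y-%m-%d %H:%M:%S" (expected)
print(guess_datetime_format("04/09/2020", dayfirst=True))  # "%d/%m/%Y" (expected)
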
4 changes: 2 additions & 2 deletions pandas/core/arrays/datetimes.py
@@ -602,9 +602,9 @@ def astype(self, dtype, copy=True):
# Rendering Methods

def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
from pandas.io.formats.format import _get_format_datetime64_from_values
from pandas.io.formats.format import get_format_datetime64_from_values

fmt = _get_format_datetime64_from_values(self, date_format)
fmt = get_format_datetime64_from_values(self, date_format)

return tslib.format_array_from_datetime(
self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep
8 changes: 4 additions & 4 deletions pandas/core/arrays/timedeltas.py
@@ -379,14 +379,14 @@ def median(
# Rendering Methods

def _formatter(self, boxed=False):
from pandas.io.formats.format import _get_format_timedelta64
from pandas.io.formats.format import get_format_timedelta64

return _get_format_timedelta64(self, box=True)
return get_format_timedelta64(self, box=True)

def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
from pandas.io.formats.format import _get_format_timedelta64
from pandas.io.formats.format import get_format_timedelta64

formatter = _get_format_timedelta64(self._data, na_rep)
formatter = get_format_timedelta64(self._data, na_rep)
return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape)

# ----------------------------------------------------------------
4 changes: 2 additions & 2 deletions pandas/core/common.py
@@ -31,7 +31,7 @@
ABCIndexClass,
ABCSeries,
)
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa


@@ -61,7 +61,7 @@ def flatten(l):
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
if iterable_not_string(el):
for s in flatten(el):
yield s
else:
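The flatten() hunk above is cut off by the diff viewer. A minimal self-contained sketch of the pair of helpers shows why iterable_not_string() is used instead of a plain iterability check: strings are iterable, but should not be exploded into characters.

from collections import abc


def iterable_not_string(obj) -> bool:
    # mirrors pandas.core.dtypes.inference.iterable_not_string, shown later in this diff
    return isinstance(obj, abc.Iterable) and not isinstance(obj, str)


def flatten(l):
    # simplified version of the generator in the hunk above
    for el in l:
        if iterable_not_string(el):
            yield from flatten(el)
        else:
            yield el


print(list(flatten([1, [2, (3, "ab")], "cd"])))  # [1, 2, 3, 'ab', 'cd']
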
2 changes: 1 addition & 1 deletion pandas/core/computation/common.py
@@ -5,7 +5,7 @@
from pandas._config import get_option


def _ensure_decoded(s):
def ensure_decoded(s):
"""
If we have bytes, decode them to unicode.
"""
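The body of ensure_decoded() is not visible above. A minimal self-contained sketch of the same idea, assuming a hard-coded encoding where pandas actually reads the configured display encoding:

import numpy as np


def ensure_decoded(s, encoding: str = "utf-8"):
    # if we have bytes, decode them to unicode; everything else passes through
    if isinstance(s, (np.bytes_, bytes)):
        return s.decode(encoding)
    return s


print(ensure_decoded(b"datetime64"))  # 'datetime64'
print(ensure_decoded("datetime64"))   # unchanged
print(ensure_decoded(3.14))           # non-bytes values pass through untouched
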
6 changes: 3 additions & 3 deletions pandas/core/computation/ops.py
@@ -15,7 +15,7 @@
from pandas.core.dtypes.common import is_list_like, is_scalar

import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, result_type_many
from pandas.core.computation.common import ensure_decoded, result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS

from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
@@ -466,7 +466,7 @@ def stringify(value):
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
v = Timestamp(ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.rhs.update(v)
@@ -475,7 +475,7 @@ def stringify(value):
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
v = Timestamp(ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.lhs.update(v)
8 changes: 4 additions & 4 deletions pandas/core/computation/pytables.py
@@ -14,7 +14,7 @@
import pandas as pd
import pandas.core.common as com
from pandas.core.computation import expr, ops, scope as _scope
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.core.construction import extract_array
@@ -189,12 +189,12 @@ def stringify(value):
encoder = pprint_thing
return encoder(value)

kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
kind = ensure_decoded(self.kind)
meta = ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
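The pytables hunk above coerces a query value compared against a datetime column: stringify if numeric, ensure_decoded, wrap in Timestamp, then normalize to UTC. A self-contained approximation of that chain; the real code also branches on self.kind and self.meta:

import pandas as pd


def convert_datetime_value(v):
    if isinstance(v, (int, float)):
        v = str(v)               # stringify
    if isinstance(v, bytes):
        v = v.decode("utf-8")    # ensure_decoded, simplified
    v = pd.Timestamp(v)
    if v.tz is not None:
        v = v.tz_convert("UTC")  # comparisons are done in UTC
    return v


print(convert_datetime_value(b"2020-09-04 12:00:00+02:00"))  # 2020-09-04 10:00:00+00:00
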
16 changes: 8 additions & 8 deletions pandas/core/dtypes/common.py
@@ -635,8 +635,8 @@ def is_dtype_equal(source, target) -> bool:
False
"""
try:
source = _get_dtype(source)
target = _get_dtype(target)
source = get_dtype(source)
target = get_dtype(target)
return source == target
except (TypeError, AttributeError):

@@ -984,10 +984,10 @@ def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
tipo = get_dtype(arr_or_dtype)
except TypeError:
if is_datetime64tz_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype.dtype)
tipo = get_dtype(arr_or_dtype.dtype)
else:
return False
return tipo == DT64NS_DTYPE or getattr(tipo, "base", None) == DT64NS_DTYPE
@@ -1372,7 +1372,7 @@ def is_bool_dtype(arr_or_dtype) -> bool:
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
dtype = get_dtype(arr_or_dtype)
except TypeError:
return False

@@ -1558,13 +1558,13 @@ def _is_dtype(arr_or_dtype, condition) -> bool:
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
dtype = get_dtype(arr_or_dtype)
except (TypeError, ValueError, UnicodeEncodeError):
return False
return condition(dtype)


def _get_dtype(arr_or_dtype) -> DtypeObj:
def get_dtype(arr_or_dtype) -> DtypeObj:
"""
Get the dtype instance associated with an array
or dtype object.
@@ -1695,7 +1695,7 @@ def infer_dtype_from_object(dtype):
try:
return infer_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Handles cases like get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
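For orientation, a small sketch of what the newly public get_dtype() normalizes, which is why is_dtype_equal() and the is_*_dtype() checks above route through it. Behaviour is assumed unchanged by the rename, and the printed results are expected values:

import numpy as np
from pandas.core.dtypes.common import get_dtype, is_dtype_equal

print(get_dtype(np.array([1, 2, 3])))     # int64: dtype taken from the array
print(get_dtype("float64"))               # float64: string spec coerced to a dtype
print(is_dtype_equal(np.int64, "int64"))  # True: both sides go through get_dtype
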
8 changes: 4 additions & 4 deletions pandas/core/dtypes/inference.py
@@ -68,7 +68,7 @@ def is_number(obj) -> bool:
return isinstance(obj, (Number, np.number))


def _iterable_not_string(obj) -> bool:
def iterable_not_string(obj) -> bool:
"""
Check if the object is an iterable but not a string.

@@ -83,11 +83,11 @@ def _iterable_not_string(obj) -> bool:

Examples
--------
>>> _iterable_not_string([1, 2, 3])
>>> iterable_not_string([1, 2, 3])
True
>>> _iterable_not_string("foo")
>>> iterable_not_string("foo")
False
>>> _iterable_not_string(1)
>>> iterable_not_string(1)
False
"""
return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
8 changes: 4 additions & 4 deletions pandas/core/dtypes/missing.py
@@ -338,7 +338,7 @@ def notna(obj):
notnull = notna


def _isna_compat(arr, fill_value=np.nan) -> bool:
def isna_compat(arr, fill_value=np.nan) -> bool:
"""
Parameters
----------
@@ -496,7 +496,7 @@ def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
return array_equivalent(left, right, dtype_equal=True)


def _infer_fill_value(val):
def infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like if we are a NaT, return the correct dtyped
@@ -516,11 +516,11 @@ def _infer_fill_value(val):
return np.nan


def _maybe_fill(arr, fill_value=np.nan):
def maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
if isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr

8 changes: 3 additions & 5 deletions pandas/core/groupby/ops.py
@@ -37,7 +37,7 @@
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import _maybe_fill, isna
from pandas.core.dtypes.missing import isna, maybe_fill

import pandas.core.algorithms as algorithms
from pandas.core.base import SelectionMixin
@@ -524,13 +524,11 @@ def _cython_operation(
codes, _, _ = self.group_info

if kind == "aggregate":
result = _maybe_fill(
np.empty(out_shape, dtype=out_dtype), fill_value=np.nan
)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype), fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, codes, func, min_count)
elif kind == "transform":
result = _maybe_fill(
result = maybe_fill(
np.empty_like(values, dtype=out_dtype), fill_value=np.nan
)

4 changes: 2 additions & 2 deletions pandas/core/indexes/timedeltas.py
@@ -177,9 +177,9 @@ def _simple_new(cls, values: TimedeltaArray, name: Label = None):

@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
from pandas.io.formats.format import get_format_timedelta64

return _get_format_timedelta64(self, box=True)
return get_format_timedelta64(self, box=True)

# -------------------------------------------------------------------

10 changes: 5 additions & 5 deletions pandas/core/indexing.py
@@ -4,7 +4,7 @@

from pandas._config.config import option_context

from pandas._libs.indexing import _NDFrameIndexerBase
from pandas._libs.indexing import NDFrameIndexerBase
from pandas._libs.lib import item_from_zerodim
from pandas.errors import AbstractMethodError, InvalidIndexError
from pandas.util._decorators import doc
@@ -22,7 +22,7 @@
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import _infer_fill_value, isna
from pandas.core.dtypes.missing import infer_fill_value, isna

import pandas.core.common as com
from pandas.core.construction import array as pd_array
@@ -583,7 +583,7 @@ def iat(self) -> "_iAtIndexer":
return _iAtIndexer("iat", self)


class _LocationIndexer(_NDFrameIndexerBase):
class _LocationIndexer(NDFrameIndexerBase):
_valid_types: str
axis = None

@@ -1604,7 +1604,7 @@ def _setitem_with_indexer(self, indexer, value):
return

# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
self.obj[key] = infer_fill_value(value)

new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes
@@ -2017,7 +2017,7 @@ def _align_frame(self, indexer, df: ABCDataFrame):
raise ValueError("Incompatible indexer with DataFrame")


class _ScalarAccessIndexer(_NDFrameIndexerBase):
class _ScalarAccessIndexer(NDFrameIndexerBase):
"""
Access scalars quickly.
"""
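The _setitem_with_indexer hunk above pre-fills a brand-new column with infer_fill_value(value) before assigning the selected rows. A small example of the observable behaviour, assuming standard enlargement semantics rather than anything specific to this commit:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
df.loc[df["a"] > 1, "b"] = df["a"]  # "b" does not exist yet, so it is created
print(df)
#    a    b
# 0  1  NaN   <- unselected rows keep the inferred fill value (NaN for numeric data)
# 1  2  2.0
# 2  3  3.0
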
14 changes: 7 additions & 7 deletions pandas/core/internals/blocks.py
@@ -56,7 +56,7 @@
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import _isna_compat, is_valid_nat_for_dtype, isna
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat

import pandas.core.algorithms as algos
from pandas.core.array_algos.transforms import shift
@@ -487,7 +487,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]
):
return blocks

return _extend_blocks([b.downcast(downcast) for b in blocks])
return extend_blocks([b.downcast(downcast) for b in blocks])

def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
@@ -2474,7 +2474,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]
return blocks

# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])
return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])

def _can_hold_element(self, element: Any) -> bool:
return True
@@ -2503,7 +2503,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
result = b._replace_single(
to_rep, v, inplace=inplace, regex=regex, convert=convert
)
result_blocks = _extend_blocks(result, result_blocks)
result_blocks = extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks

@@ -2514,7 +2514,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
result = b._replace_single(
to_rep, value, inplace=inplace, regex=regex, convert=convert
)
result_blocks = _extend_blocks(result, result_blocks)
result_blocks = extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks

@@ -2769,7 +2769,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None):
# -----------------------------------------------------------------


def _extend_blocks(result, blocks=None):
def extend_blocks(result, blocks=None):
Review comment (Contributor): btw, I think you can remove this at this point (assuming all of the return values are List[Block])

""" return a new extended blocks, given the result """
if blocks is None:
blocks = []
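To make the review comment above concrete: this helper only exists because some internal methods return a single Block while others return a list of Blocks. A self-contained sketch of its logic follows; if every caller returned List[Block], as the comment assumes, a plain list.extend() would suffice and the helper could be dropped.

def extend_blocks(result, blocks=None):
    # return a new extended list of blocks, given a block-or-list result
    if blocks is None:
        blocks = []
    if isinstance(result, list):
        for r in result:
            if isinstance(r, list):
                blocks.extend(r)   # nested lists of blocks are flattened one level
            else:
                blocks.append(r)
    else:
        blocks.append(result)      # a single block is appended as-is
    return blocks
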
@@ -2860,7 +2860,7 @@ def _putmask_smart(v: np.ndarray, mask: np.ndarray, n) -> np.ndarray:
else:
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
if not isna_compat(v, nn[0]):
pass
elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
# only compare integers/floats