diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index 63dd40a229dfc..40452b41998df 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -95,8 +95,8 @@ def _stringifyText(text): acceptedTypes = (str, int, float, bool) if not isinstance(text, acceptedTypes): raise PyperclipException( - "only str, int, float, and bool values" - "can be copied to the clipboard, not".format(text.__class__.__name__) + "only str, int, float, and bool values " + f"can be copied to the clipboard, not {text.__class__.__name__}" ) return str(text) diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index ab64bc14344f1..7fdca2d65b05d 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -57,8 +57,8 @@ def _check_columns(cols): for i, n in enumerate(map(len, tail)): if n != N: raise AssertionError( - "All columns must have the same length: {0}; " - "column {1} has length {2}".format(N, i, n) + f"All columns must have the same length: {N}; " + f"column {i} has length {n}" ) return N diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 3a67f8306fff1..97556f9685001 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -61,7 +61,7 @@ def get_sheet_by_name(self, name: str): if table.getAttribute("name") == name: return table - raise ValueError("sheet {} not found".format(name)) + raise ValueError(f"sheet {name} not found") def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: """Parse an ODF Table into a list of lists diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index d8f5da5ab5bc6..d0d6096a4425e 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -46,7 +46,8 @@ def save(self): @classmethod def _convert_to_style(cls, style_dict): """ - converts a style_dict to an openpyxl style object + Converts a style_dict to an openpyxl style object. 
+ Parameters ---------- style_dict : style dictionary to convert @@ -72,7 +73,8 @@ def _convert_to_style(cls, style_dict): def _convert_to_style_kwargs(cls, style_dict): """ Convert a style_dict to a set of kwargs suitable for initializing - or updating-on-copy an openpyxl v2 style object + or updating-on-copy an openpyxl v2 style object. + Parameters ---------- style_dict : dict @@ -83,6 +85,7 @@ def _convert_to_style_kwargs(cls, style_dict): 'alignment' 'number_format' 'protection' + Returns ------- style_kwargs : dict @@ -107,7 +110,8 @@ def _convert_to_style_kwargs(cls, style_dict): @classmethod def _convert_to_color(cls, color_spec): """ - Convert ``color_spec`` to an openpyxl v2 Color object + Convert ``color_spec`` to an openpyxl v2 Color object. + Parameters ---------- color_spec : str, dict @@ -120,6 +124,7 @@ def _convert_to_color(cls, color_spec): 'tint' 'index' 'type' + Returns ------- color : openpyxl.styles.Color @@ -135,7 +140,8 @@ def _convert_to_color(cls, color_spec): @classmethod def _convert_to_font(cls, font_dict): """ - Convert ``font_dict`` to an openpyxl v2 Font object + Convert ``font_dict`` to an openpyxl v2 Font object. + Parameters ---------- font_dict : dict @@ -154,6 +160,7 @@ def _convert_to_font(cls, font_dict): 'outline' 'shadow' 'condense' + Returns ------- font : openpyxl.styles.Font @@ -185,11 +192,13 @@ def _convert_to_stop(cls, stop_seq): """ Convert ``stop_seq`` to a list of openpyxl v2 Color objects, suitable for initializing the ``GradientFill`` ``stop`` parameter. + Parameters ---------- stop_seq : iterable An iterable that yields objects suitable for consumption by ``_convert_to_color``. + Returns ------- stop : list of openpyxl.styles.Color @@ -200,7 +209,8 @@ def _convert_to_stop(cls, stop_seq): @classmethod def _convert_to_fill(cls, fill_dict): """ - Convert ``fill_dict`` to an openpyxl v2 Fill object + Convert ``fill_dict`` to an openpyxl v2 Fill object. 
+ Parameters ---------- fill_dict : dict @@ -216,6 +226,7 @@ def _convert_to_fill(cls, fill_dict): 'top' 'bottom' 'stop' + Returns ------- fill : openpyxl.styles.Fill @@ -262,7 +273,8 @@ def _convert_to_fill(cls, fill_dict): @classmethod def _convert_to_side(cls, side_spec): """ - Convert ``side_spec`` to an openpyxl v2 Side object + Convert ``side_spec`` to an openpyxl v2 Side object. + Parameters ---------- side_spec : str, dict @@ -270,6 +282,7 @@ def _convert_to_side(cls, side_spec): of the following keys (or their synonyms). 'style' ('border_style') 'color' + Returns ------- side : openpyxl.styles.Side @@ -295,7 +308,8 @@ def _convert_to_side(cls, side_spec): @classmethod def _convert_to_border(cls, border_dict): """ - Convert ``border_dict`` to an openpyxl v2 Border object + Convert ``border_dict`` to an openpyxl v2 Border object. + Parameters ---------- border_dict : dict @@ -311,6 +325,7 @@ def _convert_to_border(cls, border_dict): 'diagonalUp' ('diagonalup') 'diagonalDown' ('diagonaldown') 'outline' + Returns ------- border : openpyxl.styles.Border @@ -335,7 +350,8 @@ def _convert_to_border(cls, border_dict): @classmethod def _convert_to_alignment(cls, alignment_dict): """ - Convert ``alignment_dict`` to an openpyxl v2 Alignment object + Convert ``alignment_dict`` to an openpyxl v2 Alignment object. 
+ Parameters ---------- alignment_dict : dict diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index fe3d0a208de6a..996ae1caa14c8 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -77,7 +77,9 @@ def write_cells( wks.write(startrow + cell.row, startcol + cell.col, val, style) @classmethod - def _style_to_xlwt(cls, item, firstlevel=True, field_sep=",", line_sep=";"): + def _style_to_xlwt( + cls, item, firstlevel: bool = True, field_sep=",", line_sep=";" + ) -> str: """helper which recursively generate an xlwt easy style string for example: @@ -117,6 +119,7 @@ def _style_to_xlwt(cls, item, firstlevel=True, field_sep=",", line_sep=";"): def _convert_to_style(cls, style_dict, num_format_str=None): """ converts a style_dict to an xlwt style object + Parameters ---------- style_dict : style dictionary to convert diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index dd6519275ad15..d9e88f42c2ef2 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -10,7 +10,7 @@ from pandas.io.common import _stringify_path -def to_feather(df, path): +def to_feather(df: DataFrame, path): """ Write a DataFrame to the feather-format diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index e25862537cbfc..f0493036b934a 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -327,7 +327,7 @@ def _save(self): self._save_chunk(start_i, end_i) - def _save_chunk(self, start_i, end_i): + def _save_chunk(self, start_i: int, end_i: int): data_index = self.data_index diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index b9c847ad64c57..cd0889044094f 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -63,8 +63,9 @@ def __init__(self, inherited=None): compute_css = CSSResolver() - def __call__(self, declarations_str): - """Convert CSS declarations to ExcelWriter style + def __call__(self, declarations_str: str): + """ + Convert CSS declarations 
to ExcelWriter style. Parameters ---------- @@ -279,6 +280,7 @@ def build_font(self, props): if "text-shadow" in props else None ), + # FIXME: dont leave commented-out # 'vertAlign':, # 'charset': , # 'scheme': , @@ -665,7 +667,7 @@ def _format_hierarchical_rows(self): for cell in self._generate_body(gcolidx): yield cell - def _generate_body(self, coloffset): + def _generate_body(self, coloffset: int): if self.styler is None: styles = None else: diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index ca9db88ae7be4..6f903e770c86c 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -11,8 +11,8 @@ class LatexFormatter(TableFormatter): - """ Used to render a DataFrame to a LaTeX tabular/longtable environment - output. + """ + Used to render a DataFrame to a LaTeX tabular/longtable environment output. Parameters ---------- @@ -106,18 +106,19 @@ def pad_empties(x): # Get rid of old multiindex column and add new ones strcols = out + strcols[1:] - column_format = self.column_format - if column_format is None: + if self.column_format is None: dtypes = self.frame.dtypes._values column_format = "".join(map(get_col_type, dtypes)) if self.fmt.index: index_format = "l" * self.frame.index.nlevels column_format = index_format + column_format - elif not isinstance(column_format, str): # pragma: no cover + elif not isinstance(self.column_format, str): # pragma: no cover raise AssertionError( "column_format must be str or unicode, " - "not {typ}".format(typ=type(column_format)) + "not {typ}".format(typ=type(self.column_format)) ) + else: + column_format = self.column_format if self.longtable: self._write_longtable_begin(buf, column_format) @@ -265,7 +266,7 @@ def _format_multirow( def _print_cline(self, buf: IO[str], i: int, icol: int) -> None: """ - Print clines after multirow-blocks are finished + Print clines after multirow-blocks are finished. 
""" for cl in self.clinebuf: if cl[0] == i: @@ -273,7 +274,7 @@ def _print_cline(self, buf: IO[str], i: int, icol: int) -> None: # remove entries that have been written to buffer self.clinebuf = [x for x in self.clinebuf if x[0] != i] - def _write_tabular_begin(self, buf, column_format): + def _write_tabular_begin(self, buf, column_format: str): """ Write the beginning of a tabular environment or nested table/tabular environments including caption and label. @@ -283,11 +284,10 @@ def _write_tabular_begin(self, buf, column_format): buf : string or file handle File path or object. If not specified, the result is returned as a string. - column_format : str, default None + column_format : str The columns format as specified in `LaTeX table format `__ e.g 'rcl' for 3 columns - """ if self.caption is not None or self.label is not None: # then write output in a nested table/tabular environment @@ -327,7 +327,7 @@ def _write_tabular_end(self, buf): else: pass - def _write_longtable_begin(self, buf, column_format): + def _write_longtable_begin(self, buf, column_format: str): """ Write the beginning of a longtable environment including caption and label if provided by user. @@ -337,11 +337,10 @@ def _write_longtable_begin(self, buf, column_format): buf : string or file handle File path or object. If not specified, the result is returned as a string. - column_format : str, default None + column_format : str The columns format as specified in `LaTeX table format `__ e.g 'rcl' for 3 columns - """ buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format)) diff --git a/pandas/io/html.py b/pandas/io/html.py index 9a368907b65aa..ed2b21994fdca 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -57,7 +57,7 @@ def _importers(): _RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}") -def _remove_whitespace(s, regex=_RE_WHITESPACE): +def _remove_whitespace(s: str, regex=_RE_WHITESPACE) -> str: """ Replace extra whitespace inside of a string with a single space. 
@@ -65,8 +65,7 @@ def _remove_whitespace(s, regex=_RE_WHITESPACE): ---------- s : str or unicode The string from which to remove extra whitespace. - - regex : regex + regex : re.Pattern The regular expression to use to remove extra whitespace. Returns @@ -253,7 +252,8 @@ def _text_getter(self, obj): raise AbstractMethodError(self) def _parse_td(self, obj): - """Return the td elements from a row element. + """ + Return the td elements from a row element. Parameters ---------- @@ -600,7 +600,7 @@ def _build_doc(self): ) -def _build_xpath_expr(attrs): +def _build_xpath_expr(attrs) -> str: """Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. @@ -810,7 +810,8 @@ def _data_to_frame(**kwargs): def _parser_dispatch(flavor): - """Choose the parser based on the input flavor. + """ + Choose the parser based on the input flavor. Parameters ---------- @@ -850,7 +851,7 @@ def _parser_dispatch(flavor): return _valid_parsers[flavor] -def _print_as_set(s): +def _print_as_set(s) -> str: return "{" + "{arg}".format(arg=", ".join(pprint_thing(el) for el in s)) + "}" diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 0a8f275cf54a9..26a3248262f9a 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -711,7 +711,7 @@ def _get_data_from_filepath(self, filepath_or_buffer): return data - def _combine_lines(self, lines): + def _combine_lines(self, lines) -> str: """ Combines a list of JSON objects into one JSON object. """ @@ -1169,7 +1169,7 @@ def _try_convert_dates(self): convert_dates = [] convert_dates = set(convert_dates) - def is_ok(col): + def is_ok(col) -> bool: """ Return if this col is ok to try for a date parse. 
""" diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ee08e2abb2289..03eb8570e436e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -92,7 +92,7 @@ def _ensure_str(name): Term = Expr -def _ensure_term(where, scope_level): +def _ensure_term(where, scope_level: int): """ ensure that the where is a Term or a list of Term this makes sure that we are capturing the scope of variables @@ -252,7 +252,7 @@ def to_hdf( complevel=None, complib=None, append=None, - **kwargs + **kwargs, ): """ store this object, close it if we opened it """ @@ -271,7 +271,7 @@ def to_hdf( f(path_or_buf) -def read_hdf(path_or_buf, key=None, mode="r", **kwargs): +def read_hdf(path_or_buf, key=None, mode: str = "r", **kwargs): """ Read from the store, close it if we opened it. @@ -340,8 +340,8 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs): if mode not in ["r", "r+", "a"]: raise ValueError( - "mode {0} is not allowed while performing a read. " - "Allowed modes are r, r+ and a.".format(mode) + f"mode {mode} is not allowed while performing a read. " + "Allowed modes are r, r+ and a." 
) # grab the scope if "where" in kwargs: @@ -406,7 +406,7 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs): raise -def _is_metadata_of(group, parent_group): +def _is_metadata_of(group, parent_group) -> bool: """Check if a given group is a metadata group for a given parent_group.""" if group._v_depth <= parent_group._v_depth: return False @@ -466,7 +466,13 @@ class HDFStore: """ def __init__( - self, path, mode=None, complevel=None, complib=None, fletcher32=False, **kwargs + self, + path, + mode=None, + complevel=None, + complib=None, + fletcher32: bool = False, + **kwargs, ): if "format" in kwargs: @@ -577,7 +583,7 @@ def items(self): iteritems = items - def open(self, mode="a", **kwargs): + def open(self, mode: str = "a", **kwargs): """ Open the file in the specified mode @@ -615,19 +621,19 @@ def open(self, mode="a", **kwargs): try: self._handle = tables.open_file(self._path, self._mode, **kwargs) - except (IOError) as e: # pragma: no cover - if "can not be written" in str(e): + except IOError as err: # pragma: no cover + if "can not be written" in str(err): print("Opening {path} in read-only mode".format(path=self._path)) self._handle = tables.open_file(self._path, "r", **kwargs) else: raise - except (ValueError) as e: + except ValueError as err: # trap PyTables >= 3.1 FILE_OPEN_POLICY exception # to provide an updated message - if "FILE_OPEN_POLICY" in str(e): - e = ValueError( + if "FILE_OPEN_POLICY" in str(err): + err = ValueError( "PyTables [{version}] no longer supports opening multiple " "files\n" "even in read-only mode on this HDF5 version " @@ -641,14 +647,14 @@ def open(self, mode="a", **kwargs): ) ) - raise e + raise err - except (Exception) as e: + except Exception as err: # trying to read from a non-existent file causes an error which # is not part of IOError, make it one - if self._mode == "r" and "Unable to open/create file" in str(e): - raise IOError(str(e)) + if self._mode == "r" and "Unable to open/create file" in str(err): + raise 
IOError(str(err)) raise def close(self): @@ -660,7 +666,7 @@ def close(self): self._handle = None @property - def is_open(self): + def is_open(self) -> bool: """ return a boolean indicating whether the file is open """ @@ -668,7 +674,7 @@ def is_open(self): return False return bool(self._handle.isopen) - def flush(self, fsync=False): + def flush(self, fsync: bool = False): """ Force all buffered modifications to be written to disk. @@ -719,8 +725,8 @@ def select( columns=None, iterator=False, chunksize=None, - auto_close=False, - **kwargs + auto_close: bool = False, + **kwargs, ): """ Retrieve pandas object stored in file, optionally based on where criteria. @@ -824,10 +830,11 @@ def select_as_multiple( stop=None, iterator=False, chunksize=None, - auto_close=False, - **kwargs + auto_close: bool = False, + **kwargs, ): - """ Retrieve pandas objects from multiple tables + """ + Retrieve pandas objects from multiple tables. Parameters ---------- @@ -839,6 +846,8 @@ def select_as_multiple( stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator + auto_close : bool, default False + Should automatically close the store when finished. 
Raises ------ @@ -860,7 +869,7 @@ def select_as_multiple( stop=stop, iterator=iterator, chunksize=chunksize, - **kwargs + **kwargs, ) if not isinstance(keys, (list, tuple)): @@ -1262,27 +1271,28 @@ def copy( self, file, mode="w", - propindexes=True, + propindexes: bool = True, keys=None, complib=None, complevel=None, - fletcher32=False, + fletcher32: bool = False, overwrite=True, ): - """ copy the existing store to a new file, upgrading in place - - Parameters - ---------- - propindexes: restore indexes in copied file (defaults to True) - keys : list of keys to include in the copy (defaults to all) - overwrite : overwrite (remove and replace) existing nodes in the - new store (default is True) - mode, complib, complevel, fletcher32 same as in HDFStore.__init__ + """ + Copy the existing store to a new file, updating in place. - Returns - ------- - open file handle of the new store + Parameters + ---------- + propindexes: bool, default True + Restore indexes in copied file. + keys : list of keys to include in the copy (defaults to all) + overwrite : overwrite (remove and replace) existing nodes in the + new store (default is True) + mode, complib, complevel, fletcher32 same as in HDFStore.__init__ + Returns + ------- + open file handle of the new store """ new_store = HDFStore( file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32 @@ -1302,7 +1312,7 @@ def copy( data = self.select(k) if s.is_table: - index = False + index = False # type: Union[bool, list] if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( @@ -1317,7 +1327,7 @@ def copy( return new_store - def info(self): + def info(self) -> str: """ Print detailed information on the store. 
@@ -1478,7 +1488,7 @@ def _write_to_group( append=False, complib=None, encoding=None, - **kwargs + **kwargs, ): group = self.get_node(key) @@ -1550,13 +1560,16 @@ class TableIterator: nrows : the rows to iterate on start : the passed start value (default is None) stop : the passed stop value (default is None) - iterator : boolean, whether to use the default iterator + iterator : bool, default False + Whether to use the default iterator. chunksize : the passed chunking value (default is 50000) auto_close : boolean, automatically close the store at the end of iteration, default is False kwargs : the passed kwargs """ + chunksize: Optional[int] + def __init__( self, store, @@ -1566,7 +1579,7 @@ def __init__( nrows, start=None, stop=None, - iterator=False, + iterator: bool = False, chunksize=None, auto_close=False, ): @@ -1619,7 +1632,7 @@ def close(self): if self.auto_close: self.store.close() - def get_result(self, coordinates=False): + def get_result(self, coordinates: bool = False): # return the actual iterator if self.chunksize is not None: @@ -1676,7 +1689,7 @@ def __init__( freq=None, tz=None, index_name=None, - **kwargs + **kwargs, ): self.values = values self.kind = kind @@ -1708,13 +1721,13 @@ def set_name(self, name, kind_attr=None): return self - def set_axis(self, axis): + def set_axis(self, axis: int): """ set the axis over which I index """ self.axis = axis return self - def set_pos(self, pos): + def set_pos(self, pos: int): """ set the position of this column in the Table """ self.pos = pos if pos is not None and self.typ is not None: @@ -1736,23 +1749,23 @@ def __repr__(self) -> str: ) ) - def __eq__(self, other): + def __eq__(self, other) -> bool: """ compare 2 col items """ return all( getattr(self, a, None) == getattr(other, a, None) for a in ["name", "cname", "axis", "pos"] ) - def __ne__(self, other): + def __ne__(self, other) -> bool: return not self.__eq__(other) @property - def is_indexed(self): + def is_indexed(self) -> bool: """ return 
whether I am an indexed column """ try: return getattr(self.table.cols, self.cname).is_indexed except AttributeError: - False + return False def copy(self): new_self = copy.copy(self) @@ -1767,7 +1780,9 @@ def infer(self, handler): new_self.read_metadata(handler) return new_self - def convert(self, values, nan_rep, encoding, errors, start=None, stop=None): + def convert( + self, values: np.ndarray, nan_rep, encoding, errors, start=None, stop=None + ): """ set the values from this selection: take = take ownership """ # values is a recarray @@ -1961,7 +1976,7 @@ class GenericIndexCol(IndexCol): """ an index which is not represented in the data of the table """ @property - def is_indexed(self): + def is_indexed(self) -> bool: return False def convert(self, values, nan_rep, encoding, errors, start=None, stop=None): @@ -2042,7 +2057,7 @@ def __init__( meta=None, metadata=None, block=None, - **kwargs + **kwargs, ): super().__init__(values=values, kind=kind, typ=typ, cname=cname, **kwargs) self.dtype = None @@ -2497,7 +2512,7 @@ def __init__(self, parent, group, encoding=None, errors="strict", **kwargs): self.set_version() @property - def is_old_version(self): + def is_old_version(self) -> bool: return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1 def set_version(self): @@ -2515,7 +2530,7 @@ def pandas_type(self): return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None)) @property - def format_type(self): + def format_type(self) -> str: return "fixed" def __repr__(self) -> str: @@ -2590,7 +2605,7 @@ def storable(self): return self.group @property - def is_exists(self): + def is_exists(self) -> bool: return False @property @@ -2647,7 +2662,7 @@ class GenericFixed(Fixed): attributes = [] # type: List[str] # indexer helpders - def _class_to_alias(self, cls): + def _class_to_alias(self, cls) -> str: return self._index_type_map.get(cls, "") def _alias_to_class(self, alias): @@ -2700,7 +2715,7 @@ def validate_read(self, kwargs): return 
kwargs @property - def is_exists(self): + def is_exists(self) -> bool: return True def set_attrs(self): @@ -2908,14 +2923,14 @@ def read_index_node(self, node, start=None, stop=None): data, kind, encoding=self.encoding, errors=self.errors ), dtype=object, - **kwargs + **kwargs, ) else: index = factory( _unconvert_index( data, kind, encoding=self.encoding, errors=self.errors ), - **kwargs + **kwargs, ) index.name = name @@ -2931,7 +2946,7 @@ def write_array_empty(self, key, value): getattr(self.group, key)._v_attrs.value_type = str(value.dtype) getattr(self.group, key)._v_attrs.shape = value.shape - def _is_empty_array(self, shape): + def _is_empty_array(self, shape) -> bool: """Returns true if any axis is zero length.""" return any(x == 0 for x in shape) @@ -3210,7 +3225,7 @@ def table_type_short(self): return self.table_type.split("_")[0] @property - def format_type(self): + def format_type(self) -> str: return "table" def __repr__(self) -> str: @@ -3309,7 +3324,7 @@ def nrows_expected(self): return np.prod([i.cvalues.shape[0] for i in self.index_axes]) @property - def is_exists(self): + def is_exists(self) -> bool: """ has this table been created """ return "table" in self.group @@ -3335,12 +3350,12 @@ def axes(self): return itertools.chain(self.index_axes, self.values_axes) @property - def ncols(self): + def ncols(self) -> int: """ the number of total columns in the values axes """ return sum(len(a.values) for a in self.values_axes) @property - def is_transposed(self): + def is_transposed(self) -> bool: return False @property @@ -3378,7 +3393,7 @@ def values_cols(self): """ return a list of my values cols """ return [i.cname for i in self.values_axes] - def _get_metadata_path(self, key): + def _get_metadata_path(self, key) -> str: """ return the metadata pathname for this key """ return "{group}/meta/{key}/meta".format(group=self.group._v_pathname, key=key) @@ -3572,9 +3587,19 @@ def create_index(self, columns=None, optlevel=None, kind=None): ) 
v.create_index(**kw) - def read_axes(self, where, **kwargs): - """create and return the axes sniffed from the table: return boolean - for success + def read_axes(self, where, **kwargs) -> bool: + """ + Create the axes sniffed from the table. + + Parameters + ---------- + where : ??? + **kwargs + + Returns + ------- + bool + Indicates success. """ # validate the version @@ -3654,7 +3679,7 @@ def create_axes( nan_rep=None, data_columns=None, min_itemsize=None, - **kwargs + **kwargs, ): """ create and return the axes legacy tables create an indexable column, indexable index, @@ -3941,7 +3966,7 @@ def process_filter(field, filt): return obj def create_description( - self, complib=None, complevel=None, fletcher32=False, expectedrows=None + self, complib=None, complevel=None, fletcher32: bool = False, expectedrows=None ): """ create the description of the table from the axes & values """ @@ -4104,7 +4129,7 @@ def write( chunksize=None, expectedrows=None, dropna=False, - **kwargs + **kwargs, ): if not append and self.is_exists: @@ -4340,7 +4365,7 @@ class AppendableFrameTable(AppendableTable): obj_type = DataFrame # type: Type[Union[DataFrame, Series]] @property - def is_transposed(self): + def is_transposed(self) -> bool: return self.index_axes[0].axis == 1 def get_object(self, obj): @@ -4411,7 +4436,7 @@ class AppendableSeriesTable(AppendableFrameTable): storage_obj_type = DataFrame @property - def is_transposed(self): + def is_transposed(self) -> bool: return False def get_object(self, obj): @@ -4547,7 +4572,7 @@ def read(self, **kwargs): return df -def _reindex_axis(obj, axis, labels, other=None): +def _reindex_axis(obj, axis: int, labels: Index, other=None): ax = obj._get_axis(axis) labels = ensure_index(labels) @@ -4562,7 +4587,7 @@ def _reindex_axis(obj, axis, labels, other=None): if other is not None: labels = ensure_index(other.unique()).intersection(labels, sort=False) if not labels.equals(ax): - slicer = [slice(None, None)] * obj.ndim + slicer = [slice(None, 
None)] * obj.ndim # type: List[Union[slice, Index]] slicer[axis] = labels obj = obj.loc[tuple(slicer)] return obj @@ -4588,7 +4613,7 @@ def _get_tz(tz): return zone -def _set_tz(values, tz, preserve_UTC=False, coerce=False): +def _set_tz(values, tz, preserve_UTC: bool = False, coerce: bool = False): """ coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible @@ -4597,7 +4622,7 @@ def _set_tz(values, tz, preserve_UTC=False, coerce=False): ---------- values : ndarray tz : string/pickled tz object - preserve_UTC : boolean, + preserve_UTC : bool, preserve the UTC of the result coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray """ @@ -4842,7 +4867,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None, errors="strict"): return data.reshape(shape) -def _maybe_convert(values, val_kind, encoding, errors): +def _maybe_convert(values: np.ndarray, val_kind, encoding, errors): if _need_convert(val_kind): conv = _get_converter(val_kind, encoding, errors) # conv = np.frompyfunc(conv, 1, 1) @@ -4862,7 +4887,7 @@ def _get_converter(kind, encoding, errors): raise ValueError("invalid kind {kind}".format(kind=kind)) -def _need_convert(kind): +def _need_convert(kind) -> bool: kind = _ensure_decoded(kind) if kind in ("datetime", "datetime64", "string"): return True diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index ea26a9b8efdbf..2f2dbdbc76215 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -143,7 +143,7 @@ """ -def _parse_date(datestr): +def _parse_date(datestr: str) -> datetime: """ Given a date in xport format, return Python date. """ try: # e.g. "16FEB11:10:07:55" @@ -152,11 +152,11 @@ def _parse_date(datestr): return pd.NaT -def _split_line(s, parts): +def _split_line(s: str, parts): """ Parameters ---------- - s: string + s: str Fixed-length string to split parts: list of (name, length) pairs Used to break up string, name '_' will be filtered from output. 
@@ -402,7 +402,7 @@ def _read_header(self): def __next__(self): return self.read(nrows=self._chunksize or 1) - def _record_count(self): + def _record_count(self) -> int: """ Get number of records in file.