From 24a774a1e2d12c904b36a66a16154f97b4e6d4fd Mon Sep 17 00:00:00 2001
From: Simon Hawkins
Date: Tue, 11 Feb 2020 17:58:21 +0000
Subject: [PATCH] D209 Multi-line docstring closing quotes should be on a separate line

---
 pandas/_testing.py                            |  3 ++-
 pandas/core/arrays/sparse/scipy_sparse.py     |  3 ++-
 pandas/core/computation/pytables.py           |  9 ++++---
 pandas/io/excel/_base.py                      |  3 ++-
 pandas/io/excel/_xlrd.py                      |  3 ++-
 pandas/io/parsers.py                          |  3 ++-
 pandas/io/pytables.py                         | 15 ++++++++----
 pandas/io/sas/sas7bdat.py                     |  3 ++-
 pandas/io/stata.py                            | 15 ++++++++----
 pandas/tests/generic/test_generic.py          |  3 ++-
 pandas/tests/groupby/test_categorical.py      |  3 ++-
 pandas/tests/indexing/multiindex/conftest.py  |  3 ++-
 pandas/tests/indexing/test_floats.py          |  3 ++-
 pandas/tests/io/pytables/common.py            |  3 ++-
 pandas/tests/io/test_compression.py           |  3 ++-
 pandas/tests/resample/conftest.py             | 24 ++++++++++++-------
 .../merge/test_merge_index_as_string.py       |  6 +++--
 pandas/tests/test_register_accessor.py        |  3 ++-
 18 files changed, 72 insertions(+), 36 deletions(-)

diff --git a/pandas/_testing.py b/pandas/_testing.py
index 13af8703cef93..9e71524263a18 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -2150,7 +2150,8 @@ def optional_args(decorator):
         @my_decorator
         def function(): pass
 
-    Calls decorator with decorator(f, *args, **kwargs)"""
+    Calls decorator with decorator(f, *args, **kwargs)
+    """
 
     @wraps(decorator)
     def wrapper(*args, **kwargs):
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
index 17a953fce9ec0..b67f2c9f52c76 100644
--- a/pandas/core/arrays/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -19,7 +19,8 @@ def _check_is_partition(parts, whole):
 def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
     """ For arbitrary (MultiIndexed) sparse Series return
     (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
-    passing to scipy.sparse.coo constructor. """
+    passing to scipy.sparse.coo constructor.
+    """
     # index and column levels must be a partition of the index
     _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index be652ca0e6a36..48129b23171f1 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -151,7 +151,8 @@ def is_valid(self) -> bool:
     @property
     def is_in_table(self) -> bool:
         """ return True if this is a valid column name for generation (e.g. an
-        actual column in the table) """
+        actual column in the table)
+        """
         return self.queryables.get(self.lhs) is not None
 
     @property
@@ -176,7 +177,8 @@ def generate(self, v) -> str:
 
     def convert_value(self, v) -> "TermValue":
         """ convert the expression that is in the term to something that is
-        accepted by pytables """
+        accepted by pytables
+        """
 
         def stringify(value):
             if self.encoding is not None:
@@ -602,7 +604,8 @@ def __init__(self, value, converted, kind: str):
 
     def tostring(self, encoding) -> str:
         """ quote the string if not encoded
-        else encode and return """
+        else encode and return
+        """
         if self.kind == "string":
             if encoding is not None:
                 return str(self.converted)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 5ad56e30eeb39..70c09151258ff 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -756,7 +756,8 @@ def _value_with_fmt(self, val):
     @classmethod
     def check_extension(cls, ext):
         """checks that path's extension against the Writer's supported
-        extensions. If it isn't supported, raises UnsupportedFiletypeError."""
+        extensions. If it isn't supported, raises UnsupportedFiletypeError.
+        """
         if ext.startswith("."):
             ext = ext[1:]
         if not any(ext in extension for extension in cls.supported_extensions):
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index be1b78eeb146e..9b18beda03c66 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -58,7 +58,8 @@ def get_sheet_data(self, sheet, convert_float):
 
         def _parse_cell(cell_contents, cell_typ):
             """converts the contents of the cell into a pandas
-            appropriate object"""
+            appropriate object
+            """
 
             if cell_typ == XL_CELL_DATE:
 
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8bc8470ae7658..616839262bd48 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1458,7 +1458,8 @@ def _extract_multi_indexer_columns(
         self, header, index_names, col_names, passed_names=False
     ):
         """ extract and return the names, index_names, col_names
-        header is a list-of-lists returned from the parsers """
+        header is a list-of-lists returned from the parsers
+        """
         if len(header) < 2:
             return header[0], index_names, col_names, passed_names
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0e2b909d5cdc7..1f115a0e90c50 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2001,7 +2001,8 @@ def __iter__(self):
     def maybe_set_size(self, min_itemsize=None):
         """ maybe set a string col itemsize:
         min_itemsize can be an integer or a dict with this columns name
-        with an integer size """
+        with an integer size
+        """
         if _ensure_decoded(self.kind) == "string":
             if isinstance(min_itemsize, dict):
@@ -2052,7 +2053,8 @@ def validate_attr(self, append: bool):
 
     def update_info(self, info):
         """ set/update the info for this indexable with the key/value
-        if there is a conflict raise/warn as needed """
+        if there is a conflict raise/warn as needed
+        """
 
         for key in self._info_fields:
@@ -2597,7 +2599,8 @@ def validate_version(self, where=None):
 
     def infer_axes(self):
         """ infer the axes of my storer
-        return a boolean indicating if we have a valid storer or not """
+        return a boolean indicating if we have a valid storer or not
+        """
         s = self.storable
         if s is None:
@@ -4095,7 +4098,8 @@ def read(
         stop: Optional[int] = None,
     ):
         """ read the indices and the indexing array, calculate offset rows and
-        return """
+        return
+        """
         raise NotImplementedError("WORMTable needs to implement read")
 
     def write(self, **kwargs):
@@ -4171,7 +4175,8 @@ def write(
 
     def write_data(self, chunksize: Optional[int], dropna: bool = False):
         """ we form the data into a 2-d including indexes,values,mask
-        write chunk-by-chunk """
+        write chunk-by-chunk
+        """
         names = self.dtype.names
         nrows = self.nrows_expected
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 9b40778dbcfdf..3affa31f93d07 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -121,7 +121,8 @@ def column_data_offsets(self):
     def column_types(self):
         """Returns a numpy character array of the column types:
-        s (string) or d (double)"""
+        s (string) or d (double)
+        """
         return np.asarray(self._column_types, dtype=np.dtype("S1"))
 
     def close(self):
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index d651fe9f67773..db379c3f192d7 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2133,7 +2133,8 @@ def _write_bytes(self, value: bytes) -> None:
     def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
         """Check for categorical columns, retain categorical information for
-        Stata file and convert categorical data to int"""
+        Stata file and convert categorical data to int
+        """
         is_cat = [is_categorical_dtype(data[col]) for col in data]
         self._is_col_cat = is_cat
@@ -2175,7 +2176,8 @@ def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
     def _replace_nans(self, data: DataFrame) -> DataFrame:
         # return data
         """Checks floating point data columns for nans, and replaces these with
-        the generic Stata for missing value (.)"""
+        the generic Stata for missing value (.)
+        """
         for c in data:
             dtype = data[c].dtype
             if dtype in (np.float32, np.float64):
@@ -3041,7 +3043,8 @@ def _write_header(
     def _write_map(self) -> None:
         """Called twice during file write. The first populates the values in
         the map with 0s. The second call writes the final map locations when
-        all blocks have been written."""
+        all blocks have been written.
+        """
         assert self._file is not None
         if not self._map:
             self._map = dict(
@@ -3189,7 +3192,8 @@ def _write_file_close_tag(self) -> None:
     def _update_strl_names(self) -> None:
         """Update column names for conversion to strl if they might have been
-        changed to comply with Stata naming rules"""
+        changed to comply with Stata naming rules
+        """
         # Update convert_strl if names changed
         for orig, new in self._converted_names.items():
             if orig in self._convert_strl:
@@ -3198,7 +3202,8 @@ def _update_strl_names(self) -> None:
     def _convert_strls(self, data: DataFrame) -> DataFrame:
         """Convert columns to StrLs if either very large or in the
-        convert_strl variable"""
+        convert_strl variable
+        """
         convert_cols = [
             col
             for i, col in enumerate(data)
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 7645c6b4cf709..7e1f499ec9493 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -25,7 +25,8 @@ def _axes(self):
     def _construct(self, shape, value=None, dtype=None, **kwargs):
         """ construct an object for the given shape
         if value is specified use that if its a scalar
-        if value is an array, repeat it as needed """
+        if value is an array, repeat it as needed
+        """
         if isinstance(shape, int):
             shape = tuple([shape] * self._ndim)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 1c2de8c8c223f..9b07269811d8e 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -20,7 +20,8 @@ def cartesian_product_for_groupers(result, args, names):
     """ Reindex to a cartesian production for the groupers,
-    preserving the nature (Categorical) of each grouper """
+    preserving the nature (Categorical) of each grouper
+    """
 
     def f(a):
         if isinstance(a, (CategoricalIndex, Categorical)):
diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py
index e6d5a9eb84410..48e090b242208 100644
--- a/pandas/tests/indexing/multiindex/conftest.py
+++ b/pandas/tests/indexing/multiindex/conftest.py
@@ -21,7 +21,8 @@ def multiindex_dataframe_random_data():
 @pytest.fixture
 def multiindex_year_month_day_dataframe_random_data():
     """DataFrame with 3 level MultiIndex (year, month, day) covering
-    first 100 business days from 2000-01-01 with random data"""
+    first 100 business days from 2000-01-01 with random data
+    """
     tdf = tm.makeTimeDataFrame(100)
     ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
     # use Int64Index, to make sure things work
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 6cc18a3989266..b3f6d65da5db5 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -503,7 +503,8 @@ def test_slice_integer(self):
     def test_integer_positional_indexing(self):
         """ make sure that we are raising on positional indexing
-        w.r.t. an integer index """
+        w.r.t. an integer index
+        """
 
         s = Series(range(2, 6), index=range(2, 6))
diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py
index d06f467760518..7f0b3ab7957e6 100644
--- a/pandas/tests/io/pytables/common.py
+++ b/pandas/tests/io/pytables/common.py
@@ -75,7 +75,8 @@ def ensure_clean_path(path):
 def _maybe_remove(store, key):
     """For tests using tables, try removing the table to be sure there is
-    no content from previous tests using the same table name."""
+    no content from previous tests using the same table name.
+    """
     try:
         store.remove(key)
     except (ValueError, KeyError):
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index fb81e57912dac..841241d5124e0 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -129,7 +129,8 @@ def test_with_missing_lzma():
 def test_with_missing_lzma_runtime():
     """Tests if RuntimeError is hit when calling lzma without
-    having the module available."""
+    having the module available.
+    """
     code = textwrap.dedent(
         """
         import sys
diff --git a/pandas/tests/resample/conftest.py b/pandas/tests/resample/conftest.py
index bb4f7ced3350f..a4ac15d9f3b07 100644
--- a/pandas/tests/resample/conftest.py
+++ b/pandas/tests/resample/conftest.py
@@ -99,7 +99,8 @@ def _index_name():
 @pytest.fixture
 def index(_index_factory, _index_start, _index_end, _index_freq, _index_name):
     """Fixture for parametrization of date_range, period_range and
-    timedelta_range indexes"""
+    timedelta_range indexes
+    """
     return _index_factory(_index_start, _index_end, freq=_index_freq, name=_index_name)
@@ -107,35 +108,40 @@ def index(_index_factory, _index_start, _index_end, _index_freq, _index_name):
 def _static_values(index):
     """Fixture for parametrization of values used in parametrization of
     Series and DataFrames with date_range, period_range and
-    timedelta_range indexes"""
+    timedelta_range indexes
+    """
     return np.arange(len(index))
 
 
 @pytest.fixture
 def _series_name():
     """Fixture for parametrization of Series name for Series used with
-    date_range, period_range and timedelta_range indexes"""
+    date_range, period_range and timedelta_range indexes
+    """
     return None
 
 
 @pytest.fixture
 def series(index, _series_name, _static_values):
     """Fixture for parametrization of Series with date_range, period_range and
-    timedelta_range indexes"""
+    timedelta_range indexes
+    """
     return Series(_static_values, index=index, name=_series_name)
 
 
 @pytest.fixture
 def empty_series(series):
     """Fixture for parametrization of empty Series with date_range,
-    period_range and timedelta_range indexes"""
+    period_range and timedelta_range indexes
+    """
     return series[:0]
 
 
 @pytest.fixture
 def frame(index, _series_name, _static_values):
     """Fixture for parametrization of DataFrame with date_range, period_range
-    and timedelta_range indexes"""
+    and timedelta_range indexes
+    """
     # _series_name is intentionally unused
     return DataFrame({"value": _static_values}, index=index)
@@ -143,7 +149,8 @@ def frame(index, _series_name, _static_values):
 @pytest.fixture
 def empty_frame(series):
     """Fixture for parametrization of empty DataFrame with date_range,
-    period_range and timedelta_range indexes"""
+    period_range and timedelta_range indexes
+    """
     index = series.index[:0]
     return DataFrame(index=index)
@@ -151,7 +158,8 @@ def empty_frame(series):
 @pytest.fixture(params=[Series, DataFrame])
 def series_and_frame(request, series, frame):
     """Fixture for parametrization of Series and DataFrame with date_range,
-    period_range and timedelta_range indexes"""
+    period_range and timedelta_range indexes
+    """
     if request.param == Series:
         return series
     if request.param == DataFrame:
diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py
index 691f2549c0ece..9075a4e791583 100644
--- a/pandas/tests/reshape/merge/test_merge_index_as_string.py
+++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py
@@ -30,7 +30,8 @@ def df2():
 @pytest.fixture(params=[[], ["outer"], ["outer", "inner"]])
 def left_df(request, df1):
     """ Construct left test DataFrame with specified levels
-    (any of 'outer', 'inner', and 'v1')"""
+    (any of 'outer', 'inner', and 'v1')
+    """
     levels = request.param
     if levels:
         df1 = df1.set_index(levels)
@@ -41,7 +42,8 @@ def left_df(request, df1):
 @pytest.fixture(params=[[], ["outer"], ["outer", "inner"]])
 def right_df(request, df2):
     """ Construct right test DataFrame with specified levels
-    (any of 'outer', 'inner', and 'v2')"""
+    (any of 'outer', 'inner', and 'v2')
+    """
     levels = request.param
     if levels:
diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py
index 08a5581886522..d839936f731a3 100644
--- a/pandas/tests/test_register_accessor.py
+++ b/pandas/tests/test_register_accessor.py
@@ -9,7 +9,8 @@
 @contextlib.contextmanager
 def ensure_removed(obj, attr):
     """Ensure that an attribute added to 'obj' during the test is
-    removed when we're done"""
+    removed when we're done
+    """
     try:
         yield
     finally: