diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 54a7afd90a09a..549359259bbd4 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -6,7 +6,7 @@ Key items to import for compatible code: * lists: lrange(), lmap(), lzip(), lfilter() -* iterable method compatibility: iteritems, iterkeys, itervalues +* iterable method compatibility: iterkeys, itervalues * Uses the original method if available, otherwise uses items, keys, values. * add_metaclass(metaclass) - class decorator that recreates class with with the given metaclass instead (and avoids intermediary class creation) @@ -45,10 +45,6 @@ def lfilter(*args, **kwargs): return list(filter(*args, **kwargs)) -def iteritems(obj, **kw): - return iter(obj.items(**kw)) - - def iterkeys(obj, **kw): return iter(obj.keys(**kw)) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index cd49946652566..995ed59ddabb1 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -9,7 +9,6 @@ from pandas._config import get_option from pandas._libs import algos as libalgos, lib -import pandas.compat as compat from pandas.compat import lzip from pandas.compat.numpy import function as nv from pandas.util._decorators import ( @@ -1317,7 +1316,7 @@ def __setstate__(self, state): state['_dtype'] = CategoricalDtype(state['_categories'], state['_ordered']) - for k, v in compat.iteritems(state): + for k, v in state.items(): setattr(self, k, v) @property diff --git a/pandas/core/base.py b/pandas/core/base.py index d4294e59cc845..40f090f661c2f 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -364,7 +364,7 @@ def nested_renaming_depr(level=4): # be list-likes if any(is_aggregator(x) for x in compat.itervalues(arg)): new_arg = OrderedDict() - for k, v in compat.iteritems(arg): + for k, v in arg.items(): if not isinstance(v, (tuple, list, dict)): new_arg[k] = [v] else: @@ -432,7 +432,7 @@ def _agg(arg, func): return an OrderedDict """ result = OrderedDict() - for fname, agg_how in compat.iteritems(arg): + for fname, agg_how in arg.items(): result[fname] = func(fname, agg_how) return result diff --git a/pandas/core/common.py b/pandas/core/common.py index 3cb23e9ee921d..e62a2119df820 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -14,7 +14,7 @@ import numpy as np from pandas._libs import lib, tslibs -from pandas.compat import PY36, iteritems +from pandas.compat import PY36 from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -362,7 +362,7 @@ def dict_compat(d): dict """ - return {maybe_box_datetimelike(key): value for key, value in iteritems(d)} + return {maybe_box_datetimelike(key): value for key, value in d.items()} def standardize_mapping(into): diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 71b57ec4ecd1e..a7524161dd80e 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -9,7 +9,6 @@ from pandas.errors import PerformanceWarning import pandas as pd -from pandas import compat import pandas.core.common as com from pandas.core.computation.common import _result_type_many @@ -30,7 +29,7 @@ def _align_core_single_unary_op(term): def _zip_axes_from_type(typ, new_axes): axes = {ax_name: new_axes[ax_ind] - for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES)} + for ax_ind, ax_name in typ._AXIS_NAMES.items()} return axes @@ -84,7 +83,7 @@ def _align_core(terms): if not axes[ax].is_(itm): axes[ax] = 
axes[ax].join(itm, how='outer') - for i, ndim in compat.iteritems(ndims): + for i, ndim in ndims.items(): for axis, items in zip(range(ndim), axes): ti = terms[i].value diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index e61dbd07dac5d..245cd9c403080 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -10,7 +10,7 @@ import numpy as np -from pandas.compat import iteritems, lmap +from pandas.compat import lmap import pandas as pd from pandas.core import common as com @@ -300,7 +300,7 @@ def f(self, node, *args, **kwargs): def add_ops(op_classes): """Decorator to add default implementation of ops.""" def f(cls): - for op_attr_name, op_class in iteritems(op_classes): + for op_attr_name, op_class in op_classes.items(): ops = getattr(cls, '{name}_ops'.format(name=op_attr_name)) ops_map = getattr(cls, '{name}_op_nodes_map'.format( name=op_attr_name)) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fdc99e957e257..501bc7811a385 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -33,7 +33,6 @@ from pandas.util._validators import (validate_bool_kwarg, validate_axis_style_args) -from pandas import compat from pandas.compat import PY36, lmap, lzip, raise_with_traceback from pandas.compat.numpy import function as nv from pandas.core.dtypes.cast import ( @@ -1275,9 +1274,9 @@ def to_dict(self, orient='dict', into=dict): into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( - (k, v.to_dict(into)) for k, v in compat.iteritems(self)) + (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): - return into_c((k, v.tolist()) for k, v in compat.iteritems(self)) + return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), @@ -1287,14 +1286,14 @@ def to_dict(self, orient='dict', into=dict): ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) - for k, v in compat.iteritems(self)) + for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) - for k, v in compat.iteritems(row)) + for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: @@ -1480,7 +1479,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, else: arrays = [] arr_columns = [] - for k, v in compat.iteritems(data): + for k, v in data.items(): if k in columns: arr_columns.append(k) arrays.append(v) @@ -2430,7 +2429,7 @@ def _sizeof_fmt(num, size_qualifier): counts = self.get_dtype_counts() dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k - in sorted(compat.iteritems(counts))] + in sorted(counts.items())] lines.append('dtypes: {types}'.format(types=', '.join(dtypes))) if memory_usage is None: @@ -8051,8 +8050,8 @@ def isin(self, values): def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() - for index, s in compat.iteritems(data): - for col, v in compat.iteritems(s): + for index, s in data.items(): + for col, v in s.items(): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 885c499c58dfa..e17e3fd5d3e92 100644 --- a/pandas/core/generic.py +++ 
b/pandas/core/generic.py @@ -15,7 +15,6 @@ from pandas._config import config from pandas._libs import Timestamp, iNaT, properties -import pandas.compat as compat from pandas.compat import lrange, lzip, set_function_name, to_str from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -6154,7 +6153,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, 'by column') result = self if inplace else self.copy() - for k, v in compat.iteritems(value): + for k, v in value.items(): if k not in result: continue obj = result[k] @@ -6512,7 +6511,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, to_replace = regex regex = True - items = list(compat.iteritems(to_replace)) + items = list(to_replace.items()) keys, values = lzip(*items) or ([], []) are_mappings = [is_dict_like(v) for v in values] @@ -6551,7 +6550,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} res = self if inplace else self.copy() - for c, src in compat.iteritems(to_replace): + for c, src in to_replace.items(): if c in value and c in self: # object conversion is handled in # series.replace which is called recursivelly @@ -6563,7 +6562,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, # {'A': NA} -> 0 elif not is_list_like(value): - keys = [(k, src) for k, src in compat.iteritems(to_replace) + keys = [(k, src) for k, src in to_replace.items() if k in self] keys_len = len(keys) - 1 for i, (k, src) in enumerate(keys): @@ -6610,7 +6609,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} new_data = self._data - for k, v in compat.iteritems(value): + for k, v in value.items(): if k in self: new_data = new_data.replace(to_replace=to_replace, value=v, filter=[k], diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 92cb4db2ac868..b1936a8f5121f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -20,7 +20,6 @@ class providing the base-class of operations. from pandas._config.config import option_context from pandas._libs import Timestamp, groupby as libgroupby -import pandas.compat as compat from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -876,7 +875,7 @@ def _python_agg_general(self, func, *args, **kwargs): if self.grouper._filter_empty_groups: mask = counts.ravel() > 0 - for name, result in compat.iteritems(output): + for name, result in output.items(): # since we are masking, make sure that we have a float object values = result diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 98647a6895574..4848170eaea43 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -11,7 +11,6 @@ from pandas._libs.lib import is_datetime_array from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp from pandas._libs.tslibs.timezones import tz_compare -import pandas.compat as compat from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -535,7 +534,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): # we actually set this value too. 
result._index_data = values result.name = name - for k, v in compat.iteritems(kwargs): + for k, v in kwargs.items(): setattr(result, k, v) return result._reset_identity() @@ -1754,7 +1753,7 @@ def __setstate__(self, state): if isinstance(state, dict): self._data = state.pop('data') - for k, v in compat.iteritems(state): + for k, v in state.items(): setattr(self, k, v) elif isinstance(state, tuple): @@ -4486,7 +4485,7 @@ def groupby(self, values): result = values._reverse_indexer() # map to the label - result = {k: self.take(v) for k, v in compat.iteritems(result)} + result = {k: self.take(v) for k, v in result.items()} return result diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 4493136e3e61e..930b2a4a5161f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -238,7 +238,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): values = cls._create_categorical(values, dtype=dtype) result._data = values result.name = name - for k, v in compat.iteritems(kwargs): + for k, v in kwargs.items(): setattr(result, k, v) result._reset_identity() diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index da6a917c93ba4..52899ea311e9b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -156,7 +156,7 @@ def _simple_new(cls, start, stop=None, step=None, name=None, result._stop = stop or 0 result._step = step or 1 result.name = name - for k, v in compat.iteritems(kwargs): + for k, v in kwargs.items(): setattr(result, k, v) result._reset_identity() diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index cfc42d26c5471..6c08cacb551df 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -103,7 +103,7 @@ def __call__(self, alt): @functools.wraps(alt) def f(values, axis=None, skipna=True, **kwds): if len(self.kwargs) > 0: - for k, v in compat.iteritems(self.kwargs): + for k, v in self.kwargs.items(): if k not in kwds: kwds[k] = v try: diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 542b1075313bf..3fb14c5d2ad9a 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -200,13 +200,13 @@ def _init_dict(self, data, axes, dtype=None): if haxis is not None: haxis = ensure_index(haxis) data = OrderedDict((k, v) - for k, v in compat.iteritems(data) + for k, v in data.items() if k in haxis) else: keys = com.dict_keys_to_ordered_list(data) haxis = Index(keys) - for k, v in compat.iteritems(data): + for k, v in data.items(): if isinstance(v, dict): data[k] = self._constructor_sliced(v) @@ -266,8 +266,8 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): orient = orient.lower() if orient == 'minor': new_data = defaultdict(OrderedDict) - for col, df in compat.iteritems(data): - for item, s in compat.iteritems(df): + for col, df in data.items(): + for item, s in df.items(): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover @@ -1500,7 +1500,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): result = OrderedDict() adj_frames = OrderedDict() - for k, v in compat.iteritems(frames): + for k, v in frames.items(): if isinstance(v, dict): adj_frames[k] = self._constructor_sliced(v) else: @@ -1512,7 +1512,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): reindex_dict = {self._AXIS_SLICEMAP[a]: axes_dict[a] for a in axes} reindex_dict['copy'] = False - for key, frame in compat.iteritems(adj_frames): + for key, frame in adj_frames.items(): if frame is not None: result[key] = 
frame.reindex(**reindex_dict) else: diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 99224f6fb7c5b..65b28a7ecc849 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.dtypes.missing import notna -from pandas import compat from pandas.core.arrays import Categorical from pandas.core.frame import _shared_docs from pandas.core.indexes.base import Index @@ -173,7 +172,7 @@ def lreshape(data, groups, dropna=True, label=None): for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): - mdata = {k: v[mask] for k, v in compat.iteritems(mdata)} + mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols) diff --git a/pandas/core/series.py b/pandas/core/series.py index 8a22765d85aec..716ccb0201fea 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -12,7 +12,6 @@ from pandas._config import get_option from pandas._libs import iNaT, index as libindex, lib, tslibs -import pandas.compat as compat from pandas.compat import PY36 from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, deprecate @@ -291,7 +290,7 @@ def _init_dict(self, data, index=None, dtype=None): # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: - keys, values = zip(*compat.iteritems(data)) + keys, values = zip(*data.items()) values = list(values) elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar @@ -1523,7 +1522,7 @@ def to_dict(self, into=dict): """ # GH16122 into_c = com.standardize_mapping(into) - return into_c(compat.iteritems(self)) + return into_c(self.items()) def to_frame(self, name=None): """ diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 08729442e701f..0ae371d8c8c77 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -7,7 +7,6 @@ import numpy as np from pandas._libs.sparse import BlockIndex, get_blocks -import pandas.compat as compat from pandas.compat import lmap from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender @@ -145,7 +144,7 @@ def _init_dict(self, data, index, columns, dtype=None): # pre-filter out columns if we passed it if columns is not None: columns = ensure_index(columns) - data = {k: v for k, v in compat.iteritems(data) if k in columns} + data = {k: v for k, v in data.items() if k in columns} else: keys = com.dict_keys_to_ordered_list(data) columns = Index(keys) @@ -158,7 +157,7 @@ def sp_maker(x): fill_value=self._default_fill_value, copy=True, dtype=dtype) sdict = {} - for k, v in compat.iteritems(data): + for k, v in data.items(): if isinstance(v, Series): # Force alignment, no copy necessary if not v.index.equals(index): @@ -322,7 +321,7 @@ def _unpickle_sparse_frame_compat(self, state): index = idx series_dict = DataFrame() - for col, (sp_index, sp_values) in compat.iteritems(series): + for col, (sp_index, sp_values) in series.items(): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) @@ -338,7 +337,7 @@ def to_dense(self): ------- df : DataFrame """ - data = {k: v.to_dense() for k, v in compat.iteritems(self)} + data = {k: v.to_dense() for k, v in self.items()} return DataFrame(data, index=self.index, columns=self.columns) def _apply_columns(self, func): @@ -347,7 +346,7 @@ def _apply_columns(self, func): 
""" new_data = {col: func(series) - for col, series in compat.iteritems(self)} + for col, series in self.items()} return self._constructor( data=new_data, index=self.index, columns=self.columns, @@ -380,7 +379,7 @@ def density(self): represented in the frame """ tot_nonsparse = sum(ser.sp_index.npoints - for _, ser in compat.iteritems(self)) + for _, ser in self.items()) tot = len(self.index) * len(self.columns) return tot_nonsparse / float(tot) @@ -599,7 +598,7 @@ def _combine_match_index(self, other, func, level=None): this, other = self.align(other, join='outer', axis=0, level=level, copy=False) - for col, series in compat.iteritems(this): + for col, series in this.items(): new_data[col] = func(series.values, other.values) fill_value = self._get_op_result_fill_value(other, func) @@ -723,7 +722,7 @@ def _reindex_columns(self, columns, method, copy, level, fill_value=None, raise NotImplementedError("'method' argument is not supported") # TODO: fill value handling - sdict = {k: v for k, v in compat.iteritems(self) if k in columns} + sdict = {k: v for k, v in self.items() if k in columns} return self._constructor( sdict, index=self.index, columns=columns, default_fill_value=self._default_fill_value).__finalize__(self) @@ -739,7 +738,7 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, fill_value = np.nan reindexers = {self._get_axis_number(a): val - for (a, val) in compat.iteritems(reindexers)} + for (a, val) in reindexers.items()} index, row_indexer = reindexers.get(0, (None, None)) columns, col_indexer = reindexers.get(1, (None, None)) @@ -917,7 +916,7 @@ def apply(self, func, axis=0, broadcast=None, reduce=None, if isinstance(func, np.ufunc): new_series = {} - for k, v in compat.iteritems(self): + for k, v in self.items(): applied = func(v) applied.fill_value = func(v.fill_value) new_series[k] = applied @@ -969,7 +968,7 @@ def stack_sparse_frame(frame): """ Only makes sense when fill_value is NaN """ - lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)] + lengths = [s.sp_index.npoints for _, s in frame.items()] nobs = sum(lengths) # this is pretty fast @@ -980,7 +979,7 @@ def stack_sparse_frame(frame): # TODO: Figure out whether this can be reached. # I think this currently can't be reached because you can't build a # SparseDataFrame with a non-np.NaN fill value (fails earlier). 
- for _, series in compat.iteritems(frame): + for _, series in frame.items(): if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') @@ -1021,7 +1020,7 @@ def homogenize(series_dict): need_reindex = False - for _, series in compat.iteritems(series_dict): + for _, series in series_dict.items(): if not np.isnan(series.fill_value): raise TypeError('this method is only valid with NaN fill values') @@ -1033,7 +1032,7 @@ def homogenize(series_dict): if need_reindex: output = {} - for name, series in compat.iteritems(series_dict): + for name, series in series_dict.items(): if not series.sp_index.equals(index): series = series.sparse_reindex(index) diff --git a/pandas/io/html.py b/pandas/io/html.py index 1d588632b69f8..e449bf223ba94 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -9,7 +9,7 @@ import os import re -from pandas.compat import iteritems, lmap, lrange, raise_with_traceback +from pandas.compat import lmap, lrange, raise_with_traceback from pandas.errors import AbstractMethodError, EmptyDataError from pandas.core.dtypes.common import is_list_like @@ -617,7 +617,7 @@ def _build_xpath_expr(attrs): if 'class_' in attrs: attrs['class'] = attrs.pop('class_') - s = ["@{key}={val!r}".format(key=k, val=v) for k, v in iteritems(attrs)] + s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()] return '[{expr}]'.format(expr=' and '.join(s)) @@ -769,7 +769,7 @@ def _expand_elements(body): not_max = lens[lens != lens_max] empty = [''] - for ind, length in iteritems(not_max): + for ind, length in not_max.items(): body[ind] += empty * (lens_max - length) diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 28cc768ba4e21..8a9533991fada 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -12,7 +12,7 @@ from pandas.core.dtypes.common import is_period_dtype -from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime +from pandas import DataFrame, MultiIndex, Series, isna, to_datetime from pandas.core.reshape.concat import concat from pandas.io.common import ( @@ -822,8 +822,8 @@ def _parse_no_numpy(self): json = self.json orient = self.orient if orient == "split": - decoded = {str(k): v for k, v in compat.iteritems( - loads(json, precise_float=self.precise_float))} + decoded = {str(k): v for k, v in loads( + json, precise_float=self.precise_float).items()} self.check_keys_split(decoded) self.obj = Series(dtype=None, **decoded) else: @@ -837,7 +837,7 @@ def _parse_numpy(self): if orient == "split": decoded = loads(json, dtype=None, numpy=True, precise_float=self.precise_float) - decoded = {str(k): v for k, v in compat.iteritems(decoded)} + decoded = {str(k): v for k, v in decoded.items()} self.check_keys_split(decoded) self.obj = Series(**decoded) elif orient == "columns" or orient == "index": @@ -875,7 +875,7 @@ def _parse_numpy(self): elif orient == "split": decoded = loads(json, dtype=None, numpy=True, precise_float=self.precise_float) - decoded = {str(k): v for k, v in compat.iteritems(decoded)} + decoded = {str(k): v for k, v in decoded.items()} self.check_keys_split(decoded) self.obj = DataFrame(**decoded) elif orient == "values": @@ -895,8 +895,8 @@ def _parse_no_numpy(self): self.obj = DataFrame( loads(json, precise_float=self.precise_float), dtype=None) elif orient == "split": - decoded = {str(k): v for k, v in compat.iteritems( - loads(json, precise_float=self.precise_float))} + decoded = {str(k): v for k, v in loads( + json, precise_float=self.precise_float).items()} 
self.check_keys_split(decoded) self.obj = DataFrame(dtype=None, **decoded) elif orient == "index": diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index a836faec2b04f..26bf6a8cf410d 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -273,7 +273,7 @@ def _recursive_extract(data, path, seen_meta, level=0): columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem - for k, v in compat.iteritems(meta_vals): + for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k diff --git a/pandas/io/packers.py b/pandas/io/packers.py index cff0f0e4b34d0..ac9b132b191b6 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -462,7 +462,7 @@ def encode(obj): # for f in ['default_fill_value', 'default_kind']: # d[f] = getattr(obj, f, None) # d['data'] = dict([(name, ss) - # for name, ss in compat.iteritems(obj)]) + # for name, ss in obj.items()]) # return d else: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5d73b377838b6..5ad6eb009b6ee 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -912,7 +912,7 @@ def _get_options_with_defaults(self, engine): options = {} - for argname, default in compat.iteritems(_parser_defaults): + for argname, default in _parser_defaults.items(): value = kwds.get(argname, default) # see gh-12935 @@ -922,7 +922,7 @@ def _get_options_with_defaults(self, engine): else: options[argname] = value - for argname, default in compat.iteritems(_c_parser_defaults): + for argname, default in _c_parser_defaults.items(): if argname in kwds: value = kwds[argname] @@ -941,7 +941,7 @@ def _get_options_with_defaults(self, engine): options[argname] = value if engine == 'python-fwf': - for argname, default in compat.iteritems(_fwf_defaults): + for argname, default in _fwf_defaults.items(): options[argname] = kwds.get(argname, default) return options @@ -1657,7 +1657,7 @@ def _agg_index(self, index, try_parse_dates=True): def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None): result = {} - for c, values in compat.iteritems(dct): + for c, values in dct.items(): conv_f = None if converters is None else converters.get(c, None) if isinstance(dtypes, dict): cast_type = dtypes.get(c, None) @@ -2471,7 +2471,7 @@ def _convert_data(self, data): def _clean_mapping(mapping): "converts col numbers to names" clean = {} - for col, v in compat.iteritems(mapping): + for col, v in mapping.items(): if isinstance(col, int) and col not in self.orig_names: col = self.orig_names[col] clean[col] = v @@ -3258,7 +3258,7 @@ def _isindex(colspec): elif isinstance(parse_spec, dict): # dict of new name to column list - for new_name, colspec in compat.iteritems(parse_spec): + for new_name, colspec in parse_spec.items(): if new_name in data_dict: raise ValueError( 'Date column {name} already in dict'.format(name=new_name)) @@ -3316,7 +3316,7 @@ def _clean_na_values(na_values, keep_default_na=True): # into array-likes for further use. This is also # where we append the default NaN values, provided # that `keep_default_na=True`. - for k, v in compat.iteritems(old_na_values): + for k, v in old_na_values.items(): if not is_list_like(v): v = [v] @@ -3386,7 +3386,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): dtype = defaultdict(lambda: np.object) # Convert column indexes to column names. 
- for k, v in compat.iteritems(_dtype): + for k, v in _dtype.items(): col = columns[k] if is_integer(k) else k dtype[col] = v diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 2dedeaf0a4cda..2cfc1bc5eac2e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -29,8 +29,8 @@ from pandas import ( DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, PeriodIndex, - Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat, concat, - isna, to_datetime) + Series, SparseDataFrame, SparseSeries, TimedeltaIndex, concat, isna, + to_datetime) from pandas.core.arrays.categorical import Categorical from pandas.core.arrays.sparse import BlockIndex, IntIndex from pandas.core.base import StringMixin @@ -2448,7 +2448,7 @@ class GenericFixed(Fixed): """ a generified fixed version """ _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'} - _reverse_index_map = {v: k for k, v in compat.iteritems(_index_type_map)} + _reverse_index_map = {v: k for k, v in _index_type_map.items()} attributes = [] # indexer helpders @@ -2912,7 +2912,7 @@ def read(self, **kwargs): def write(self, obj, **kwargs): """ write it as a collection of individual sparse series """ super(SparseFrameFixed, self).write(obj, **kwargs) - for name, ss in compat.iteritems(obj): + for name, ss in obj.items(): key = 'sparse_series_{name}'.format(name=name) if key not in self.group._v_children: node = self._handle.create_group(self.group, key) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index af23c13063aa3..06560f5d702d6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -9,7 +9,6 @@ from pandas._config import get_option -import pandas.compat as compat from pandas.compat import lrange from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly @@ -1627,7 +1626,7 @@ def _validate_color_args(self): if isinstance(self.color, dict): valid_keys = ['boxes', 'whiskers', 'medians', 'caps'] - for key, values in compat.iteritems(self.color): + for key, values in self.color.items(): if key not in valid_keys: raise ValueError("color dict contains invalid " "key '{0}' " diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py index 5e9a73719f67b..0485ddb0e6f43 100644 --- a/pandas/tests/frame/common.py +++ b/pandas/tests/frame/common.py @@ -3,7 +3,6 @@ from pandas.util._decorators import cache_readonly import pandas as pd -from pandas import compat import pandas.util.testing as tm _seriesd = tm.getSeriesData() @@ -11,8 +10,7 @@ _frame = pd.DataFrame(_seriesd) _frame2 = pd.DataFrame(_seriesd, columns=['D', 'C', 'B', 'A']) -_intframe = pd.DataFrame({k: v.astype(int) - for k, v in compat.iteritems(_seriesd)}) +_intframe = pd.DataFrame({k: v.astype(int) for k, v in _seriesd.items()}) _tsframe = pd.DataFrame(_tsd) @@ -33,7 +31,7 @@ def frame2(self): @cache_readonly def intframe(self): # force these all to int64 to avoid platform testing issues - return pd.DataFrame({c: s for c, s in compat.iteritems(_intframe)}, + return pd.DataFrame({c: s for c, s in _intframe.items()}, dtype=np.int64) @cache_readonly diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index fbe03325a3ad9..27c0e070c10c2 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas import DataFrame, NaT, compat, date_range +from pandas import DataFrame, NaT, date_range import pandas.util.testing as tm @@ -51,10 +51,9 @@ def int_frame(): Columns 
are ['A', 'B', 'C', 'D'] """ - df = DataFrame({k: v.astype(int) - for k, v in compat.iteritems(tm.getSeriesData())}) + df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()}) # force these all to int64 to avoid platform testing issues - return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64) + return DataFrame({c: s for c, s in df.items()}, dtype=np.int64) @pytest.fixture @@ -101,8 +100,7 @@ def mixed_int_frame(): Columns are ['A', 'B', 'C', 'D']. """ - df = DataFrame({k: v.astype(int) - for k, v in compat.iteritems(tm.getSeriesData())}) + df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()}) df.A = df.A.astype('int32') df.B = np.ones(len(df.B), dtype='uint64') df.C = df.C.astype('uint8') diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 600575b5255d2..4d715d19dccc0 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -193,7 +193,7 @@ def test_nonzero(self, float_frame, float_string_frame): def test_iteritems(self): df = self.klass([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) - for k, v in compat.iteritems(df): + for k, v in df.items(): assert isinstance(v, self.klass._constructor_sliced) def test_items(self): @@ -343,8 +343,8 @@ def test_to_numpy_copy(self): def test_transpose(self, float_frame): frame = float_frame dft = frame.T - for idx, series in compat.iteritems(dft): - for col, value in compat.iteritems(series): + for idx, series in dft.items(): + for col, value in series.items(): if np.isnan(value): assert np.isnan(frame[col][idx]) else: @@ -355,7 +355,7 @@ def test_transpose(self, float_frame): mixed = self.klass(data, index=index) mixed_T = mixed.T - for col, s in compat.iteritems(mixed_T): + for col, s in mixed_T.items(): assert s.dtype == np.object_ def test_swapaxes(self): @@ -398,12 +398,12 @@ def test_repr_with_mi_nat(self, float_string_frame): assert result == expected def test_iteritems_names(self, float_string_frame): - for k, v in compat.iteritems(float_string_frame): + for k, v in float_string_frame.items(): assert v.name == k def test_series_put_names(self, float_string_frame): series = float_string_frame._series - for k, v in compat.iteritems(series): + for k, v in series.items(): assert v.name == k def test_empty_nonzero(self): @@ -459,7 +459,7 @@ def test_deepcopy(self, float_frame): cp = deepcopy(float_frame) series = cp['A'] series[:] = 10 - for idx, value in compat.iteritems(series): + for idx, value in series.items(): assert float_frame['A'][idx] != value def test_transpose_get_view(self, float_frame): diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index af6d4391dca74..b2f531bfea249 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -12,8 +12,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd -from pandas import ( - DataFrame, MultiIndex, Series, Timestamp, compat, date_range, notna) +from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, notna from pandas.conftest import _get_cython_table_params from pandas.core.apply import frame_apply import pandas.util.testing as tm @@ -334,13 +333,13 @@ def test_apply_differently_indexed(self): result0 = df.apply(Series.describe, axis=0) expected0 = DataFrame({i: v.describe() - for i, v in compat.iteritems(df)}, + for i, v in df.items()}, columns=df.columns) assert_frame_equal(result0, expected0) result1 = df.apply(Series.describe, axis=1) expected1 = DataFrame({i: v.describe() - for i, 
v in compat.iteritems(df.T)}, + for i, v in df.T.items()}, columns=df.index).T assert_frame_equal(result1, expected1) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 4df297bcc436e..f06d3d38e0a6d 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -10,8 +10,7 @@ import pandas as pd from pandas import ( - Categorical, DataFrame, Index, MultiIndex, Series, compat, date_range, - isna) + Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna) from pandas.tests.frame.common import TestData import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal @@ -212,7 +211,7 @@ def test_reindex(self): newFrame = self.frame.reindex(self.ts1.index) for col in newFrame.columns: - for idx, val in compat.iteritems(newFrame[col]): + for idx, val in newFrame[col].items(): if idx in self.frame.index: if np.isnan(val): assert np.isnan(self.frame[col][idx]) @@ -221,7 +220,7 @@ def test_reindex(self): else: assert np.isnan(val) - for col, series in compat.iteritems(newFrame): + for col, series in newFrame.items(): assert tm.equalContents(series.index, newFrame.index) emptyFrame = self.frame.reindex(Index([])) assert len(emptyFrame.index) == 0 @@ -230,7 +229,7 @@ def test_reindex(self): nonContigFrame = self.frame.reindex(self.ts1.index[::2]) for col in nonContigFrame.columns: - for idx, val in compat.iteritems(nonContigFrame[col]): + for idx, val in nonContigFrame[col].items(): if idx in self.frame.index: if np.isnan(val): assert np.isnan(self.frame[col][idx]) @@ -239,7 +238,7 @@ def test_reindex(self): else: assert np.isnan(val) - for col, series in compat.iteritems(nonContigFrame): + for col, series in nonContigFrame.items(): assert tm.equalContents(series.index, nonContigFrame.index) # corner cases diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index e8736e514425f..d071e13599e5d 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -17,7 +17,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, Index, MultiIndex, RangeIndex, Series, Timedelta, - Timestamp, compat, date_range, isna) + Timestamp, date_range, isna) from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -462,11 +462,11 @@ def test_constructor_subclass_dict(self): data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)), 'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))} df = DataFrame(data) - refdf = DataFrame({col: dict(compat.iteritems(val)) - for col, val in compat.iteritems(data)}) + refdf = DataFrame({col: dict(val.items()) + for col, val in data.items()}) tm.assert_frame_equal(refdf, df) - data = tm.TestSubDict(compat.iteritems(data)) + data = tm.TestSubDict(data.items()) df = DataFrame(data) tm.assert_frame_equal(refdf, df) @@ -474,7 +474,7 @@ def test_constructor_subclass_dict(self): from collections import defaultdict data = {} self.frame['B'][:10] = np.nan - for k, v in compat.iteritems(self.frame): + for k, v in self.frame.items(): dct = defaultdict(dict) dct.update(v.to_dict()) data[k] = dct @@ -526,7 +526,7 @@ def test_constructor_dict_of_tuples(self): data = {'a': (1, 2, 3), 'b': (4, 5, 6)} result = DataFrame(data) - expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)}) + expected = DataFrame({k: list(v) for k, v in data.items()}) tm.assert_frame_equal(result, expected, check_dtype=False) def 
test_constructor_dict_multiindex(self): @@ -2099,13 +2099,13 @@ def test_from_records_sequencelike(self): tuples = [] columns = [] dtypes = [] - for dtype, b in compat.iteritems(blocks): + for dtype, b in blocks.items(): columns.extend(b.columns) dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns]) for i in range(len(df.index)): tup = [] - for _, b in compat.iteritems(blocks): + for _, b in blocks.items(): tup.extend(b.iloc[i].values) tuples.append(tuple(tup)) @@ -2172,11 +2172,11 @@ def test_from_records_dictlike(self): # from the dict blocks = df._to_dict_of_blocks() columns = [] - for dtype, b in compat.iteritems(blocks): + for dtype, b in blocks.items(): columns.extend(b.columns) - asdict = {x: y for x, y in compat.iteritems(df)} - asdict2 = {x: y.values for x, y in compat.iteritems(df)} + asdict = {x: y for x, y in df.items()} + asdict2 = {x: y.values for x, y in df.items()} # dict of series & dict of ndarrays (have dtype info) results = [] diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index decd9ec304b37..9aad010a899d2 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -8,8 +8,7 @@ import pytz from pandas import ( - CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, compat, - date_range) + CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, date_range) from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -374,20 +373,20 @@ def test_to_dict(self, mapping): # GH16122 recons_data = DataFrame(test_data).to_dict(into=mapping) - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k][k2]) recons_data = DataFrame(test_data).to_dict("l", mapping) - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k][int(k2) - 1]) recons_data = DataFrame(test_data).to_dict("s", mapping) - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k][k2]) recons_data = DataFrame(test_data).to_dict("sp", mapping) @@ -407,8 +406,8 @@ def test_to_dict(self, mapping): # GH10844 recons_data = DataFrame(test_data).to_dict("i") - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k2][k]) df = DataFrame(test_data) @@ -416,8 +415,8 @@ def test_to_dict(self, mapping): recons_data = df.to_dict("i") comp_data = test_data.copy() comp_data['duped'] = comp_data[df.columns[0]] - for k, v in compat.iteritems(comp_data): - for k2, v2 in compat.iteritems(v): + for k, v in comp_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k2][k]) @pytest.mark.parametrize('mapping', [list, defaultdict, []]) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index fa8a6ab3c29bd..9a10595a9f7ea 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -11,7 +11,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, Series, Timedelta, Timestamp, - _np_version_under1p14, compat, concat, date_range, option_context) + _np_version_under1p14, concat, date_range, option_context) from pandas.core.arrays import integer_array from pandas.tests.frame.common import TestData 
import pandas.util.testing as tm @@ -388,8 +388,7 @@ def test_select_dtypes_typecodes(self): def test_dtypes_gh8722(self): self.mixed_frame['bool'] = self.mixed_frame['A'] > 0 result = self.mixed_frame.dtypes - expected = Series({k: v.dtype - for k, v in compat.iteritems(self.mixed_frame)}, + expected = Series({k: v.dtype for k, v in self.mixed_frame.items()}, index=result.index) assert_series_equal(result, expected) @@ -431,7 +430,7 @@ def test_astype(self): # mixed casting def _check_cast(df, v): assert (list({s.dtype.name for - _, s in compat.iteritems(df)})[0] == v) + _, s in df.items()})[0] == v) mn = self.all_mixed._get_numeric_data().copy() mn['little_float'] = np.array(12345., dtype='float16') diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 9149b305f5d0d..f58fe85cad258 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -15,7 +15,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series, - Timestamp, compat, date_range, isna, notna) + Timestamp, date_range, isna, notna) import pandas.core.common as com from pandas.core.indexing import IndexingError from pandas.tests.frame.common import TestData @@ -34,11 +34,11 @@ def test_getitem(self): assert len(sl.index) == 20 # Column access - for _, series in compat.iteritems(sl): + for _, series in sl.items(): assert len(series.index) == 20 assert tm.equalContents(series.index, sl.index) - for key, _ in compat.iteritems(self.frame._series): + for key, _ in self.frame._series.items(): assert self.frame[key] is not None assert 'random' not in self.frame @@ -2438,7 +2438,7 @@ def test_at_time_between_time_datetimeindex(self): def test_xs(self): idx = self.frame.index[5] xs = self.frame.xs(idx) - for item, value in compat.iteritems(xs): + for item, value in xs.items(): if np.isnan(value): assert np.isnan(self.frame[item][idx]) else: @@ -2595,7 +2595,7 @@ def is_ok(s): s.dtype != 'uint8') return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) - for c, s in compat.iteritems(df))) + for c, s in df.items())) def _check_get(df, cond, check_dtypes=True): other1 = _safe_add(df) @@ -2713,7 +2713,7 @@ def _check_set(df, cond, check_dtypes=True): # dtypes (and confirm upcasts)x if check_dtypes: - for k, v in compat.iteritems(df.dtypes): + for k, v in df.dtypes.items(): if issubclass(v.type, np.integer) and not cond[k].all(): v = np.dtype('float64') assert dfi[k].dtype == v diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index fc991cd17cae8..edce25566e361 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -7,7 +7,7 @@ import pytest import pandas as pd -from pandas import DataFrame, MultiIndex, Series, compat +from pandas import DataFrame, MultiIndex, Series import pandas.core.common as com from pandas.tests.frame.common import _check_mixed_float import pandas.util.testing as tm @@ -383,7 +383,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame, added = float_frame + series - for key, s in compat.iteritems(added): + for key, s in added.items(): assert_series_equal(s, float_frame[key] + series[key]) larger_series = series.to_dict() @@ -391,7 +391,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame, larger_series = Series(larger_series) larger_added = float_frame + larger_series - for key, s in compat.iteritems(float_frame): + for key, s in float_frame.items(): assert_series_equal(larger_added[key], s + 
series[key]) assert 'E' in larger_added assert np.isnan(larger_added['E']).all() @@ -424,7 +424,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame, # and require explicit broadcasting added = datetime_frame.add(ts, axis='index') - for key, col in compat.iteritems(datetime_frame): + for key, col in datetime_frame.items(): result = col + ts assert_series_equal(added[key], result, check_names=False) assert added[key].name == key @@ -465,7 +465,7 @@ def test_combineFunc(self, float_frame, mixed_float_frame): # vs mix result = mixed_float_frame * 2 - for c, s in compat.iteritems(result): + for c, s in result.items(): tm.assert_numpy_array_equal( s.values, mixed_float_frame[c].values * 2) _check_mixed_float(result, dtype=dict(C=None)) diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index f44739e83267f..20479f9a4fcbf 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -10,7 +10,7 @@ from pandas.compat import lrange import pandas as pd -from pandas import DataFrame, Index, Series, Timestamp, compat, date_range +from pandas import DataFrame, Index, Series, Timestamp, date_range from pandas.tests.frame.common import TestData from pandas.util.testing import assert_frame_equal, assert_series_equal @@ -809,8 +809,7 @@ def test_replace_input_formats_listlike(self): df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5], 'C': ['', 'asdf', 'fd']}) filled = df.replace(to_rep, values) - expected = {k: v.replace(to_rep[k], values[k]) - for k, v in compat.iteritems(df)} + expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()} assert_frame_equal(filled, DataFrame(expected)) result = df.replace([0, 2, 5], [5, 2, 0]) @@ -823,8 +822,7 @@ def test_replace_input_formats_listlike(self): df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5], 'C': ['', 'asdf', 'fd']}) filled = df.replace(np.nan, values) - expected = {k: v.replace(np.nan, values[k]) - for k, v in compat.iteritems(df)} + expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()} assert_frame_equal(filled, DataFrame(expected)) # list to list @@ -847,8 +845,7 @@ def test_replace_input_formats_scalar(self): # dict to scalar to_rep = {'A': np.nan, 'B': 0, 'C': ''} filled = df.replace(to_rep, 0) - expected = {k: v.replace(to_rep[k], 0) - for k, v in compat.iteritems(df)} + expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()} assert_frame_equal(filled, DataFrame(expected)) msg = "value argument must be scalar, dict, or Series" diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 0d1575a35a0bc..187fea5403aea 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import ( - DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range, isna) + DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna) import pandas.core.nanops as nanops from pandas.util import testing as tm @@ -392,7 +392,7 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i): "args": [1]}, "count": {"expected": 2}} - for method, data in compat.iteritems(grp_exp): + for method, data in grp_exp.items(): if "args" not in data: data["args"] = [] diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 885def32db046..31b602e38c4ad 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import ( 
- DataFrame, Index, MultiIndex, Panel, Series, Timestamp, compat, date_range, + DataFrame, Index, MultiIndex, Panel, Series, Timestamp, date_range, read_csv) import pandas.core.common as com import pandas.util.testing as tm @@ -403,7 +403,7 @@ def test_frame_groupby(tsframe): groups = grouped.groups indices = grouped.indices - for k, v in compat.iteritems(groups): + for k, v in groups.items(): samething = tsframe.index.take(indices[k]) assert (samething == v).all() @@ -524,7 +524,7 @@ def test_groupby_multiple_columns(df, op): for n2, gp2 in gp1.groupby('B'): expected[n1][n2] = op(gp2.loc[:, ['C', 'D']]) expected = {k: DataFrame(v) - for k, v in compat.iteritems(expected)} + for k, v in expected.items()} expected = Panel.fromDict(expected).swapaxes(0, 1) expected.major_axis.name, expected.minor_axis.name = 'A', 'B' @@ -1275,7 +1275,7 @@ def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values) tups = com.asarray_tuplesafe(tups) expected = f(df.groupby(tups)[field]) - for k, v in compat.iteritems(expected): + for k, v in expected.items(): assert (result[k] == v) _check_groupby(df, result, ['a', 'b'], 'd') diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 8382111ec9901..867cb8365476e 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import ( - CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp, compat, + CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp, date_range) from pandas.core.groupby.grouper import Grouping import pandas.util.testing as tm @@ -671,14 +671,14 @@ def test_groups(self, df): groups = grouped.groups assert groups is grouped.groups # caching works - for k, v in compat.iteritems(grouped.groups): + for k, v in grouped.groups.items(): assert (df.loc[v]['A'] == k).all() grouped = df.groupby(['A', 'B']) groups = grouped.groups assert groups is grouped.groups # caching works - for k, v in compat.iteritems(grouped.groups): + for k, v in grouped.groups.items(): assert (df.loc[v]['A'] == k[0]).all() assert (df.loc[v]['B'] == k[1]).all() diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index be266798973d1..3f0656615545c 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -4,7 +4,6 @@ import pytest from pandas._libs.tslib import iNaT -import pandas.compat as compat from pandas.core.dtypes.dtypes import CategoricalDtype @@ -235,7 +234,7 @@ def test_copy_name(self): # gh-12309: Check that the "name" argument # passed at initialization is honored. 
- for name, index in compat.iteritems(self.indices): + for name, index in self.indices.items(): if isinstance(index, MultiIndex): continue @@ -262,7 +261,7 @@ def test_copy_name(self): def test_ensure_copied_data(self): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 - for name, index in compat.iteritems(self.indices): + for name, index in self.indices.items(): init_kwargs = {} if isinstance(index, PeriodIndex): # Needs "freq" specification: @@ -298,7 +297,7 @@ def test_ensure_copied_data(self): check_same='same') def test_memory_usage(self): - for name, index in compat.iteritems(self.indices): + for name, index in self.indices.items(): result = index.memory_usage() if len(index): index.get_loc(index[0]) @@ -428,7 +427,7 @@ def test_where(self, klass): @pytest.mark.parametrize("method", ["intersection", "union", "difference", "symmetric_difference"]) def test_set_ops_error_cases(self, case, method): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): # non-iterable input msg = "Input must be Index or array-like" @@ -436,7 +435,7 @@ def test_set_ops_error_cases(self, case, method): getattr(idx, method)(case) def test_intersection_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[:5] second = idx[:3] intersect = first.intersection(second) @@ -466,7 +465,7 @@ def test_intersection_base(self): first.intersection([1, 2, 3]) def test_union_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[3:] second = idx[:5] everything = idx @@ -494,7 +493,7 @@ def test_union_base(self): @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[2:] second = idx[:4] answer = idx[4:] @@ -529,7 +528,7 @@ def test_difference_base(self, sort): first.difference([1, 2, 3], sort) def test_symmetric_difference(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[1:] second = idx[:-1] if isinstance(idx, CategoricalIndex): @@ -560,7 +559,7 @@ def test_symmetric_difference(self): def test_insert_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): result = idx[1:4] if not len(idx): @@ -571,7 +570,7 @@ def test_insert_base(self): def test_delete_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): if not len(idx): continue @@ -596,7 +595,7 @@ def test_delete_base(self): def test_equals(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) @@ -682,7 +681,7 @@ def test_numpy_ufuncs(self): # test ufuncs of numpy, see: # http://docs.scipy.org/doc/numpy/reference/ufuncs.html - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index c3b00133228d8..8bdf4d84427ba 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -4,8 +4,6 @@ import pytest 
import pytz -import pandas.compat as compat - import pandas as pd from pandas import DatetimeIndex, Index, Timestamp, date_range, notna import pandas.util.testing as tm @@ -413,7 +411,7 @@ def test_delete(self): -1: expected_4, 4: expected_4, 1: expected_1} - for n, expected in compat.iteritems(cases): + for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name @@ -460,7 +458,7 @@ def test_delete_slice(self): cases = {(0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5} - for n, expected in compat.iteritems(cases): + for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index fa08315e13600..ed1028b45f5db 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -22,8 +22,8 @@ import pandas as pd from pandas import ( - DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat, - date_range, isna, to_datetime) + DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, date_range, isna, + to_datetime) from pandas.core.arrays import DatetimeArray from pandas.core.tools import datetimes as tools from pandas.util import testing as tm @@ -1701,7 +1701,7 @@ def test_parsers_dayfirst_yearfirst(self, cache): (True, True, datetime(2020, 12, 21))]} - for date_str, values in compat.iteritems(cases): + for date_str, values in cases.items(): for dayfirst, yearfirst, expected in values: # odd comparisons across version @@ -1739,7 +1739,7 @@ def test_parsers_timestring(self, cache): cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)), '9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))} - for date_str, (exp_now, exp_def) in compat.iteritems(cases): + for date_str, (exp_now, exp_def) in cases.items(): result1, _, _ = parsing.parse_time_string(date_str) result2 = to_datetime(date_str) result3 = to_datetime([date_str]) diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index a6264e4dad4f0..7233f53572625 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -4,7 +4,7 @@ import pytest import pandas as pd -from pandas import Index, Timedelta, TimedeltaIndex, compat, timedelta_range +from pandas import Index, Timedelta, TimedeltaIndex, timedelta_range import pandas.util.testing as tm @@ -240,7 +240,7 @@ def test_delete(self): -1: expected_4, 4: expected_4, 1: expected_1} - for n, expected in compat.iteritems(cases): + for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name @@ -267,7 +267,7 @@ def test_delete_slice(self): cases = {(0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5} - for n, expected in compat.iteritems(cases): + for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name diff --git a/pandas/tests/io/json/__pycache__/tmp2c7r4efu b/pandas/tests/io/json/__pycache__/tmp2c7r4efu new file mode 100644 index 0000000000000..d1258abbcdf40 Binary files /dev/null and b/pandas/tests/io/json/__pycache__/tmp2c7r4efu differ diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 9ca5bf95e5e08..42c3b7bf556da 100644 --- 
a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -13,8 +13,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import ( - DataFrame, DatetimeIndex, Series, Timestamp, compat, read_json) +from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json import pandas.util.testing as tm from pandas.util.testing import ( assert_almost_equal, assert_frame_equal, assert_index_equal, @@ -26,7 +25,7 @@ _frame = DataFrame(_seriesd) _frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A']) _intframe = DataFrame({k: v.astype(np.int64) - for k, v in compat.iteritems(_seriesd)}) + for k, v in _seriesd.items()}) _tsframe = DataFrame(_tsd) _cat_frame = _frame.copy() diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 8bf315b73366e..916d9ce63f4ee 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -40,7 +40,7 @@ def _clean_dict(d): cleaned_dict : dict """ - return {str(k): v for k, v in compat.iteritems(d)} + return {str(k): v for k, v in d.items()} @pytest.fixture(params=[ diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index c5bac4724d70b..7f827808b6aae 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -13,7 +13,6 @@ import pandas._libs.parsers as parser from pandas._libs.parsers import TextReader -import pandas.compat as compat from pandas import DataFrame import pandas.util.testing as tm @@ -347,6 +346,6 @@ def test_empty_csv_input(self): def assert_array_dicts_equal(left, right): - for k, v in compat.iteritems(left): + for k, v in left.items(): assert tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k])) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 1377888a58d07..b06da91f72e28 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -11,7 +11,7 @@ from numpy import nan import pytest -from pandas.compat import PY36, iteritems +from pandas.compat import PY36 import pandas.util._test_decorators as td import pandas as pd @@ -798,7 +798,7 @@ def tdf(col_sheet_name): with ensure_clean(ext) as pth: with ExcelWriter(pth) as ew: - for sheetname, df in iteritems(dfs): + for sheetname, df in dfs.items(): df.to_excel(ew, sheetname) dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0) diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 52fead0166dc5..407095e379a04 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -13,7 +13,6 @@ import numpy as np import pytest -import pandas.compat as compat from pandas.compat import iterkeys from pandas.core.dtypes.common import is_categorical_dtype @@ -685,7 +684,7 @@ def test_variable_labels(self): sr_117 = rdr.variable_labels() keys = ('var1', 'var2', 'var3') labels = ('label1', 'label2', 'label3') - for k, v in compat.iteritems(sr_115): + for k, v in sr_115.items(): assert k in sr_117 assert v == sr_117[k] assert k in keys diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 1eef226749383..30736b11817c0 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -7,7 +7,6 @@ import numpy as np from numpy import random -from pandas.compat import iteritems from pandas.util._decorators import cache_readonly import pandas.util._test_decorators as td @@ -416,7 +415,7 @@ def _check_box_return_type(self, returned, return_type, 
expected_keys=None, assert isinstance(returned, Series) assert sorted(returned.keys()) == sorted(expected_keys) - for key, value in iteritems(returned): + for key, value in returned.items(): assert isinstance(value, types[return_type]) # check returned dict has correct mapping if return_type == 'axes': diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 63ee899944e92..cce1b6f13c942 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -5,7 +5,6 @@ import pytest from pandas._libs import join as libjoin -import pandas.compat as compat from pandas.compat import lrange import pandas as pd @@ -783,6 +782,6 @@ def _join_by_hand(a, b, how='left'): result_columns = a.columns.append(b.columns) - for col, s in compat.iteritems(b_re): + for col, s in b_re.items(): a_re[col] = s return a_re.reindex(columns=result_columns) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 5080db9354a1f..c065900975869 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -11,8 +11,6 @@ from numpy.random import randn import pytest -from pandas.compat import iteritems - from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd @@ -102,13 +100,13 @@ def _check_expected_dtype(self, obj, label): def test_dtypes(self): # to confirm test case covers intended dtypes - for typ, vals in iteritems(self.data): + for typ, vals in self.data.items(): self._check_expected_dtype(pd.Index(vals), typ) self._check_expected_dtype(pd.Series(vals), typ) def test_concatlike_same_dtypes(self): # GH 13660 - for typ1, vals1 in iteritems(self.data): + for typ1, vals1 in self.data.items(): vals2 = vals1 vals3 = vals1 @@ -214,8 +212,8 @@ def test_concatlike_same_dtypes(self): def test_concatlike_dtypes_coercion(self): # GH 13660 - for typ1, vals1 in iteritems(self.data): - for typ2, vals2 in iteritems(self.data): + for typ1, vals1 in self.data.items(): + for typ2, vals2 in self.data.items(): vals3 = vals2 diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index a7c2768c5b319..ffc8de59e4d63 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -10,7 +10,6 @@ from pandas._libs.tslibs.parsing import DateParseError from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz -from pandas.compat import iteritems from pandas.compat.numpy import np_datetime64_compat import pandas as pd @@ -708,7 +707,7 @@ def test_period_deprecated_freq(self): "N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]} msg = INVALID_FREQ_ERR_MSG - for exp, freqs in iteritems(cases): + for exp, freqs in cases.items(): for freq in freqs: with pytest.raises(ValueError, match=msg): Period('2016-03-01 09:00', freq=freq) diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py index edc21282ccafa..579ef7955ddb1 100644 --- a/pandas/tests/series/indexing/test_alter_index.py +++ b/pandas/tests/series/indexing/test_alter_index.py @@ -7,7 +7,6 @@ from numpy import nan import pytest -import pandas.compat as compat from pandas.compat import lrange import pandas as pd @@ -171,13 +170,13 @@ def test_reindex(test_data): subIndex = test_data.series.index[10:20] subSeries = test_data.series.reindex(subIndex) - for idx, val in compat.iteritems(subSeries): + for idx, val in 
subSeries.items(): assert val == test_data.series[idx] subIndex2 = test_data.ts.index[10:20] subTS = test_data.ts.reindex(subIndex2) - for idx, val in compat.iteritems(subTS): + for idx, val in subTS.items(): assert val == test_data.ts[idx] stuffSeries = test_data.ts.reindex(subIndex) @@ -186,7 +185,7 @@ def test_reindex(test_data): # This is extremely important for the Cython code to not screw up nonContigIndex = test_data.ts.index[::2] subNonContig = test_data.ts.reindex(nonContigIndex) - for idx, val in compat.iteritems(subNonContig): + for idx, val in subNonContig.items(): assert val == test_data.ts[idx] # return a copy the same index here diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 81642f65f05a4..279d09ea335ff 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -7,7 +7,6 @@ import numpy as np import pytest -import pandas.compat as compat from pandas.compat import lzip import pandas as pd @@ -145,7 +144,7 @@ def test_constructor_dict(self): def test_constructor_subclass_dict(self): data = tm.TestSubDict((x, 10.0 * x) for x in range(10)) series = self.series_klass(data) - expected = self.series_klass(dict(compat.iteritems(data))) + expected = self.series_klass(dict(data.items())) self._assert_series_equal(series, expected) def test_constructor_ordereddict(self): @@ -315,10 +314,10 @@ def test_values(self): tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False) def test_iteritems(self): - for idx, val in compat.iteritems(self.series): + for idx, val in self.series.items(): assert val == self.series[idx] - for idx, val in compat.iteritems(self.ts): + for idx, val in self.ts.items(): assert val == self.ts[idx] # assert is lazy (genrators don't define reverse, lists do) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 162a27db34cb1..27dee79603af5 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -7,7 +7,6 @@ import numpy as np import pytest -import pandas.compat as compat from pandas.compat import lrange import pandas as pd @@ -432,13 +431,13 @@ def test_map(self, datetime_series): merged = target.map(source) - for k, v in compat.iteritems(merged): + for k, v in merged.items(): assert v == source[target[k]] # input could be a dict merged = target.map(source.to_dict()) - for k, v in compat.iteritems(merged): + for k, v in merged.items(): assert v == source[target[k]] # function diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index 45e3dffde60f7..9ef771ac6e5d6 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -8,7 +8,7 @@ import pytest import pandas as pd -from pandas import DataFrame, DatetimeIndex, Series, compat, date_range +from pandas import DataFrame, DatetimeIndex, Series, date_range import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal @@ -17,7 +17,7 @@ class TestSeriesCombine(object): def test_append(self, datetime_series, string_series, object_series): appendedSeries = string_series.append(object_series) - for idx, value in compat.iteritems(appendedSeries): + for idx, value in appendedSeries.items(): if idx in string_series.index: assert value == string_series[idx] elif idx in object_series.index: diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index d15320cee644f..b687720f8d0a2 100644 --- a/pandas/tests/series/test_rank.py +++ 
b/pandas/tests/series/test_rank.py @@ -7,7 +7,6 @@ from pandas._libs.algos import Infinity, NegInfinity from pandas._libs.tslib import iNaT -import pandas.compat as compat import pandas.util._test_decorators as td from pandas import NaT, Series, Timestamp, date_range @@ -376,7 +375,7 @@ def test_rank_descending(self): def test_rank_int(self): s = self.s.dropna().astype('i8') - for method, res in compat.iteritems(self.results): + for method, res in self.results.items(): result = s.rank(method=method) expected = Series(res).dropna() expected.index = result.index diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 45df08ccfeb48..31f5f6cb2d7be 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -78,7 +78,7 @@ def test_copy(self, float_frame): def test_constructor(self, float_frame, float_frame_int_kind, float_frame_fill0): - for col, series in compat.iteritems(float_frame): + for col, series in float_frame.items(): assert isinstance(series, SparseSeries) assert isinstance(float_frame_int_kind['A'].sp_index, IntIndex) @@ -96,11 +96,11 @@ def test_constructor(self, float_frame, float_frame_int_kind, # construct no data sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10)) - for col, series in compat.iteritems(sdf): + for col, series in sdf.items(): assert isinstance(series, SparseSeries) # construct from nested dict - data = {c: s.to_dict() for c, s in compat.iteritems(float_frame)} + data = {c: s.to_dict() for c, s in float_frame.items()} sdf = SparseDataFrame(data) tm.assert_sp_frame_equal(sdf, float_frame) diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 703ae3bde71d1..7d702b8cd2b5a 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import ( - DataFrame, Series, SparseDtype, SparseSeries, bdate_range, compat, isna) + DataFrame, Series, SparseDtype, SparseSeries, bdate_range, isna) from pandas.core.reshape.util import cartesian_product import pandas.core.sparse.frame as spf from pandas.tests.series.test_api import SharedWithSparse @@ -431,7 +431,7 @@ def _check_all(self, check_func): def test_getitem(self): def _check_getitem(sp, dense): - for idx, val in compat.iteritems(dense): + for idx, val in dense.items(): tm.assert_almost_equal(val, sp[idx]) for i in range(len(dense)): @@ -850,7 +850,7 @@ def _check_matches(indices, expected): # homogenized is only valid with NaN fill values homogenized = spf.homogenize(data) - for k, v in compat.iteritems(homogenized): + for k, v in homogenized.items(): assert (v.sp_index.equals(expected)) indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]), diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index 0fa9fcd8aae9c..148b3fba45375 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -6,7 +6,7 @@ import re from pandas.compat import ( - iteritems, iterkeys, itervalues, lfilter, lmap, lrange, lzip, re_type) + iterkeys, itervalues, lfilter, lmap, lrange, lzip, re_type) class TestBuiltinIterators(object): @@ -54,7 +54,6 @@ def test_lzip(self): def test_dict_iterators(self): assert next(itervalues({1: 2})) == 2 assert next(iterkeys({1: 2})) == 1 - assert next(iteritems({1: 2})) == (1, 2) def test_re_type(): diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 04a50cf6facd5..b5e7a5f6abf4c 100644 --- 
a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -6,8 +6,7 @@ from numpy import nan import pytest -from pandas import ( - DataFrame, MultiIndex, Series, compat, concat, merge, to_datetime) +from pandas import DataFrame, MultiIndex, Series, concat, merge, to_datetime from pandas.core import common as com from pandas.core.sorting import ( decons_group_index, get_group_index, is_int64_overflow_possible, @@ -51,7 +50,7 @@ def test_int64_overflow(self): expected = df.groupby(tups).sum()['values'] - for k, v in compat.iteritems(expected): + for k, v in expected.items(): assert left[k] == right[k[::-1]] assert left[k] == v assert len(left) == len(right) diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 0aa29e451b1ba..7de1e8117289e 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -3,12 +3,11 @@ from pandas._libs.tslibs import frequencies as libfrequencies, resolution from pandas._libs.tslibs.frequencies import ( FreqGroup, _period_code_map, get_freq, get_freq_code) -import pandas.compat as compat import pandas.tseries.offsets as offsets -@pytest.fixture(params=list(compat.iteritems(_period_code_map))) +@pytest.fixture(params=list(_period_code_map.items())) def period_code_item(request): return request.param diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index c2ef939d1915e..fb65ec1eb9961 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -5,7 +5,6 @@ from pandas._libs.tslibs.ccalendar import DAYS, MONTHS from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG -import pandas.compat as compat from pandas.compat import is_platform_windows from pandas import ( @@ -218,14 +217,14 @@ def test_infer_freq_index(freq, expected): @pytest.mark.parametrize( "expected,dates", - list(compat.iteritems( + list( {"AS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"], "Q-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"], "M": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"], "W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"], "D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], "H": ["2011-12-31 22:00", "2011-12-31 23:00", - "2012-01-01 00:00", "2012-01-01 01:00"]})) + "2012-01-01 00:00", "2012-01-01 01:00"]}.items()) ) def test_infer_freq_tz(tz_naive_fixture, expected, dates): # see gh-7310 diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 0c58e515979c2..ea13be8601463 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -709,7 +709,7 @@ def test_onOffset(self): @pytest.mark.parametrize('case', apply_cases) def test_apply(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def test_apply_large_n(self): @@ -924,7 +924,7 @@ def test_roll_date_object(self): @pytest.mark.parametrize('case', normalize_cases) def test_normalize(self, case): offset, cases = case - for dt, expected in compat.iteritems(cases): + for dt, expected in cases.items(): assert offset.apply(dt) == expected on_offset_cases = [] @@ -964,7 +964,7 @@ def test_normalize(self, case): @pytest.mark.parametrize('case', on_offset_cases) def 
test_onOffset(self, case): offset, cases = case - for dt, expected in compat.iteritems(cases): + for dt, expected in cases.items(): assert offset.onOffset(dt) == expected opening_time_cases = [] @@ -1130,7 +1130,7 @@ def test_onOffset(self, case): def test_opening_time(self, case): _offsets, cases = case for offset in _offsets: - for dt, (exp_next, exp_prev) in compat.iteritems(cases): + for dt, (exp_next, exp_prev) in cases.items(): assert offset._next_opening_time(dt) == exp_next assert offset._prev_opening_time(dt) == exp_prev @@ -1290,7 +1290,7 @@ def test_opening_time(self, case): @pytest.mark.parametrize('case', apply_cases) def test_apply(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) apply_large_n_cases = [] @@ -1346,7 +1346,7 @@ def test_apply(self, case): @pytest.mark.parametrize('case', apply_large_n_cases) def test_apply_large_n(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def test_apply_nanoseconds(self): @@ -1369,7 +1369,7 @@ def test_apply_nanoseconds(self): '2014-07-03 17:00') - Nano(5), })) for offset, cases in tests: - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def test_datetimeindex(self): @@ -1561,7 +1561,7 @@ def test_roll_date_object(self): @pytest.mark.parametrize('norm_cases', normalize_cases) def test_normalize(self, norm_cases): offset, cases = norm_cases - for dt, expected in compat.iteritems(cases): + for dt, expected in cases.items(): assert offset.apply(dt) == expected def test_onOffset(self): @@ -1577,7 +1577,7 @@ def test_onOffset(self): datetime(2014, 7, 6, 12): False})) for offset, cases in tests: - for dt, expected in compat.iteritems(cases): + for dt, expected in cases.items(): assert offset.onOffset(dt) == expected apply_cases = [] @@ -1622,7 +1622,7 @@ def test_onOffset(self): @pytest.mark.parametrize('apply_case', apply_cases) def test_apply(self, apply_case): offset, cases = apply_case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) nano_cases = [] @@ -1647,7 +1647,7 @@ def test_apply(self, apply_case): @pytest.mark.parametrize('nano_case', nano_cases) def test_apply_nanoseconds(self, nano_case): offset, cases = nano_case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @@ -1778,7 +1778,7 @@ def test_onOffset(self, case): @pytest.mark.parametrize('case', apply_cases) def test_apply(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def test_apply_large_n(self): @@ -1977,7 +1977,7 @@ def test_onOffset(self, case): @pytest.mark.parametrize('case', apply_cases) def test_apply(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def test_apply_large_n(self): @@ -2094,7 +2094,7 @@ def test_onOffset(self, case): @pytest.mark.parametrize('case', apply_cases) def test_apply(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def 
test_apply_large_n(self): @@ -2193,7 +2193,7 @@ def test_isAnchored(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @pytest.mark.parametrize('weekday', range(7)) @@ -2518,7 +2518,7 @@ def test_offset_whole_year(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @pytest.mark.parametrize('case', offset_cases) @@ -2709,7 +2709,7 @@ def test_offset_whole_year(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @pytest.mark.parametrize('case', offset_cases) @@ -2831,7 +2831,7 @@ def setup_method(self, method): _offset_map.clear() def test_alias_equality(self): - for k, v in compat.iteritems(_offset_map): + for k, v in _offset_map.items(): if v is None: continue assert k == v.copy() diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py index 9ee03d2e886f3..6d121eb9eb8df 100644 --- a/pandas/tests/tseries/offsets/test_yqm_offsets.py +++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py @@ -7,7 +7,7 @@ import pytest import pandas as pd -from pandas import Timestamp, compat +from pandas import Timestamp from pandas.tseries.offsets import ( BMonthBegin, BMonthEnd, BQuarterBegin, BQuarterEnd, BYearBegin, BYearEnd, @@ -105,7 +105,7 @@ class TestMonthBegin(Base): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @@ -164,7 +164,7 @@ def test_normalize(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(MonthEnd(), datetime(2007, 12, 31), True), @@ -224,7 +224,7 @@ def test_offsets_compare_equal(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(BMonthBegin(), datetime(2007, 12, 31), False), @@ -289,7 +289,7 @@ def test_offsets_compare_equal(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(BMonthEnd(), datetime(2007, 12, 31), True), @@ -379,7 +379,7 @@ def test_offset_corner_case(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @@ -458,7 +458,7 @@ def test_offset_corner_case(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, 
expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [ @@ -590,7 +590,7 @@ def test_offset_corner_case(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @@ -668,7 +668,7 @@ def test_offset_corner_case(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [ @@ -787,7 +787,7 @@ def test_misspecified(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(YearBegin(), datetime(2007, 1, 3), False), @@ -838,7 +838,7 @@ def test_misspecified(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(YearEnd(), datetime(2007, 12, 31), True), @@ -884,7 +884,7 @@ class TestYearEndDiffMonth(Base): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(YearEnd(month=3), datetime(2007, 3, 31), True), @@ -943,7 +943,7 @@ def test_misspecified(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) @@ -980,7 +980,7 @@ class TestBYearEnd(Base): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [(BYearEnd(), datetime(2007, 12, 31), True), @@ -1016,7 +1016,7 @@ def test_bad_month_fail(self): @pytest.mark.parametrize('case', offset_cases) def test_offset(self, case): offset, cases = case - for base, expected in compat.iteritems(cases): + for base, expected in cases.items(): assert_offset_equal(offset, base, expected) def test_roll(self): diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py index 4aee0a2e5350e..d6d4792c19ea8 100644 --- a/pandas/util/_doctools.py +++ b/pandas/util/_doctools.py @@ -1,7 +1,5 @@ import numpy as np -import pandas.compat as compat - import pandas as pd @@ -152,7 +150,7 @@ def _make_table(self, ax, df, title, height=None): height = 1.0 / (len(df) + 1) props = tb.properties() - for (r, c), cell in compat.iteritems(props['celld']): + for (r, c), cell in props['celld'].items(): if c == -1: cell.set_visible(False) elif r < col_nlevels and c < idx_nlevels: diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 95b27161a5858..9659cb33686d0 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -22,7 +22,6 @@ can_set_locale, get_locales, set_locale) from pandas._libs import testing as _testing -import pandas.compat as compat from pandas.compat import lmap, lrange, lzip, 
raise_with_traceback from pandas.core.dtypes.common import ( @@ -1499,7 +1498,7 @@ def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True, if check_fill_value: assert_attr_equal('default_fill_value', left, right, obj=obj) - for col, series in compat.iteritems(left): + for col, series in left.items(): assert (col in right) # trade-off?