diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index f32e001ea984a..1a3de7b463a19 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -138,7 +138,7 @@ def test_set_index_nonuniq(self):
                         'E': np.random.randn(5)})
         with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
             df.set_index('A', verify_integrity=True, inplace=True)
-        self.assertIn('A', df)
+        assert 'A' in df

     def test_set_index_bug(self):
         # GH1590
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 5b3a0a9e01f35..636194d32ad46 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -734,7 +734,7 @@ def test_filter_regex_search(self):
         # regex
         filtered = fcopy.filter(regex='[A]+')
         self.assertEqual(len(filtered.columns), 2)
-        self.assertIn('AA', filtered)
+        assert 'AA' in filtered

         # doesn't have to be at beginning
         df = DataFrame({'aBBa': [1, 2],
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 64edc52508216..6a49c88f17526 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -156,16 +156,16 @@ def test_to_records_index_name(self):
         df = DataFrame(np.random.randn(3, 3))
         df.index.name = 'X'
         rs = df.to_records()
-        self.assertIn('X', rs.dtype.fields)
+        assert 'X' in rs.dtype.fields

         df = DataFrame(np.random.randn(3, 3))
         rs = df.to_records()
-        self.assertIn('index', rs.dtype.fields)
+        assert 'index' in rs.dtype.fields

         df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
         df.index.names = ['A', None]
         rs = df.to_records()
-        self.assertIn('level_0', rs.dtype.fields)
+        assert 'level_0' in rs.dtype.fields

     def test_to_records_with_unicode_index(self):
         # GH13172
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 780cb3d0457bd..12c5b33fcbce9 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -422,7 +422,7 @@ def test_setitem(self):
         # not sure what else to do here
         series = self.frame['A'][::2]
         self.frame['col5'] = series
-        self.assertIn('col5', self.frame)
+        assert 'col5' in self.frame
         self.assertEqual(len(series), 15)
         self.assertEqual(len(self.frame), 30)

@@ -600,7 +600,7 @@ def test_setitem_corner(self):
                        index=np.arange(3))
         del df['B']
         df['B'] = [1., 2., 3.]
-        self.assertIn('B', df)
+        assert 'B' in df
         self.assertEqual(len(df.columns), 2)

         df['A'] = 'beginning'
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index ce756ca188bf0..18639990662b0 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -831,7 +831,7 @@ def test_combineSeries(self):
         for key, s in compat.iteritems(self.frame):
             assert_series_equal(larger_added[key], s + series[key])

-        self.assertIn('E', larger_added)
+        assert 'E' in larger_added
         self.assertTrue(np.isnan(larger_added['E']).all())

         # vs mix (upcast) as needed
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index be55efac2992b..efbdc05ba23c8 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -171,7 +171,7 @@ def test_repr_column_name_unicode_truncation_bug(self):
                     ' the File through the code..')})

         result = repr(df)
-        self.assertIn('StringCol', result)
+        assert 'StringCol' in result

     def test_latex_repr(self):
         result = r"""\begin{tabular}{llll}
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 2df2e23c3f877..0fd1df0b733f8 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -909,7 +909,7 @@ def test_to_csv_compression_gzip(self):
             text = f.read().decode('utf8')
             f.close()
             for col in df.columns:
-                self.assertIn(col, text)
+                assert col in text

     def test_to_csv_compression_bz2(self):
         # GH7615
@@ -932,7 +932,7 @@ def test_to_csv_compression_bz2(self):
             text = f.read().decode('utf8')
             f.close()
             for col in df.columns:
-                self.assertIn(col, text)
+                assert col in text

     def test_to_csv_compression_xz(self):
         # GH11852
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 752c0689b0660..05fe1c6f58e9a 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2483,14 +2483,14 @@ def test_groupby_series_with_name(self):
         result = self.df.groupby(self.df['A']).mean()
         result2 = self.df.groupby(self.df['A'], as_index=False).mean()
         self.assertEqual(result.index.name, 'A')
-        self.assertIn('A', result2)
+        assert 'A' in result2

         result = self.df.groupby([self.df['A'], self.df['B']]).mean()
         result2 = self.df.groupby([self.df['A'], self.df['B']],
                                   as_index=False).mean()
         self.assertEqual(result.index.names, ('A', 'B'))
-        self.assertIn('A', result2)
-        self.assertIn('B', result2)
+        assert 'A' in result2
+        assert 'B' in result2

     def test_seriesgroupby_name_attr(self):
         # GH 6265
@@ -3357,10 +3357,10 @@ def test_groupby_with_small_elem(self):
                         'change': [1234, 5678]},
                        index=pd.DatetimeIndex(['2014-09-10', '2013-10-10']))
         grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
-        self.assertEqual(len(grouped.groups), 2)
-        self.assertEqual(grouped.ngroups, 2)
-        self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
+        assert len(grouped.groups) == 2
+        assert grouped.ngroups == 2
+        assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
+        assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups

         res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
         tm.assert_frame_equal(res, df.iloc[[0], :])
@@ -3372,10 +3372,10 @@ def test_groupby_with_small_elem(self):
                        index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
                                                '2014-09-15']))
         grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
-        self.assertEqual(len(grouped.groups), 2)
-        self.assertEqual(grouped.ngroups, 2)
-        self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
+        assert len(grouped.groups) == 2
+        assert grouped.ngroups == 2
+        assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
+        assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups

         res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
         tm.assert_frame_equal(res, df.iloc[[0, 2], :])
@@ -3388,11 +3388,11 @@ def test_groupby_with_small_elem(self):
                        index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
                                                '2014-08-05']))
         grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
-        self.assertEqual(len(grouped.groups), 3)
-        self.assertEqual(grouped.ngroups, 3)
-        self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
-        self.assertIn((pd.Timestamp('2014-08-31'), 'start'), grouped.groups)
+        assert len(grouped.groups) == 3
+        assert grouped.ngroups == 3
+        assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
+        assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups
+        assert (pd.Timestamp('2014-08-31'), 'start') in grouped.groups

         res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
         tm.assert_frame_equal(res, df.iloc[[0], :])
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 7cef5eeb94915..93fc855178800 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -101,7 +101,7 @@ def test_reasonable_keyerror(self):
         try:
             index.get_loc('1/1/2000')
         except KeyError as e:
-            self.assertIn('2000', str(e))
+            assert '2000' in str(e)

     def test_roundtrip_pickle_with_tz(self):

diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 2eff8a12dee77..235c8f1de1fae 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -632,7 +632,7 @@ def test_nonunique_contains(self):
         for idx in map(DatetimeIndex,
                        ([0, 1, 0], [0, 0, -1], [0, -1, -1],
                         ['2015', '2015', '2016'], ['2015', '2015', '2014'])):
-            tm.assertIn(idx[0], idx)
+            assert idx[0] in idx

     def test_order(self):
         # with freq
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 9a166aa3340e3..7db7410d79349 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -857,10 +857,10 @@ def test_add_string(self):

     def test_iadd_string(self):
         index = pd.Index(['a', 'b', 'c'])
         # doesn't fail test unless there is a check before `+=`
-        self.assertIn('a', index)
+        assert 'a' in index

         index += '_x'
-        self.assertIn('a_x', index)
+        assert 'a_x' in index

     def test_difference(self):
@@ -963,8 +963,8 @@ def test_summary(self):
         ind = Index(['{other}%s', "~:{range}:0"], name='A')
         result = ind.summary()
         # shouldn't be formatted accidentally.
-        self.assertIn('~:{range}:0', result)
-        self.assertIn('{other}%s', result)
+        assert '~:{range}:0' in result
+        assert '{other}%s' in result

     def test_format(self):
         self._check_method_works(Index.format)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 5000a71dfc756..98fce437d26c8 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -1597,8 +1597,8 @@ def test_union(self):

         # other = Index(['A', 'B', 'C'])
         # result = other.union(self.index)
-        # self.assertIn(('foo', 'one'), result)
-        # self.assertIn('B', result)
+        # assert ('foo', 'one') in result
+        # assert 'B' in result

         # result2 = self.index.union(other)
         # self.assertTrue(result.equals(result2))
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 5201af3af3531..adf164977205f 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -561,7 +561,7 @@ def test_nonunique_contains(self):
         for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
                                         ['00:01:00', '00:01:00', '00:02:00'],
                                         ['00:01:00', '00:01:00', '00:00:01'])):
-            tm.assertIn(idx[0], idx)
+            assert idx[0] in idx

     def test_unknown_attribute(self):
         # see gh-9680
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index c0d83c580d1d1..b776d3c2d08ea 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -373,15 +373,15 @@ def test_cache_updating(self):
         df['A']  # cache series
         with catch_warnings(record=True):
             df.ix["Hello Friend"] = df.ix[0]
-        self.assertIn("Hello Friend", df['A'].index)
-        self.assertIn("Hello Friend", df['B'].index)
+        assert "Hello Friend" in df['A'].index
+        assert "Hello Friend" in df['B'].index

         with catch_warnings(record=True):
             panel = tm.makePanel()
             panel.ix[0]  # get first item into cache
             panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
-        self.assertIn("A+1", panel.ix[0].columns)
-        self.assertIn("A+1", panel.ix[1].columns)
+        assert "A+1" in panel.ix[0].columns
+        assert "A+1" in panel.ix[1].columns

         # 5216
         # make sure that we don't try to set a dead cache
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index ea796a497bd19..ba59add4305d8 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -959,7 +959,7 @@ def test_wide_repr_named(self):
             self.assertTrue(len(wider_repr) < len(wide_repr))

             for line in wide_repr.splitlines()[1::13]:
-                self.assertIn('DataFrame Index', line)
+                assert 'DataFrame Index' in line

         reset_option('display.expand_frame_repr')

@@ -981,7 +981,7 @@ def test_wide_repr_multiindex(self):
             self.assertTrue(len(wider_repr) < len(wide_repr))

             for line in wide_repr.splitlines()[1::13]:
-                self.assertIn('Level 0 Level 1', line)
+                assert 'Level 0 Level 1' in line

         reset_option('display.expand_frame_repr')

@@ -1875,9 +1875,9 @@ def test_float_trim_zeros(self):
             if line.startswith('dtype:'):
                 continue
             if _three_digit_exp():
-                self.assertIn('+010', line)
+                assert '+010' in line
             else:
-                self.assertIn('+10', line)
+                assert '+10' in line

     def test_datetimeindex(self):

diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py
index 6d3dc8f637012..f4ca632e09f39 100644
--- a/pandas/tests/io/parser/c_parser_only.py
+++ b/pandas/tests/io/parser/c_parser_only.py
@@ -33,7 +33,7 @@ def test_buffer_overflow(self):
             try:
                 self.read_table(StringIO(malf))
             except Exception as err:
-                self.assertIn(cperr, str(err))
+                assert cperr in str(err)

     def test_buffer_rd_bytes(self):
         # see gh-12098: src->buffer in the C parser can be freed twice leading
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index cdc4f9fa9d84f..b7147cd77f4f6 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -135,7 +135,7 @@ def test_multiple_date_cols_int_cast(self):
         # it works!
         df = self.read_csv(StringIO(data), header=None,
                            parse_dates=date_spec, date_parser=conv.parse_date_time)
-        self.assertIn('nominal', df)
+        assert 'nominal' in df

     def test_multiple_date_col_timestamp_parse(self):
         data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
@@ -530,7 +530,7 @@ def test_parse_date_time(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_date_time)
-        self.assertIn('date_time', df)
+        assert 'date_time' in df
         self.assertEqual(df.date_time.loc[0], datetime(2001, 1, 5, 10, 0, 0))

         data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
@@ -558,7 +558,7 @@ def test_parse_date_fields(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_date_fields)
-        self.assertIn('ymd', df)
+        assert 'ymd' in df
         self.assertEqual(df.ymd.loc[0], datetime(2001, 1, 10))

     def test_datetime_six_col(self):
@@ -585,7 +585,7 @@ def test_datetime_six_col(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_all_fields)
-        self.assertIn('ymdHMS', df)
+        assert 'ymdHMS' in df
         self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0))

     def test_datetime_fractional_seconds(self):
@@ -598,7 +598,7 @@ def test_datetime_fractional_seconds(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=conv.parse_all_fields)
-        self.assertIn('ymdHMS', df)
+        assert 'ymdHMS' in df
         self.assertEqual(df.ymdHMS.loc[0], datetime(2001, 1, 5, 10, 0, 0,
                                                     microsecond=123456))
         self.assertEqual(df.ymdHMS.loc[1], datetime(2001, 1, 5, 10, 0, 0,
@@ -611,7 +611,7 @@ def test_generic(self):
         df = self.read_csv(StringIO(data), sep=',', header=0,
                            parse_dates=datecols,
                            date_parser=dateconverter)
-        self.assertIn('ym', df)
+        assert 'ym' in df
         self.assertEqual(df.ym.loc[0], date(2001, 1, 1))

     def test_dateparser_resolution_if_not_ns(self):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 866ed2cf2f359..e7eaab098fe4d 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -566,10 +566,10 @@ def test_gold_canyon(self):
         with open(self.banklist_data, 'r') as f:
             raw_text = f.read()

-        self.assertIn(gc, raw_text)
+        assert gc in raw_text
         df = self.read_html(self.banklist_data, 'Gold Canyon',
                             attrs={'id': 'table'})[0]
-        self.assertIn(gc, df.to_string())
+        assert gc in df.to_string()

     def test_different_number_of_rows(self):
         expected = """