From 47bf1a1a0794811df817fae5987bbda1522bcd50 Mon Sep 17 00:00:00 2001 From: Elliott Sales de Andrade Date: Tue, 14 Feb 2017 18:01:44 -0500 Subject: [PATCH 1/4] TST: Replace ENGINES_PARSERS by parametrize. --- pandas/tests/computation/test_compat.py | 11 +- pandas/tests/computation/test_eval.py | 169 +++++++++--------------- 2 files changed, 63 insertions(+), 117 deletions(-) diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py index 77994ac6d2f53..59bdde83aedd8 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/computation/test_compat.py @@ -12,8 +12,6 @@ import pandas.computation.expr as expr from pandas.computation import _MIN_NUMEXPR_VERSION -ENGINES_PARSERS = list(product(_engines, expr._parsers)) - def test_compat(): # test we have compat with our version of nu @@ -30,12 +28,9 @@ def test_compat(): pytest.skip("not testing numexpr version compat") -def test_invalid_numexpr_version(): - for engine, parser in ENGINES_PARSERS: - yield check_invalid_numexpr_version, engine, parser - - -def check_invalid_numexpr_version(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 res = pd.eval('a + b', engine=engine, parser=parser) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index ada714c8ac52e..fa5ca21cf8622 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -774,16 +774,19 @@ def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs): f = lambda *args, **kwargs: np.random.randn() -ENGINES_PARSERS = list(product(_engines, expr._parsers)) - #------------------------------------- # typecasting rules consistency with python # issue #12388 class TestTypeCasting(object): - - def check_binop_typecasting(self, engine, parser, op, dt): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + @pytest.mark.parametrize('op', ['+', '-', '*', '**', '/']) + # maybe someday... numexpr has too many upcasting rules now + # chain(*(np.sctypes[x] for x in ['uint', 'int', 'float'])) + @pytest.mark.parametrize('dt', [np.float32, np.float64]) + def test_binop_typecasting(self, engine, parser, op, dt): tm.skip_if_no_ne(engine) df = mkdf(5, 3, data_gen_f=f, dtype=dt) s = 'df {} 3'.format(op) @@ -798,15 +801,6 @@ def check_binop_typecasting(self, engine, parser, op, dt): assert res.values.dtype == dt assert_frame_equal(res, eval(s)) - def test_binop_typecasting(self): - for engine, parser in ENGINES_PARSERS: - for op in ['+', '-', '*', '**', '/']: - # maybe someday... 
numexpr has too many upcasting rules now - # for dt in chain(*(np.sctypes[x] for x in ['uint', 'int', - # 'float'])): - for dt in [np.float32, np.float64]: - yield self.check_binop_typecasting, engine, parser, op, dt - #------------------------------------- # basic and complex alignment @@ -826,18 +820,18 @@ class TestAlignment(object): index_types = 'i', 'u', 'dt' lhs_index_types = index_types + ('s',) # 'p' - def check_align_nested_unary_op(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_align_nested_unary_op(self, engine, parser): tm.skip_if_no_ne(engine) s = 'df * ~2' df = mkdf(5, 3, data_gen_f=f) res = pd.eval(s, engine=engine, parser=parser) assert_frame_equal(res, df * ~2) - def test_align_nested_unary_op(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_align_nested_unary_op, engine, parser - - def check_basic_frame_alignment(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_basic_frame_alignment(self, engine, parser): tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, self.index_types) @@ -856,11 +850,9 @@ def check_basic_frame_alignment(self, engine, parser): res = pd.eval('df + df2', engine=engine, parser=parser) assert_frame_equal(res, df + df2) - def test_basic_frame_alignment(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_basic_frame_alignment, engine, parser - - def check_frame_comparison(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_frame_comparison(self, engine, parser): tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, repeat=2) for r_idx_type, c_idx_type in args: @@ -874,11 +866,10 @@ def check_frame_comparison(self, engine, parser): res = pd.eval('df < df3', engine=engine, parser=parser) assert_frame_equal(res, df < df3) - def test_frame_comparison(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_frame_comparison, engine, parser - - def check_medium_complex_frame_alignment(self, engine, parser): + @slow + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_medium_complex_frame_alignment(self, engine, parser): tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, self.index_types, self.index_types) @@ -899,12 +890,9 @@ def check_medium_complex_frame_alignment(self, engine, parser): engine=engine, parser=parser) assert_frame_equal(res, df + df2 + df3) - @slow - def test_medium_complex_frame_alignment(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_medium_complex_frame_alignment, engine, parser - - def check_basic_frame_series_alignment(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_basic_frame_series_alignment(self, engine, parser): tm.skip_if_no_ne(engine) def testit(r_idx_type, c_idx_type, index_name): @@ -932,11 +920,9 @@ def testit(r_idx_type, c_idx_type, index_name): for r_idx_type, c_idx_type, index_name in args: testit(r_idx_type, c_idx_type, index_name) - def test_basic_frame_series_alignment(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_basic_frame_series_alignment, engine, parser - - def check_basic_series_frame_alignment(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + 
@pytest.mark.parametrize('parser', expr._parsers) + def test_basic_series_frame_alignment(self, engine, parser): tm.skip_if_no_ne(engine) def testit(r_idx_type, c_idx_type, index_name): @@ -968,11 +954,9 @@ def testit(r_idx_type, c_idx_type, index_name): for r_idx_type, c_idx_type, index_name in args: testit(r_idx_type, c_idx_type, index_name) - def test_basic_series_frame_alignment(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_basic_series_frame_alignment, engine, parser - - def check_series_frame_commutativity(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_series_frame_commutativity(self, engine, parser): tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, ('+', '*'), ('index', 'columns')) @@ -1000,11 +984,10 @@ def check_series_frame_commutativity(self, engine, parser): if engine == 'numexpr': assert_frame_equal(a, b) - def test_series_frame_commutativity(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_series_frame_commutativity, engine, parser - - def check_complex_series_frame_alignment(self, engine, parser): + @slow + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_complex_series_frame_alignment(self, engine, parser): tm.skip_if_no_ne(engine) import random @@ -1050,12 +1033,9 @@ def check_complex_series_frame_alignment(self, engine, parser): tm.assert_equal(res.shape, expected.shape) assert_frame_equal(res, expected) - @slow - def test_complex_series_frame_alignment(self): - for engine, parser in ENGINES_PARSERS: - yield self.check_complex_series_frame_alignment, engine, parser - - def check_performance_warning_for_poor_alignment(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_performance_warning_for_poor_alignment(self, engine, parser): tm.skip_if_no_ne(engine) df = DataFrame(randn(1000, 10)) s = Series(randn(10000)) @@ -1098,11 +1078,6 @@ def check_performance_warning_for_poor_alignment(self, engine, parser): "".format(1, 'df', np.log10(s.size - df.shape[1]))) tm.assert_equal(msg, expected) - def test_performance_warning_for_poor_alignment(self): - for engine, parser in ENGINES_PARSERS: - yield (self.check_performance_warning_for_poor_alignment, engine, - parser) - #------------------------------------ # slightly more complex ops @@ -1832,31 +1807,27 @@ def test_disallowed_nodes(): yield check_disallowed_nodes, engine, visitor -def check_syntax_error_exprs(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_syntax_error_exprs(engine, parser): tm.skip_if_no_ne(engine) e = 's +' with pytest.raises(SyntaxError): pd.eval(e, engine=engine, parser=parser) -def test_syntax_error_exprs(): - for engine, parser in ENGINES_PARSERS: - yield check_syntax_error_exprs, engine, parser - - -def check_name_error_exprs(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_name_error_exprs(engine, parser): tm.skip_if_no_ne(engine) e = 's + t' with tm.assertRaises(NameError): pd.eval(e, engine=engine, parser=parser) -def test_name_error_exprs(): - for engine, parser in ENGINES_PARSERS: - yield check_name_error_exprs, engine, parser - - -def check_invalid_local_variable_reference(engine, parser): +@pytest.mark.parametrize('engine', _engines) 
+@pytest.mark.parametrize('parser', expr._parsers) +def test_invalid_local_variable_reference(engine, parser): tm.skip_if_no_ne(engine) a, b = 1, 2 @@ -1870,12 +1841,9 @@ def check_invalid_local_variable_reference(engine, parser): pd.eval(exprs, engine=engine, parser=parser) -def test_invalid_local_variable_reference(): - for engine, parser in ENGINES_PARSERS: - yield check_invalid_local_variable_reference, engine, parser - - -def check_numexpr_builtin_raises(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_numexpr_builtin_raises(engine, parser): tm.skip_if_no_ne(engine) sin, dotted_line = 1, 2 if engine == 'numexpr': @@ -1887,12 +1855,9 @@ def check_numexpr_builtin_raises(engine, parser): tm.assert_equal(res, sin + dotted_line) -def test_numexpr_builtin_raises(): - for engine, parser in ENGINES_PARSERS: - yield check_numexpr_builtin_raises, engine, parser - - -def check_bad_resolver_raises(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_bad_resolver_raises(engine, parser): tm.skip_if_no_ne(engine) cannot_resolve = 42, 3.0 with tm.assertRaisesRegexp(TypeError, 'Resolver of type .+'): @@ -1900,35 +1865,24 @@ def check_bad_resolver_raises(engine, parser): parser=parser) -def test_bad_resolver_raises(): - for engine, parser in ENGINES_PARSERS: - yield check_bad_resolver_raises, engine, parser - - -def check_empty_string_raises(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_empty_string_raises(engine, parser): # GH 13139 tm.skip_if_no_ne(engine) with tm.assertRaisesRegexp(ValueError, 'expr cannot be an empty string'): pd.eval('', engine=engine, parser=parser) -def test_empty_string_raises(): - for engine, parser in ENGINES_PARSERS: - yield check_empty_string_raises, engine, parser - - -def check_more_than_one_expression_raises(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_more_than_one_expression_raises(engine, parser): tm.skip_if_no_ne(engine) with tm.assertRaisesRegexp(SyntaxError, 'only a single expression is allowed'): pd.eval('1 + 1; 2 + 2', engine=engine, parser=parser) -def test_more_than_one_expression_raises(): - for engine, parser in ENGINES_PARSERS: - yield check_more_than_one_expression_raises, engine, parser - - def check_bool_ops_fails_on_scalars(gen, lhs, cmp, rhs, engine, parser): tm.skip_if_no_ne(engine) mid = gen[type(lhs)]() @@ -1951,7 +1905,9 @@ def test_bool_ops_fails_on_scalars(): gen[dtype2](), engine, parser) -def check_inf(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_inf(engine, parser): tm.skip_if_no_ne(engine) s = 'inf + 1' expected = np.inf @@ -1959,11 +1915,6 @@ def check_inf(engine, parser): tm.assert_equal(result, expected) -def test_inf(): - for engine, parser in ENGINES_PARSERS: - yield check_inf, engine, parser - - def check_negate_lt_eq_le(engine, parser): tm.skip_if_no_ne(engine) df = pd.DataFrame([[0, 10], [1, 20]], columns=['cat', 'count']) From c6cd3469c74ddabdff30fe9ebe73a68659cad959 Mon Sep 17 00:00:00 2001 From: Elliott Sales de Andrade Date: Tue, 14 Feb 2017 19:22:48 -0500 Subject: [PATCH 2/4] TST: Parametrize remaining simple yield tests. Simple meaning those that loop through a constant and are not in a unittest class. 
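For reviewers, the conversion pattern here is the same one used in the previous
commit: a module-level check_* helper plus a test_* generator that yields
(check_*, engine, parser) collapses into a single test decorated with
pytest.mark.parametrize, so each combination is collected and reported as its
own test item. A minimal sketch of the pattern (the test_double name and the
values are illustrative only, not taken from this patch):

    import pytest

    # Before: nose-style yield test looping over a constant.
    #
    # def check_double(value):
    #     assert value * 2 == value + value
    #
    # def test_double():
    #     for value in [1, 2, 3]:
    #         yield check_double, value

    # After: pytest generates one test item per parameter value.
    @pytest.mark.parametrize('value', [1, 2, 3])
    def test_double(value):
        assert value * 2 == value + value
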
--- pandas/tests/computation/test_eval.py | 69 +++++++++++--------------- pandas/tests/io/parser/test_network.py | 26 +++++----- pandas/util/testing.py | 15 ++++-- 3 files changed, 53 insertions(+), 57 deletions(-) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index fa5ca21cf8622..1d65aed75f0ca 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1737,17 +1737,17 @@ def setUpClass(cls): class TestScope(object): - def check_global_scope(self, e, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_global_scope(self, engine, parser): tm.skip_if_no_ne(engine) + e = '_var_s * 2' tm.assert_numpy_array_equal(_var_s * 2, pd.eval(e, engine=engine, parser=parser)) - def test_global_scope(self): - e = '_var_s * 2' - for engine, parser in product(_engines, expr._parsers): - yield self.check_global_scope, e, engine, parser - - def check_no_new_locals(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_no_new_locals(self, engine, parser): tm.skip_if_no_ne(engine) x = 1 lcls = locals().copy() @@ -1756,11 +1756,9 @@ def check_no_new_locals(self, engine, parser): lcls2.pop('lcls') tm.assert_equal(lcls, lcls2) - def test_no_new_locals(self): - for engine, parser in product(_engines, expr._parsers): - yield self.check_no_new_locals, engine, parser - - def check_no_new_globals(self, engine, parser): + @pytest.mark.parametrize('engine', _engines) + @pytest.mark.parametrize('parser', expr._parsers) + def test_no_new_globals(self, engine, parser): tm.skip_if_no_ne(engine) x = 1 gbls = globals().copy() @@ -1768,10 +1766,6 @@ def check_no_new_globals(self, engine, parser): gbls2 = globals().copy() tm.assert_equal(gbls, gbls2) - def test_no_new_globals(self): - for engine, parser in product(_engines, expr._parsers): - yield self.check_no_new_globals, engine, parser - def test_invalid_engine(): tm.skip_if_no_ne() @@ -1791,7 +1785,9 @@ def test_invalid_parser(): 'pandas': PandasExprVisitor} -def check_disallowed_nodes(engine, parser): +@pytest.mark.parametrize('engine', _parsers) +@pytest.mark.parametrize('parser', _parsers) +def test_disallowed_nodes(engine, parser): tm.skip_if_no_ne(engine) VisitorClass = _parsers[parser] uns_ops = VisitorClass.unsupported_nodes @@ -1802,11 +1798,6 @@ def check_disallowed_nodes(engine, parser): getattr(inst, ops)() -def test_disallowed_nodes(): - for engine, visitor in product(_parsers, repeat=2): - yield check_disallowed_nodes, engine, visitor - - @pytest.mark.parametrize('engine', _engines) @pytest.mark.parametrize('parser', expr._parsers) def test_syntax_error_exprs(engine, parser): @@ -1883,9 +1874,19 @@ def test_more_than_one_expression_raises(engine, parser): pd.eval('1 + 1; 2 + 2', engine=engine, parser=parser) -def check_bool_ops_fails_on_scalars(gen, lhs, cmp, rhs, engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +@pytest.mark.parametrize('cmp', ('and', 'or')) +@pytest.mark.parametrize('lhs', (int, float)) +@pytest.mark.parametrize('rhs', (int, float)) +def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): tm.skip_if_no_ne(engine) - mid = gen[type(lhs)]() + gen = {int: lambda: np.random.randint(10), float: np.random.randn} + + mid = gen[lhs]() + lhs = gen[lhs]() + rhs = gen[rhs]() + ex1 = 'lhs {0} mid {1} rhs'.format(cmp, cmp) ex2 = 'lhs {0} mid and mid 
{1} rhs'.format(cmp, cmp) ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp, cmp) @@ -1894,17 +1895,6 @@ def check_bool_ops_fails_on_scalars(gen, lhs, cmp, rhs, engine, parser): pd.eval(ex, engine=engine, parser=parser) -def test_bool_ops_fails_on_scalars(): - _bool_ops_syms = 'and', 'or' - dtypes = int, float - gen = {int: lambda: np.random.randint(10), float: np.random.randn} - for engine, parser, dtype1, cmp, dtype2 in product(_engines, expr._parsers, - dtypes, _bool_ops_syms, - dtypes): - yield (check_bool_ops_fails_on_scalars, gen, gen[dtype1](), cmp, - gen[dtype2](), engine, parser) - - @pytest.mark.parametrize('engine', _engines) @pytest.mark.parametrize('parser', expr._parsers) def test_inf(engine, parser): @@ -1915,7 +1905,9 @@ def test_inf(engine, parser): tm.assert_equal(result, expected) -def check_negate_lt_eq_le(engine, parser): +@pytest.mark.parametrize('engine', _engines) +@pytest.mark.parametrize('parser', expr._parsers) +def test_negate_lt_eq_le(engine, parser): tm.skip_if_no_ne(engine) df = pd.DataFrame([[0, 10], [1, 20]], columns=['cat', 'count']) expected = df[~(df.cat > 0)] @@ -1931,11 +1923,6 @@ def check_negate_lt_eq_le(engine, parser): tm.assert_frame_equal(result, expected) -def test_negate_lt_eq_le(): - for engine, parser in product(_engines, expr._parsers): - yield check_negate_lt_eq_le, engine, parser - - class TestValidate(tm.TestCase): def test_validate_bool_args(self): diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 721d447262149..4d6b6c7daa3c6 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -7,7 +7,6 @@ import os import pytest -from itertools import product import pandas.util.testing as tm from pandas import DataFrame @@ -21,14 +20,18 @@ def salaries_table(): @pytest.mark.parametrize( - "compression,extension", [('gzip', '.gz'), ('bz2', '.bz2'), - ('zip', '.zip'), ('xz', '.xz')]) -def test_compressed_urls(salaries_table, compression, extension): - check_compressed_urls(salaries_table, compression, extension) + "compression,extension", + [('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'), + tm._mark_skipif_no_lzma(('xz', '.xz'))]) +@pytest.mark.parametrize('mode', ['explicit', 'infer']) +@pytest.mark.parametrize('engine', ['python', 'c']) +def test_compressed_urls(salaries_table, compression, extension, mode, engine): + check_compressed_urls(salaries_table, compression, extension, mode, engine) @tm.network -def check_compressed_urls(salaries_table, compression, extension): +def check_compressed_urls(salaries_table, compression, extension, mode, + engine): # test reading compressed urls with various engines and # extension inference base_url = ('https://github.com/pandas-dev/pandas/raw/master/' @@ -36,14 +39,11 @@ def check_compressed_urls(salaries_table, compression, extension): url = base_url + extension - # args is a (compression, engine) tuple - for (c, engine) in product([compression, 'infer'], ['python', 'c']): + if mode != 'explicit': + compression = mode - if url.endswith('.xz'): - tm._skip_if_no_lzma() - - url_table = read_table(url, compression=c, engine=engine) - tm.assert_frame_equal(url_table, salaries_table) + url_table = read_table(url, compression=compression, engine=engine) + tm.assert_frame_equal(url_table, salaries_table) class TestS3(tm.TestCase): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index cda386781e2ec..1bd539469dbe3 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -307,12 +307,21 @@ def 
_skip_if_scipy_0_17(): pytest.skip("scipy 0.17") -def _skip_if_no_lzma(): +def _check_if_lzma(): try: return compat.import_lzma() except ImportError: - import pytest - pytest.skip('need backports.lzma to run') + return False + + +def _skip_if_no_lzma(): + return _check_if_lzma() or pytest.skip('need backports.lzma to run') + + +_mark_skipif_no_lzma = pytest.mark.skipif( + not _check_if_lzma(), + reason='need backports.lzma to run' +) def _skip_if_no_xarray(): From 8368772af2617be4b66064813940c34f254b9637 Mon Sep 17 00:00:00 2001 From: Elliott Sales de Andrade Date: Tue, 14 Feb 2017 20:24:37 -0500 Subject: [PATCH 3/4] TST: Use fixtures for engine/parser where possible. --- pandas/tests/computation/test_eval.py | 91 ++++++--------------------- 1 file changed, 18 insertions(+), 73 deletions(-) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 1d65aed75f0ca..b42f79fe5009b 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -20,6 +20,7 @@ from pandas.computation import pytables from pandas.computation.engines import _engines, NumExprClobberingError from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor +from pandas.computation.expressions import _USE_NUMEXPR, _NUMEXPR_INSTALLED from pandas.computation.ops import (_binary_ops_dict, _special_case_arith_ops_syms, _arith_ops_syms, _bool_ops_syms, @@ -38,6 +39,23 @@ _scalar_skip = 'in', 'not in' +@pytest.fixture(params=( + pytest.mark.skipif(engine == 'numexpr' and not _USE_NUMEXPR, + reason='numexpr enabled->{enabled}, ' + 'installed->{installed}'.format( + enabled=_USE_NUMEXPR, + installed=_NUMEXPR_INSTALLED))(engine) + for engine in _engines +)) +def engine(request): + return request.param + + +@pytest.fixture(params=expr._parsers) +def parser(request): + return request.param + + def engine_has_neg_frac(engine): return _engines[engine].has_neg_frac @@ -780,14 +798,11 @@ def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs): class TestTypeCasting(object): - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) @pytest.mark.parametrize('op', ['+', '-', '*', '**', '/']) # maybe someday... 
numexpr has too many upcasting rules now # chain(*(np.sctypes[x] for x in ['uint', 'int', 'float'])) @pytest.mark.parametrize('dt', [np.float32, np.float64]) def test_binop_typecasting(self, engine, parser, op, dt): - tm.skip_if_no_ne(engine) df = mkdf(5, 3, data_gen_f=f, dtype=dt) s = 'df {} 3'.format(op) res = pd.eval(s, engine=engine, parser=parser) @@ -820,19 +835,13 @@ class TestAlignment(object): index_types = 'i', 'u', 'dt' lhs_index_types = index_types + ('s',) # 'p' - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_align_nested_unary_op(self, engine, parser): - tm.skip_if_no_ne(engine) s = 'df * ~2' df = mkdf(5, 3, data_gen_f=f) res = pd.eval(s, engine=engine, parser=parser) assert_frame_equal(res, df * ~2) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_basic_frame_alignment(self, engine, parser): - tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, self.index_types) with warnings.catch_warnings(record=True): @@ -850,10 +859,7 @@ def test_basic_frame_alignment(self, engine, parser): res = pd.eval('df + df2', engine=engine, parser=parser) assert_frame_equal(res, df + df2) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_frame_comparison(self, engine, parser): - tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, repeat=2) for r_idx_type, c_idx_type in args: df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, @@ -867,10 +873,7 @@ def test_frame_comparison(self, engine, parser): assert_frame_equal(res, df < df3) @slow - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_medium_complex_frame_alignment(self, engine, parser): - tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, self.index_types, self.index_types) @@ -890,11 +893,7 @@ def test_medium_complex_frame_alignment(self, engine, parser): engine=engine, parser=parser) assert_frame_equal(res, df + df2 + df3) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_basic_frame_series_alignment(self, engine, parser): - tm.skip_if_no_ne(engine) - def testit(r_idx_type, c_idx_type, index_name): df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type) @@ -920,11 +919,7 @@ def testit(r_idx_type, c_idx_type, index_name): for r_idx_type, c_idx_type, index_name in args: testit(r_idx_type, c_idx_type, index_name) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_basic_series_frame_alignment(self, engine, parser): - tm.skip_if_no_ne(engine) - def testit(r_idx_type, c_idx_type, index_name): df = mkdf(10, 7, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type) @@ -954,10 +949,7 @@ def testit(r_idx_type, c_idx_type, index_name): for r_idx_type, c_idx_type, index_name in args: testit(r_idx_type, c_idx_type, index_name) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_series_frame_commutativity(self, engine, parser): - tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, ('+', '*'), ('index', 'columns')) @@ -985,11 +977,7 @@ def test_series_frame_commutativity(self, engine, parser): assert_frame_equal(a, b) @slow - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def 
test_complex_series_frame_alignment(self, engine, parser): - tm.skip_if_no_ne(engine) - import random args = product(self.lhs_index_types, self.index_types, self.index_types, self.index_types) @@ -1033,10 +1021,7 @@ def test_complex_series_frame_alignment(self, engine, parser): tm.assert_equal(res.shape, expected.shape) assert_frame_equal(res, expected) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_performance_warning_for_poor_alignment(self, engine, parser): - tm.skip_if_no_ne(engine) df = DataFrame(randn(1000, 10)) s = Series(randn(10000)) if engine == 'numexpr': @@ -1737,18 +1722,12 @@ def setUpClass(cls): class TestScope(object): - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_global_scope(self, engine, parser): - tm.skip_if_no_ne(engine) e = '_var_s * 2' tm.assert_numpy_array_equal(_var_s * 2, pd.eval(e, engine=engine, parser=parser)) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_no_new_locals(self, engine, parser): - tm.skip_if_no_ne(engine) x = 1 lcls = locals().copy() pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser) @@ -1756,10 +1735,7 @@ def test_no_new_locals(self, engine, parser): lcls2.pop('lcls') tm.assert_equal(lcls, lcls2) - @pytest.mark.parametrize('engine', _engines) - @pytest.mark.parametrize('parser', expr._parsers) def test_no_new_globals(self, engine, parser): - tm.skip_if_no_ne(engine) x = 1 gbls = globals().copy() pd.eval('x + 1', engine=engine, parser=parser) @@ -1798,29 +1774,19 @@ def test_disallowed_nodes(engine, parser): getattr(inst, ops)() -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_syntax_error_exprs(engine, parser): - tm.skip_if_no_ne(engine) e = 's +' with pytest.raises(SyntaxError): pd.eval(e, engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_name_error_exprs(engine, parser): - tm.skip_if_no_ne(engine) e = 's + t' with tm.assertRaises(NameError): pd.eval(e, engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_invalid_local_variable_reference(engine, parser): - tm.skip_if_no_ne(engine) - a, b = 1, 2 exprs = 'a + @b', '@a + b', '@a + @b' for expr in exprs: @@ -1832,10 +1798,7 @@ def test_invalid_local_variable_reference(engine, parser): pd.eval(exprs, engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_numexpr_builtin_raises(engine, parser): - tm.skip_if_no_ne(engine) sin, dotted_line = 1, 2 if engine == 'numexpr': with tm.assertRaisesRegexp(NumExprClobberingError, @@ -1846,41 +1809,29 @@ def test_numexpr_builtin_raises(engine, parser): tm.assert_equal(res, sin + dotted_line) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_bad_resolver_raises(engine, parser): - tm.skip_if_no_ne(engine) cannot_resolve = 42, 3.0 with tm.assertRaisesRegexp(TypeError, 'Resolver of type .+'): pd.eval('1 + 2', resolvers=cannot_resolve, engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_empty_string_raises(engine, parser): # GH 13139 - tm.skip_if_no_ne(engine) with tm.assertRaisesRegexp(ValueError, 'expr cannot be an empty string'): 
pd.eval('', engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_more_than_one_expression_raises(engine, parser): - tm.skip_if_no_ne(engine) with tm.assertRaisesRegexp(SyntaxError, 'only a single expression is allowed'): pd.eval('1 + 1; 2 + 2', engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) @pytest.mark.parametrize('cmp', ('and', 'or')) @pytest.mark.parametrize('lhs', (int, float)) @pytest.mark.parametrize('rhs', (int, float)) def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): - tm.skip_if_no_ne(engine) gen = {int: lambda: np.random.randint(10), float: np.random.randn} mid = gen[lhs]() @@ -1895,20 +1846,14 @@ def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): pd.eval(ex, engine=engine, parser=parser) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_inf(engine, parser): - tm.skip_if_no_ne(engine) s = 'inf + 1' expected = np.inf result = pd.eval(s, engine=engine, parser=parser) tm.assert_equal(result, expected) -@pytest.mark.parametrize('engine', _engines) -@pytest.mark.parametrize('parser', expr._parsers) def test_negate_lt_eq_le(engine, parser): - tm.skip_if_no_ne(engine) df = pd.DataFrame([[0, 10], [1, 20]], columns=['cat', 'count']) expected = df[~(df.cat > 0)] From b002752adb8157169bf1b635f5e38552a2df4602 Mon Sep 17 00:00:00 2001 From: Elliott Sales de Andrade Date: Wed, 15 Feb 2017 23:19:55 -0500 Subject: [PATCH 4/4] TST: Set PYTHONHASHSEED so xdist doesn't break. --- ci/script_multi.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ci/script_multi.sh b/ci/script_multi.sh index f5fbcbbc12f83..41f71fd21f63f 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -17,6 +17,12 @@ if [ -n "$LOCALE_OVERRIDE" ]; then python -c "$pycmd" fi +# Workaround for pytest-xdist flaky collection order +# https://github.com/pytest-dev/pytest/issues/920 +# https://github.com/pytest-dev/pytest/issues/1075 +export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))') +echo PYTHONHASHSEED=$PYTHONHASHSEED + if [ "$BUILD_TEST" ]; then echo "We are not running pytest as this is simply a build test." elif [ "$COVERAGE" ]; then