diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
index 7bdd280bd7..ce102602cf 100644
--- a/.github/workflows/pytest.yml
+++ b/.github/workflows/pytest.yml
@@ -13,32 +13,25 @@ jobs:
         floatx: [float32, float64]
         test-subset:
           # Tests are split into multiple jobs to accelerate the CI.
-          # The first job (starting in the next block) shouldn't run any tests, but
-          # just ignores tests because that don't work at all, or run in other jobs.'
+          #
+          # How this works:
+          # 1st block: Only passes --ignore parameters to pytest.
+          #            → pytest will run all test_*.py files that are NOT ignored.
+          # Other blocks: Only pass paths to test files.
+          #            → pytest will run only these files.
+          #
           # Any test that was not ignored runs in the first job.
-          # A pre-commit hook (scripts/check_all_tests_are_covered.py) enforces that
-          # test run just once.
-
-          # Because YAML doesn't allow comments in the blocks below, here they are..
-          # 1st block: These tests are temporarily disabled, because they are _very_ broken
-          # 2nd block: The JAX tests run through their own workflow: jaxtests.yml
-          # 3nd & 4rd: These tests are covered by other matrix jobs
-          # 5th block: These tests PASS without a single XFAIL
-          # 6th block: These have some XFAILs
+          # A pre-commit hook (scripts/check_all_tests_are_covered.py)
+          # enforces that each test runs just once.
           - |
             --ignore=pymc3/tests/test_distributions_timeseries.py
-            --ignore=pymc3/tests/test_missing.py
             --ignore=pymc3/tests/test_mixture.py
             --ignore=pymc3/tests/test_model_graph.py
             --ignore=pymc3/tests/test_modelcontext.py
             --ignore=pymc3/tests/test_parallel_sampling.py
             --ignore=pymc3/tests/test_profile.py
-            --ignore=pymc3/tests/test_random.py
-            --ignore=pymc3/tests/test_shared.py
             --ignore=pymc3/tests/test_smc.py
-            --ignore=pymc3/tests/test_starting.py
             --ignore=pymc3/tests/test_step.py
-            --ignore=pymc3/tests/test_tracetab.py
             --ignore=pymc3/tests/test_tuning.py
             --ignore=pymc3/tests/test_types.py
             --ignore=pymc3/tests/test_variational_inference.py
diff --git a/pymc3/tests/test_missing.py b/pymc3/tests/test_missing.py
index b1a0b20c97..2a0c1ae35a 100644
--- a/pymc3/tests/test_missing.py
+++ b/pymc3/tests/test_missing.py
@@ -121,7 +121,12 @@ def test_interval_missing_observations():
 
         assert {"theta1", "theta2"} <= set(prior_trace.keys())
 
-        trace = sample(chains=1, draws=50, compute_convergence_checks=False)
+        trace = sample(
+            chains=1,
+            draws=50,
+            compute_convergence_checks=False,
+            return_inferencedata=False,
+        )
 
         assert np.all(0 < trace["theta1_missing"].mean(0))
         assert np.all(0 < trace["theta2_missing"].mean(0))
diff --git a/pymc3/tests/test_starting.py b/pymc3/tests/test_starting.py
index 4d7c859163..42837571e8 100644
--- a/pymc3/tests/test_starting.py
+++ b/pymc3/tests/test_starting.py
@@ -13,8 +13,7 @@
 # limitations under the License.
 
 import numpy as np
-
-from pytest import raises
+import pytest
 
 from pymc3 import (
     Beta,
@@ -47,6 +46,7 @@ def test_accuracy_non_normal():
     close_to(newstart["x"], mu, select_by_precision(float64=1e-5, float32=1e-4))
 
 
+@pytest.mark.xfail(reason="find_MAP fails with derivatives")
 def test_find_MAP_discrete():
     tol = 2.0 ** -11
     alpha = 4
@@ -68,12 +68,15 @@ def test_find_MAP_discrete():
     assert map_est2["ss"] == 14
 
 
+@pytest.mark.xfail(reason="find_MAP fails with derivatives")
 def test_find_MAP_no_gradient():
     _, model = simple_arbitrary_det()
     with model:
         find_MAP()
 
 
+@pytest.mark.skip(reason="test is slow because it's failing")
+@pytest.mark.xfail(reason="find_MAP fails with derivatives")
 def test_find_MAP():
     tol = 2.0 ** -11  # 16 bit machine epsilon, a low bar
     data = np.random.randn(100)
@@ -106,8 +109,8 @@ def test_find_MAP_issue_4488():
         map_estimate = find_MAP()
 
     assert not set.difference({"x_missing", "x_missing_log__", "y"}, set(map_estimate.keys()))
-    assert np.isclose(map_estimate["x_missing"], 0.2)
-    np.testing.assert_array_equal(map_estimate["y"], [2.0, map_estimate["x_missing"][0] + 1])
+    np.testing.assert_allclose(map_estimate["x_missing"], 0.2, rtol=1e-5, atol=1e-5)
+    np.testing.assert_allclose(map_estimate["y"], [2.0, map_estimate["x_missing"][0] + 1])
 
 
 def test_allinmodel():
@@ -120,11 +123,16 @@ def test_allinmodel():
         x2 = Normal("x2", mu=0, sigma=1)
         y2 = Normal("y2", mu=0, sigma=1)
 
+    x1 = model1.rvs_to_values[x1]
+    y1 = model1.rvs_to_values[y1]
+    x2 = model2.rvs_to_values[x2]
+    y2 = model2.rvs_to_values[y2]
+
     starting.allinmodel([x1, y1], model1)
     starting.allinmodel([x1], model1)
-    with raises(ValueError, match=r"Some variables not in the model: \['x2', 'y2'\]"):
+    with pytest.raises(ValueError, match=r"Some variables not in the model: \['x2', 'y2'\]"):
         starting.allinmodel([x2, y2], model1)
-    with raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
+    with pytest.raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
         starting.allinmodel([x2, y1], model1)
-    with raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
+    with pytest.raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
         starting.allinmodel([x2], model1)
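
Note for reviewers: the updated test_allinmodel reflects the v4-dev convention that helpers like starting.allinmodel receive *value* variables rather than the random variables returned by the distribution constructors. Below is a minimal sketch of that lookup, assuming a pymc3 v4-dev install; the import path pymc3.tuning.starting is an assumption, since the diff only shows the starting.allinmodel calls.

import pymc3 as pm
from pymc3.tuning import starting  # assumed import path (the diff only shows starting.allinmodel)

with pm.Model() as model:
    x = pm.Normal("x", mu=0, sigma=1)  # `x` is the random variable returned by the constructor

# Map the random variable to its value variable, as the updated test does,
# before passing it to starting.allinmodel().
x_value = model.rvs_to_values[x]
starting.allinmodel([x_value], model)  # passes; a variable from another model raises ValueError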