diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py
index eb1fce02c1..57fbd35b73 100644
--- a/pymc/distributions/continuous.py
+++ b/pymc/distributions/continuous.py
@@ -3653,12 +3653,23 @@ def _interpolated_argcdf(p, pdf, cdf, x):
     index = np.searchsorted(cdf, p) - 1
     slope = (pdf[index + 1] - pdf[index]) / (x[index + 1] - x[index])
 
-    return x[index] + np.where(
-        np.abs(slope) <= 1e-8,
-        np.where(np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), (p - cdf[index]) / pdf[index]),
-        (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope,
+    # First term (constant) of the Taylor expansion around slope = 0
+    small_slopes = np.where(
+        np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), (p - cdf[index]) / pdf[index]
     )
+    # This warning happens when we divide by slope = 0: we can ignore it
+    # because the other result will be returned
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore", ".*invalid value encountered in true_divide.*", RuntimeWarning
+        )
+        large_slopes = (
+            -pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))
+        ) / slope
+
+    return x[index] + np.where(np.abs(slope) <= 1e-8, small_slopes, large_slopes)
+
 
 class InterpolatedRV(RandomVariable):
     name = "interpolated"
diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py
index 588450560b..a75d2b973d 100644
--- a/pymc/distributions/logprob.py
+++ b/pymc/distributions/logprob.py
@@ -187,7 +187,7 @@ def joint_logp(
     # Else we assume we were given a single rv and respective value
     elif not isinstance(rv_values, Mapping):
         if len(var) == 1:
-            rv_values = {var[0]: at.as_tensor_variable(rv_values).astype(var[0].type)}
+            rv_values = {var[0]: at.as_tensor_variable(rv_values).astype(var[0].dtype)}
         else:
             raise ValueError("rv_values must be a dict if more than one var is requested")
 
diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py
index 50b9c3c90d..ea3345b2ec 100644
--- a/pymc/tests/test_distributions.py
+++ b/pymc/tests/test_distributions.py
@@ -2077,12 +2077,13 @@ def test_mvt(self, n):
 
     @pytest.mark.parametrize("n", [2, 3])
     def test_wishart(self, n):
-        check_logp(
-            Wishart,
-            PdMatrix(n),
-            {"nu": Domain([0, 3, 4, np.inf], "int64"), "V": PdMatrix(n)},
-            lambda value, nu, V: scipy.stats.wishart.logpdf(value, int(nu), V),
-        )
+        with pytest.warns(UserWarning, match="Wishart distribution can currently not be used"):
+            check_logp(
+                Wishart,
+                PdMatrix(n),
+                {"nu": Domain([0, 3, 4, np.inf], "int64"), "V": PdMatrix(n)},
+                lambda value, nu, V: scipy.stats.wishart.logpdf(value, int(nu), V),
+            )
 
     @pytest.mark.parametrize("x,eta,n,lp", LKJ_CASES)
     def test_lkjcorr(self, x, eta, n, lp):
diff --git a/pymc/tests/test_distributions_random.py b/pymc/tests/test_distributions_random.py
index e1aee47f88..07cc4f6893 100644
--- a/pymc/tests/test_distributions_random.py
+++ b/pymc/tests/test_distributions_random.py
@@ -230,7 +230,11 @@ class BaseTestDistributionRandom(SeededTest):
 
     def test_distribution(self):
         self.validate_tests_list()
-        self._instantiate_pymc_rv()
+        if self.pymc_dist == pm.Wishart:
+            with pytest.warns(UserWarning, match="can currently not be used for MCMC sampling"):
+                self._instantiate_pymc_rv()
+        else:
+            self._instantiate_pymc_rv()
         if self.reference_dist is not None:
             self.reference_dist_draws = self.reference_dist()(
                 size=self.size, **self.reference_dist_params
@@ -240,7 +244,11 @@ def test_distribution(self):
                 raise ValueError(
                     "Custom check cannot start with `test_` or else it will be executed twice."
                 )
-            getattr(self, check_name)()
+            if self.pymc_dist == pm.Wishart and check_name.startswith("check_rv_size"):
+                with pytest.warns(UserWarning, match="can currently not be used for MCMC sampling"):
+                    getattr(self, check_name)()
+            else:
+                getattr(self, check_name)()
 
     def _instantiate_pymc_rv(self, dist_params=None):
         params = dist_params if dist_params else self.pymc_dist_params
diff --git a/pymc/tests/test_distributions_timeseries.py b/pymc/tests/test_distributions_timeseries.py
index 4be518fa97..b1697f5ebf 100644
--- a/pymc/tests/test_distributions_timeseries.py
+++ b/pymc/tests/test_distributions_timeseries.py
@@ -11,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings
+
 import aesara
 import numpy as np
 import pytest
@@ -125,7 +127,7 @@ def check_rv_inferred_size(self):
 
     def test_steps_scalar_check(self):
         with pytest.raises(ValueError, match="steps must be an integer scalar"):
-            self.pymc_dist.dist(steps=[1])
+            self.pymc_dist.dist(steps=[1], init_dist=pm.DiracDelta.dist(0))
 
     def test_gaussianrandomwalk_inference(self):
         mu, sigma, steps = 2, 1, 1000
@@ -136,7 +138,9 @@ def test_gaussianrandomwalk_inference(self):
             _sigma = pm.Uniform("sigma", 0, 10)
             obs_data = pm.MutableData("obs_data", obs)
 
-            grw = GaussianRandomWalk("grw", _mu, _sigma, steps=steps, observed=obs_data)
+            grw = GaussianRandomWalk(
+                "grw", _mu, _sigma, steps=steps, observed=obs_data, init_dist=Normal.dist(0, 100)
+            )
 
             trace = pm.sample(chains=1)
 
@@ -147,26 +151,30 @@ def test_gaussianrandomwalk_inference(self):
    @pytest.mark.parametrize("init", [None, pm.Normal.dist()])
    def test_gaussian_random_walk_init_dist_shape(self, init):
        """Test that init_dist is properly resized"""
-        grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init)
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == ()
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "Initial distribution not specified.*", UserWarning)
+            grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init)
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == ()
 
-        grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init, size=(5,))
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == (5,)
+            grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init, size=(5,))
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == (5,)
 
-        grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init, shape=2)
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == ()
+            grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init, shape=2)
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == ()
 
-        grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init, shape=(5, 2))
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == (5,)
+            grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init_dist=init, shape=(5, 2))
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == (5,)
 
-        grw = pm.GaussianRandomWalk.dist(mu=[0, 0], sigma=1, steps=1, init_dist=init)
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == (2,)
+            grw = pm.GaussianRandomWalk.dist(mu=[0, 0], sigma=1, steps=1, init_dist=init)
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == (2,)
 
-        grw = pm.GaussianRandomWalk.dist(mu=0, sigma=[1, 1], steps=1, init_dist=init)
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == (2,)
+            grw = pm.GaussianRandomWalk.dist(mu=0, sigma=[1, 1], steps=1, init_dist=init)
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == (2,)
 
-        grw = pm.GaussianRandomWalk.dist(mu=np.zeros((3, 1)), sigma=[1, 1], steps=1, init_dist=init)
-        assert tuple(grw.owner.inputs[-2].shape.eval()) == (3, 2)
+            grw = pm.GaussianRandomWalk.dist(
+                mu=np.zeros((3, 1)), sigma=[1, 1], steps=1, init_dist=init
+            )
+            assert tuple(grw.owner.inputs[-2].shape.eval()) == (3, 2)
 
     def test_shape_ellipsis(self):
         grw = pm.GaussianRandomWalk.dist(
@@ -184,28 +192,28 @@ def test_gaussianrandomwalk_broadcasted_by_init_dist(self):
 
     @pytest.mark.parametrize("shape", ((6,), (3, 6)))
     def test_inferred_steps_from_shape(self, shape):
-        x = GaussianRandomWalk.dist(shape=shape)
+        x = GaussianRandomWalk.dist(shape=shape, init_dist=Normal.dist(0, 100))
         steps = x.owner.inputs[-1]
         assert steps.eval() == 5
 
     @pytest.mark.parametrize("shape", (None, (5, ...)))
     def test_missing_steps(self, shape):
         with pytest.raises(ValueError, match="Must specify steps or shape parameter"):
-            GaussianRandomWalk.dist(shape=shape)
+            GaussianRandomWalk.dist(shape=shape, init_dist=Normal.dist(0, 100))
 
     def test_inconsistent_steps_and_shape(self):
         with pytest.raises(AssertionError, match="Steps do not match last shape dimension"):
-            x = GaussianRandomWalk.dist(steps=12, shape=45)
+            x = GaussianRandomWalk.dist(steps=12, shape=45, init_dist=Normal.dist(0, 100))
 
     def test_inferred_steps_from_dims(self):
         with pm.Model(coords={"batch": range(5), "steps": range(20)}):
-            x = GaussianRandomWalk("x", dims=("batch", "steps"))
+            x = GaussianRandomWalk("x", dims=("batch", "steps"), init_dist=Normal.dist(0, 100))
         steps = x.owner.inputs[-1]
         assert steps.eval() == 19
 
     def test_inferred_steps_from_observed(self):
         with pm.Model():
-            x = GaussianRandomWalk("x", observed=np.zeros(10))
+            x = GaussianRandomWalk("x", observed=np.zeros(10), init_dist=Normal.dist(0, 100))
         steps = x.owner.inputs[-1]
         assert steps.eval() == 9
 
diff --git a/pymc/tests/test_mixture.py b/pymc/tests/test_mixture.py
index 4cdf6ed1db..7473efb362 100644
--- a/pymc/tests/test_mixture.py
+++ b/pymc/tests/test_mixture.py
@@ -337,9 +337,9 @@ def test_component_choice_random(self):
     @pytest.mark.parametrize(
         "comp_dists",
         (
-            [Normal.dist(size=(2,))],
+            Normal.dist(size=(2,)),
             [Normal.dist(), Normal.dist()],
-            [MvNormal.dist(np.ones(3), np.eye(3), size=(2,))],
+            MvNormal.dist(np.ones(3), np.eye(3), size=(2,)),
             [
                 MvNormal.dist(np.ones(3), np.eye(3)),
                 MvNormal.dist(np.ones(3), np.eye(3)),
@@ -348,7 +348,10 @@ def test_component_choice_random(self):
     )
     def test_components_expanded_by_weights(self, comp_dists):
         """Test that components are expanded when size or weights are larger than components"""
-        univariate = comp_dists[0].owner.op.ndim_supp == 0
+        if isinstance(comp_dists, list):
+            univariate = comp_dists[0].owner.op.ndim_supp == 0
+        else:
+            univariate = comp_dists.owner.op.ndim_supp == 0
 
         mix = Mixture.dist(
             w=Dirichlet.dist([1, 1], shape=(3, 2)),
@@ -371,9 +374,9 @@ def test_components_expanded_by_weights(self, comp_dists):
     @pytest.mark.parametrize(
         "comp_dists",
         (
-            [Normal.dist(size=(2,))],
+            Normal.dist(size=(2,)),
             [Normal.dist(), Normal.dist()],
-            [MvNormal.dist(np.ones(3), np.eye(3), size=(2,))],
+            MvNormal.dist(np.ones(3), np.eye(3), size=(2,)),
             [
                 MvNormal.dist(np.ones(3), np.eye(3)),
                 MvNormal.dist(np.ones(3), np.eye(3)),
@@ -382,7 +385,10 @@ def test_components_expanded_by_weights(self, comp_dists):
     )
     @pytest.mark.parametrize("expand", (False, True))
     def test_change_size(self, comp_dists, expand):
-        univariate = comp_dists[0].owner.op.ndim_supp == 0
+        if isinstance(comp_dists, list):
+            univariate = comp_dists[0].owner.op.ndim_supp == 0
+        else:
+            univariate = comp_dists.owner.op.ndim_supp == 0
 
         mix = Mixture.dist(w=Dirichlet.dist([1, 1]), comp_dists=comp_dists)
         mix = Mixture.change_size(mix, new_size=(4,), expand=expand)
@@ -444,6 +450,7 @@ def test_single_poisson_sampling(self):
             step = Metropolis()
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                warnings.filterwarnings("ignore", "overflow encountered in exp", RuntimeWarning)
                 trace = sample(
                     5000,
                     step,
@@ -467,6 +474,7 @@ def test_list_poissons_sampling(self):
             Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=pois_x)
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                warnings.filterwarnings("ignore", "overflow encountered in exp", RuntimeWarning)
                 trace = sample(
                     5000,
                     chains=1,
@@ -497,6 +505,7 @@ def test_list_normals_sampling(self):
             )
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                warnings.filterwarnings("ignore", "overflow encountered in exp", RuntimeWarning)
                 trace = sample(
                     5000,
                     chains=1,
@@ -755,6 +764,7 @@ def test_normal_mixture_sampling(self):
             step = Metropolis()
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                warnings.filterwarnings("ignore", "overflow encountered in exp", RuntimeWarning)
                 trace = sample(
                     5000,
                     step,
@@ -989,7 +999,8 @@ def test_with_multinomial(self, batch_shape):
         w = np.ones(self.mixture_comps) / self.mixture_comps
         mixture_axis = len(batch_shape)
         with Model() as model:
-            comp_dists = Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
+            with pytest.warns(UserWarning, match="parameters sum up to"):
+                comp_dists = Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
             mixture = Mixture(
                 "mixture",
                 w=w,
diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py
index 07ca8a1f9b..6a74b88d8d 100644
--- a/pymc/tests/test_model.py
+++ b/pymc/tests/test_model.py
@@ -14,8 +14,6 @@
 import unittest
 import warnings
 
-from functools import reduce
-
 import aesara
 import aesara.sparse as sparse
 import aesara.tensor as at
@@ -354,7 +352,8 @@ def test_missing_data(self):
 
             res = [gf(DictToArrayBijection.map(Point(pnt, model=m))) for i in range(5)]
 
-            assert reduce(lambda x, y: np.array_equal(x, y) and y, res) is not False
+            # Assert that all the elements of res are equal
+            assert res[1:] == res[:-1]
 
     def test_aesara_switch_broadcast_edge_cases_1(self):
         # Tests against two subtle issues related to a previous bug in Theano
diff --git a/pymc/tests/test_ode.py b/pymc/tests/test_ode.py
index a722b5eb2f..96bbf22ee7 100644
--- a/pymc/tests/test_ode.py
+++ b/pymc/tests/test_ode.py
@@ -366,6 +366,9 @@ def system(y, t, p):
        with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in log", RuntimeWarning
+                )
                idata = pm.sample(50, tune=0, chains=1)
 
    assert idata.posterior["alpha"].shape == (1, 50)
@@ -399,6 +402,9 @@ def system(y, t, p):
        with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in log", RuntimeWarning
+                )
                idata = pm.sample(50, tune=0, chains=1)
 
    assert idata.posterior["alpha"].shape == (1, 50)
@@ -443,6 +449,9 @@ def system(y, t, p):
        with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in log", RuntimeWarning
+                )
                idata = pm.sample(50, tune=0, chains=1)
 
    assert idata.posterior["R"].shape == (1, 50)
@@ -486,6 +495,9 @@ def system(y, t, p):
        with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in log", RuntimeWarning
+                )
                idata = pm.sample(50, tune=0, chains=1)
 
    assert idata.posterior["beta"].shape == (1, 50)
diff --git a/pymc/tests/test_shape_handling.py b/pymc/tests/test_shape_handling.py
index 9714a45492..3304c83974 100644
--- a/pymc/tests/test_shape_handling.py
+++ b/pymc/tests/test_shape_handling.py
@@ -110,10 +110,14 @@ class TestShapesBroadcasting:
         ids=str,
     )
     def test_type_check_raises(self, bad_input):
-        with pytest.raises(TypeError):
-            shapes_broadcasting(bad_input, tuple(), raise_exception=True)
-        with pytest.raises(TypeError):
-            shapes_broadcasting(bad_input, tuple(), raise_exception=False)
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", ".*ragged nested sequences.*", np.VisibleDeprecationWarning
+            )
+            with pytest.raises(TypeError):
+                shapes_broadcasting(bad_input, tuple(), raise_exception=True)
+            with pytest.raises(TypeError):
+                shapes_broadcasting(bad_input, tuple(), raise_exception=False)
 
     def test_type_check_success(self):
         inputs = [3, 3.0, tuple(), [3], (3,), np.array(3), np.array([3])]