Use sigma instead of sd, remove DeprecationWarning #4344

Merged · 5 commits · Dec 26, 2020
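
In short: this PR stops emitting a DeprecationWarning whenever a distribution receives the legacy sd keyword; sd is still accepted and silently mapped onto sigma. The repeated pattern being changed looks like this (a representative before/after sketch, not any single file):

    # Before: every affected __init__ carried this shim
    if sd is not None:
        sigma = sd
        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

    # After: the alias remains, only the warning is gone
    if sd is not None:
        sigma = sd

The now-unused import warnings is dropped from modules where nothing else needs it, and the tests are renamed from sd to sigma to match.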
14 changes: 0 additions & 14 deletions pymc3/distributions/continuous.py
@@ -478,7 +478,6 @@ class Normal(Continuous):
def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs):
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.sigma = self.sd = tt.as_tensor_variable(sigma)
self.tau = tt.as_tensor_variable(tau)
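
The get_tau_sigma call above reconciles the two equivalent parametrizations of the Gaussian scale: precision tau and standard deviation sigma, related by tau = 1/sigma**2. A simplified sketch of that conversion, assuming the real helper's tensor handling is out of scope:

    def get_tau_sigma_sketch(tau=None, sigma=None):
        # Accept at most one of tau/sigma and derive the other.
        if tau is not None and sigma is not None:
            raise ValueError("Can't pass both tau and sigma")
        if tau is None and sigma is None:
            sigma = 1.0  # assumed default scale
        if sigma is not None:
            tau = sigma ** -2.0  # tau = 1 / sigma^2
        else:
            sigma = tau ** -0.5  # sigma = 1 / sqrt(tau)
        return tau, sigma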
@@ -640,7 +639,6 @@ def __init__(
):
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.sigma = self.sd = tt.as_tensor_variable(sigma)
Member: I think we can leave self.sd too.

Contributor Author: Sure, I can revert this, but wouldn't it be safer to have a single source of truth for the value of sigma?

Member: I agree with your sentiment, but from a user's perspective, if I set sd=x I'd expect there to be a y.sd. And if we're keeping sd around anyway, which isn't the cleanest, I don't see what we gain by changing this one thing in a subtle way.

Contributor Author: Makes sense, thanks for explaining!

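For context on the thread above, the alias is user-visible; a rough illustration (assuming a pymc3 build from around this PR):

    import pymc3 as pm

    d = pm.Normal.dist(mu=0.0, sigma=1.0)
    # self.sigma = self.sd = tt.as_tensor_variable(sigma) binds one tensor
    # to two names, so the legacy attribute is literally the same object:
    assert d.sd is d.sigma
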
self.tau = tt.as_tensor_variable(tau)
@@ -835,7 +833,6 @@ class HalfNormal(PositiveContinuous):
def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs):
if sd is not None:
sigma = sd
Member: Instead of deleting, could you raise an error here and advise using sigma?

The reason being that sd was a keyword for a REALLY long time, so there are a lot of examples out there in the wild using it.

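A minimal sketch of what that suggestion could look like; the error type and message are illustrative assumptions, not code from this PR:

    def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs):
        if sd is not None:
            # Hypothetical: fail loudly so old examples using `sd` get an
            # actionable message instead of a silent rename.
            raise TypeError("The `sd` keyword was removed; use `sigma` instead.")
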
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
super().__init__(*args, **kwargs)
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)

@@ -1218,7 +1215,6 @@ def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.beta = beta = tt.as_tensor_variable(floatX(beta))
@@ -1724,7 +1720,6 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)

@@ -1884,11 +1879,9 @@ class StudentT(Continuous):
"""

def __init__(self, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
-        super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
self.nu = nu = tt.as_tensor_variable(floatX(nu))
lam, sigma = get_tau_sigma(tau=lam, sigma=sigma)
self.lam = lam = tt.as_tensor_variable(lam)
@@ -2397,7 +2390,6 @@ def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
@@ -2545,7 +2537,6 @@ def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs):

if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

alpha, beta = InverseGamma._get_alpha_beta(alpha, beta, mu, sigma)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
@@ -2902,7 +2893,6 @@ def __init__(self, nu=1, sigma=None, lam=None, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

self.mode = tt.as_tensor_variable(0)
lam, sigma = get_tau_sigma(lam, sigma)
@@ -3041,7 +3031,6 @@ def __init__(self, mu=0.0, sigma=None, nu=None, sd=None, *args, **kwargs):

if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

self.mu = mu = tt.as_tensor_variable(floatX(mu))
self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
Member: Can remove self.sd as well.

@@ -3317,7 +3306,6 @@ def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, *args, **kwargs):

if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.mu = mu = tt.as_tensor_variable(floatX(mu))
@@ -3721,7 +3709,6 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

nu, b, sigma = self.get_nu_b(nu, b, sigma)
self.nu = nu = tt.as_tensor_variable(floatX(nu))
@@ -3994,7 +3981,6 @@ class LogitNormal(UnitContinuous):
def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs):
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
self.mu = mu = tt.as_tensor_variable(floatX(mu))
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.sigma = self.sd = tt.as_tensor_variable(sigma)
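One way to see the user-facing effect of the changes in this file (a sketch, assuming a post-merge pymc3): escalating DeprecationWarnings to errors no longer trips on sd.

    import warnings
    import pymc3 as pm

    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        # Raised DeprecationWarning before this PR; passes silently after.
        d = pm.Normal.dist(mu=0.0, sd=1.0)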
3 changes: 0 additions & 3 deletions pymc3/distributions/mixture.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import warnings
-
from collections.abc import Iterable

import numpy as np
@@ -632,7 +630,6 @@ class NormalMixture(Mixture):
def __init__(self, w, mu, sigma=None, tau=None, sd=None, comp_shape=(), *args, **kwargs):
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
_, sigma = get_tau_sigma(tau=tau, sigma=sigma)

self.mu = mu = tt.as_tensor_variable(mu)
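For reference, typical usage of the class touched above; the weights and scales below are arbitrary illustration values:

    import numpy as np
    import pymc3 as pm

    with pm.Model():
        mix = pm.NormalMixture(
            "mix",
            w=np.array([0.3, 0.7]),      # mixture weights, summing to 1
            mu=np.array([-1.0, 1.0]),    # component means
            sigma=np.array([0.5, 2.0]),  # preferred keyword; sd still aliases it
        )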
4 changes: 0 additions & 4 deletions pymc3/distributions/timeseries.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import warnings
-
import numpy as np
import theano.tensor as tt

@@ -116,7 +114,6 @@ def __init__(
super().__init__(*args, **kwargs)
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.sigma = self.sd = tt.as_tensor_variable(sigma)
@@ -211,7 +208,6 @@ def __init__(self, tau=None, init=Flat.dist(), sigma=None, mu=0.0, sd=None, *args, **kwargs):
raise TypeError("GaussianRandomWalk must be supplied a non-zero shape argument!")
if sd is not None:
sigma = sd
-        warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
self.tau = tt.as_tensor_variable(tau)
sigma = tt.as_tensor_variable(sigma)
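The hunk above also shows GaussianRandomWalk insisting on an explicit shape. Roughly, valid usage after this change (values are arbitrary):

    import pymc3 as pm

    with pm.Model():
        # shape is mandatory for a random walk; sigma sets the innovation scale.
        walk = pm.GaussianRandomWalk("walk", mu=0.0, sigma=0.1, shape=100)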
8 changes: 4 additions & 4 deletions pymc3/tests/test_glm.py
@@ -46,9 +46,9 @@ def setup_class(cls):
super().setup_class()
cls.intercept = 1
cls.slope = 3
-        cls.sd = 0.05
+        cls.sigma = 0.05
x_linear, cls.y_linear = generate_data(cls.intercept, cls.slope, size=1000)
-        cls.y_linear += np.random.normal(size=1000, scale=cls.sd)
+        cls.y_linear += np.random.normal(size=1000, scale=cls.sigma)
cls.data_linear = pd.DataFrame(dict(x=x_linear, y=cls.y_linear))

x_logistic, y_logistic = generate_data(cls.intercept, cls.slope, size=3000)
@@ -73,7 +73,7 @@ def test_linear_component(self):

assert round(abs(np.mean(trace["Intercept"]) - self.intercept), 1) == 0
assert round(abs(np.mean(trace["x"]) - self.slope), 1) == 0
assert round(abs(np.mean(trace["sigma"]) - self.sd), 1) == 0
assert round(abs(np.mean(trace["sigma"]) - self.sigma), 1) == 0

def test_glm(self):
with Model() as model:

assert round(abs(np.mean(trace["Intercept"]) - self.intercept), 1) == 0
assert round(abs(np.mean(trace["x"]) - self.slope), 1) == 0
assert round(abs(np.mean(trace["sd"]) - self.sd), 1) == 0
assert round(abs(np.mean(trace["sd"]) - self.sigma), 1) == 0

def test_glm_offset(self):
offset = 1.0
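A side note on the tolerance idiom in these tests: round(abs(a - b), 1) == 0 accepts any absolute difference below roughly 0.05. An equivalent, arguably clearer formulation (not what the PR uses; trace and expected stand in for the test's locals):

    import numpy as np

    # |mean - expected| < 0.05 is what round(abs(a - b), 1) == 0 encodes,
    # up to floating-point rounding at the boundary.
    np.testing.assert_allclose(np.mean(trace["sigma"]), expected, atol=0.05)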
12 changes: 6 additions & 6 deletions pymc3/tests/test_models_linear.py
@@ -33,9 +33,9 @@ def setup_class(cls):
super().setup_class()
cls.intercept = 1
cls.slope = 3
-        cls.sd = 0.05
+        cls.sigma = 0.05
x_linear, cls.y_linear = generate_data(cls.intercept, cls.slope, size=1000)
-        cls.y_linear += np.random.normal(size=1000, scale=cls.sd)
+        cls.y_linear += np.random.normal(size=1000, scale=cls.sigma)
cls.data_linear = dict(x=x_linear, y=cls.y_linear)

x_logistic, y_logistic = generate_data(cls.intercept, cls.slope, size=3000)
@@ -59,7 +59,7 @@ def test_linear_component(self):

assert round(abs(np.mean(trace["lm_Intercept"]) - self.intercept), 1) == 0
assert round(abs(np.mean(trace["lm_x0"]) - self.slope), 1) == 0
-        assert round(abs(np.mean(trace["sigma"]) - self.sd), 1) == 0
+        assert round(abs(np.mean(trace["sigma"]) - self.sigma), 1) == 0
assert vars_to_create == set(model.named_vars.keys())

@@ -75,7 +75,7 @@ def test_linear_component_from_formula(self):

assert round(abs(np.mean(trace["Intercept"]) - self.intercept), 1) == 0
assert round(abs(np.mean(trace["x"]) - self.slope), 1) == 0
-        assert round(abs(np.mean(trace["sigma"]) - self.sd), 1) == 0
+        assert round(abs(np.mean(trace["sigma"]) - self.sigma), 1) == 0

def test_glm(self):
with Model() as model:
)
assert round(abs(np.mean(trace["glm_Intercept"]) - self.intercept), 1) == 0
assert round(abs(np.mean(trace["glm_x0"]) - self.slope), 1) == 0
-        assert round(abs(np.mean(trace["glm_sd"]) - self.sd), 1) == 0
+        assert round(abs(np.mean(trace["glm_sd"]) - self.sigma), 1) == 0
assert vars_to_create == set(model.named_vars.keys())

def test_glm_from_formula(self):

assert round(abs(np.mean(trace["%s_Intercept" % NAME]) - self.intercept), 1) == 0
assert round(abs(np.mean(trace["%s_x" % NAME]) - self.slope), 1) == 0
-        assert round(abs(np.mean(trace["%s_sd" % NAME]) - self.sd), 1) == 0
+        assert round(abs(np.mean(trace["%s_sd" % NAME]) - self.sigma), 1) == 0

def test_strange_types(self):
with Model():