@@ -14,7 +14,7 @@ kernelspec:
14
14
(GLM-truncated-censored-regression)=
15
15
# Bayesian regression with truncated or censored data
16
16
17
- :::{post} January, 2022
17
+ :::{post} September, 2022
18
18
:tags: censored, censoring, generalized linear model, regression, truncated, truncation
19
19
:category: beginner
20
20
:author: Benjamin T. Vincent
@@ -126,7 +126,7 @@ def linear_regression(x, y):
126
126
with pm.Model() as model:
127
127
slope = pm.Normal("slope", mu=0, sigma=1)
128
128
intercept = pm.Normal("intercept", mu=0, sigma=1)
129
- σ = pm.HalfNormal("σ", sd=1)
129
+ σ = pm.HalfNormal("σ", sigma=1)
130
130
pm.Normal("obs", mu=slope * x + intercept, sigma=σ, observed=y)
131
131
132
132
return model
@@ -163,7 +163,7 @@ az.plot_posterior(trunc_linear_fit, var_names=["slope"], ref_val=slope, ax=ax[0]
163
163
ax[0].set(title="Linear regression\n(truncated data)", xlabel="slope")
164
164
165
165
az.plot_posterior(cens_linear_fit, var_names=["slope"], ref_val=slope, ax=ax[1])
166
- ax[1].set(title="Linear regression\n(censored data)", xlabel="slope")
166
+ ax[1].set(title="Linear regression\n(censored data)", xlabel="slope");
167
167
```
168
168
169
169
To appreciate the extent of the problem (for this dataset) we can visualise the posterior predictive fits alongside the data.
@@ -217,15 +217,8 @@ def truncated_regression(x, y, bounds):
217
217
slope = pm.Normal("slope", mu=0, sigma=1)
218
218
intercept = pm.Normal("intercept", mu=0, sigma=1)
219
219
σ = pm.HalfNormal("σ", sigma=1)
220
-
221
- pm.TruncatedNormal(
222
- "obs",
223
- mu=slope * x + intercept,
224
- sigma=σ,
225
- observed=y,
226
- lower=bounds[0],
227
- upper=bounds[1],
228
- )
220
+ normal_dist = pm.Normal.dist(mu=slope * x + intercept, sigma=σ)
221
+ pm.Truncated("obs", normal_dist, lower=bounds[0], upper=bounds[1], observed=y)
229
222
return model
230
223
```
231
224
@@ -260,7 +253,7 @@ def censored_regression(x, y, bounds):
260
253
with pm.Model() as model:
261
254
slope = pm.Normal("slope", mu=0, sigma=1)
262
255
intercept = pm.Normal("intercept", mu=0, sigma=1)
263
- σ = pm.HalfNormal("σ", sd=1)
256
+ σ = pm.HalfNormal("σ", sigma=1)
264
257
y_latent = pm.Normal.dist(mu=slope * x + intercept, sigma=σ)
265
258
obs = pm.Censored("obs", y_latent, lower=bounds[0], upper=bounds[1], observed=y)
266
259
@@ -284,8 +277,8 @@ with pm.Model() as m:
284
277
with pm.Model() as m_censored:
285
278
pm.Censored("y", pm.Normal.dist(0, 2), lower=-1.0, upper=None)
286
279
287
- logp_fn = m.logp
288
- logp_censored_fn = m_censored.logp
280
+ logp_fn = m.compile_logp()
281
+ logp_censored_fn = m_censored.compile_logp()
289
282
290
283
xi = np.hstack((np.linspace(-6, -1.01), [-1.0], np.linspace(-0.99, 6)))
291
284
@@ -368,6 +361,7 @@ When looking into this topic, I found that most of the material out there focuse
368
361
## Authors
369
362
* Authored by [Benjamin T. Vincent](https://github.com/drbenvincent) in May 2021
370
363
* Updated by [Benjamin T. Vincent](https://github.com/drbenvincent) in January 2022
364
+ * Updated by [Benjamin T. Vincent](https://github.com/drbenvincent) in September 2022
371
365
372
366
+++
373
367
0 commit comments