library(lmeInfo)
library(nlme)
library(lme4)
Loading required package: Matrix
Attaching package: ‘lme4’
The following object is masked from ‘package:nlme’:
lmList
library(eefAnalytics)
Registered S3 methods overwritten by 'htmltools':
method from
print.html tools:rstudio
print.shiny.tag tools:rstudio
print.shiny.tag.list tools:rstudio
Registered S3 method overwritten by 'htmlwidgets':
method from
print.htmlwidget tools:rstudio
library(tictoc)
library(beepr)
library(brms)
Loading required package: Rcpp
Loading 'brms' package (version 2.21.0). Useful instructions
can be found by typing help('brms'). A more detailed introduction
to the package is available through vignette('brms_overview').
Attaching package: ‘brms’
The following object is masked from ‘package:lme4’:
ngrps
The following object is masked from ‘package:stats’:
ar
library(tidybayes)
Attaching package: ‘tidybayes’
The following objects are masked from ‘package:brms’:
dstudent_t, pstudent_t, qstudent_t, rstudent_t
library(tidyverse)
── Attaching core tidyverse packages ─────────────────────────────────────────── tidyverse 2.0.0 ──
✔ dplyr 1.1.4 ✔ readr 2.1.5
✔ forcats 1.0.0 ✔ stringr 1.5.1
✔ ggplot2 3.5.1 ✔ tibble 3.2.1
✔ lubridate 1.9.3 ✔ tidyr 1.3.1
✔ purrr 1.0.2
── Conflicts ───────────────────────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::collapse() masks nlme::collapse()
✖ tidyr::expand() masks Matrix::expand()
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag() masks stats::lag()
✖ tidyr::pack() masks Matrix::pack()
✖ tidyr::unpack() masks Matrix::unpack()
ℹ Use the conflicted package (http://conflicted.r-lib.org/) to force all conflicts to become errors
all_results <- list()
# vec_res order: estimate, lower 95% limit, upper 95% limit
add_result <- function(name, vec_res) {
  stopifnot(is.vector(vec_res))
  stopifnot(length(vec_res) == 3)
  stopifnot(vec_res[1] >= vec_res[2])  # estimate at or above the lower limit
  stopifnot(vec_res[1] <= vec_res[3])  # estimate at or below the upper limit
  stopifnot(vec_res[2] <= vec_res[3])
  res <- tibble(
    source  = name,
    est     = vec_res[1],
    lower95 = vec_res[2],
    upper95 = vec_res[3]
  )
  all_results <<- append(all_results, list(res))
}
clear_results <- function() {
  all_results <<- list()
}
tibble_results <- function() {
  bind_rows(all_results)
}
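A quick illustration of how these helpers hang together (toy numbers only; the final clear_results() empties the store again before any real results are added):
add_result("toy example", c(0.5, 0.1, 0.9))  # estimate, lower95, upper95
tibble_results()
clear_results()  # reset before collecting the real results below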
output1 <- crtFREQ(
  Posttest ~ Intervention + Prettest,
  random = "School",
  intervention = "Intervention",
  data = crtData
)
Computing profile confidence intervals ...
summary(output1)
method: MLM
Design: crtFREQ
Estimate 95% LB 95% UB
Intercept 11.23 9.05 13.42
Intervention1 3.11 0.75 5.47
Prettest 1.79 1.39 2.18
Result for: Conditional effect size
$Intervention1
Estimate 95% LB 95% UB
Within 0.81 0.10 1.52
Total 0.67 0.07 1.28
Result for: Unconditional effect size
$Intervention1
Estimate 95% LB 95% UB
Within 0.7 0.04 1.37
Total 0.6 0.02 1.17
output1$ES$Intervention1[2, ]                # row 2 = "Total" (conditional)
output1$Unconditional$ES$Intervention1[2, ]  # row 2 = "Total" (unconditional)
add_result("eefAnalytics total conditional",
output1$ES$Intervention1[2, ] |> as.numeric())
add_result(
"eefAnalytics total unconditional",
output1$Unconditional$ES$Intervention1[2, ] |> as.numeric()
)
tibble_results()
test_mod <- lmer(Posttest ~ Intervention + Prettest +
                   (1 | School), data = crtData)
test_mod |> summary()
Linear mixed model fit by REML ['lmerMod']
Formula: Posttest ~ Intervention + Prettest + (1 | School)
Data: crtData
REML criterion at convergence: 1493.8
Scaled residuals:
Min 1Q Median 3Q Max
-2.46249 -0.64166 0.01626 0.59655 2.60277
Random effects:
Groups Name Variance Std.Dev.
School (Intercept) 5.674 2.382
Residual 14.779 3.844
Number of obs: 265, groups: School, 22
Fixed effects:
Estimate Std. Error t value
(Intercept) 11.2286 1.1250 9.981
Intervention 3.1097 1.2094 2.571
Prettest 1.7889 0.2004 8.928
Correlation of Fixed Effects:
(Intr) Intrvn
Interventin -0.493
Prettest -0.681 -0.010
test_mod_0 <- lmer(Posttest ~ Intervention + (1|School), data = crtData)
test_mod_0
Linear mixed model fit by REML ['lmerMod']
Formula: Posttest ~ Intervention + (1 | School)
Data: crtData
REML criterion at convergence: 1561.157
Random effects:
Groups Name Std.Dev.
School (Intercept) 2.127
Residual 4.429
Number of obs: 265, groups: School, 22
Fixed Effects:
(Intercept) Intervention
18.150 3.181
# Total SD: square root of the summed variance components from the
# unconditional model
vars <- VarCorr(test_mod_0) |> as.data.frame()
the_sd <- vars$vcov |> sum() |> sqrt()
the_sd
[1] 4.913204
vars
add_result("Naively divide CI by SD",
c(fixef(test_mod)["Intervention"], confint(test_mod)["Intervention",]) |> as.numeric() / the_sd)
Computing profile confidence intervals ...
tibble_results()
lme_num <- lme(
  fixed = Posttest ~ Intervention + Prettest,
  random = ~ 1 | School,
  data = crtData,
  method = "REML"
)
lme_den <- lme(
  fixed = Posttest ~ Intervention,
  random = ~ 1 | School,
  data = crtData,
  method = "REML"
)
summary(lme_num)
Linear mixed-effects model fit by REML
Data: crtData
AIC BIC logLik
1503.811 1521.653 -746.9057
Random effects:
Formula: ~1 | School
(Intercept) Residual
StdDev: 2.381959 3.844398
Fixed effects: Posttest ~ Intervention + Prettest
Value Std.Error DF t-value p-value
(Intercept) 11.228610 1.125000 242 9.980989 0.0000
Intervention 3.109709 1.209383 20 2.571318 0.0182
Prettest 1.788931 0.200381 242 8.927649 0.0000
Correlation:
(Intr) Intrvn
Intervention -0.493
Prettest -0.681 -0.010
Standardized Within-Group Residuals:
Min Q1 Med Q3 Max
-2.46249340 -0.64165752 0.01625598 0.59654661 2.60277048
Number of Observations: 265
Number of Groups: 22
the_g <- g_mlm(
  lme_num,
  p_const = c(0, 1, 0),   # contrast picking out the Intervention coefficient
  mod_denom = lme_den,
  r_const = c(1, 1),      # sum the school and residual variance components
  infotype = "expected",
  separate_variances = FALSE
)
the_g
est se
unadjusted effect size 0.633 0.250
adjusted effect size 0.630 0.249
degree of freedom 158.355
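As a sanity check on the contrast vectors (assuming, per the lmeInfo documentation, that p_const weights the fixed effects for the numerator and r_const the variance components for the denominator), the unadjusted effect size can be rebuilt by hand:
num <- sum(c(0, 1, 0) * nlme::fixef(lme_num))  # the Intervention coefficient
den <- sqrt(sum(as.numeric(nlme::VarCorr(lme_den)[, "Variance"])))  # total SD
num / den   # should match the_g$delta_AB, i.e. 0.633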
Without the df adjustment:
unadj_SE <- sqrt(the_g$SE_g_AB^2 / the_g$J_nu^2)  # equivalently, SE_g_AB / J_nu
the_g$delta_AB
[1] 0.6329288
c(the_g$delta_AB - 1.96 * unadj_SE,
  the_g$delta_AB + 1.96 * unadj_SE)
[1] 0.1423461 1.1235116
“Manual” df adjustment:
the_g$J_nu * the_g$delta_AB
[1] 0.6299264
From the model:
the_g$g_AB
[1] 0.6299264
c(the_g$g_AB - 1.96 * the_g$SE_g_AB,
  the_g$g_AB + 1.96 * the_g$SE_g_AB)
[1] 0.1416708 1.1181820
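If lmeInfo applies the standard Hedges small-sample correction, J(ν) = 1 − 3/(4ν − 1), and the object stores the degrees of freedom as nu (both assumptions consistent with the package documentation), the adjustment factor can be reproduced from the reported df:
1 - 3 / (4 * the_g$nu - 1)   # should reproduce the_g$J_nu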
add_result("lmeInfo Hedges df adjusted", c(the_g$g_AB,
the_g$g_AB - 1.96 * the_g$SE_g_AB,
the_g$g_AB + 1.96 * the_g$SE_g_AB))
add_result("lmeInfo unadjusted", c(the_g$delta_AB,
the_g$delta_AB - 1.96 * unadj_SE,
the_g$delta_AB + 1.96 * unadj_SE))
tibble_results()
my_g <- fixef(test_mod)["Intervention"] / the_sd
my_g
Intervention
0.6329289
b_var <- vcov(test_mod)["Intervention", "Intervention"]  # sampling variance of b
b_var
[1] 1.462606
library(merDeriv)
Loading required package: nonnest2
This is nonnest2 0.5-7.
nonnest2 has not been tested with all combinations of supported model classes.
Loading required package: sandwich
Loading required package: lavaan
This is lavaan 0.6-18
lavaan is FREE software! Please report any bugs.
# Sampling variance of the school-level variance component: entry [3, 3] of
# the full parameter covariance matrix (fixed effects first, then variances)
ranef_var <- vcov(test_mod_0,
                  full = TRUE,
                  ranpar = "var",
                  information = "expected")[3, 3]
sum_vars <- vars$vcov |> sum()
# Delta-method SE for g = b / sd_total (see the note below)
the_se <- sqrt((b_var / sum_vars) +
                 ((fixef(test_mod)["Intervention"]^2 * ranef_var) / (4 * sum_vars^3)))
the_se
Intervention
0.2478425
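For the record, the SE above is the usual delta-method expansion of g = b / sd_total: Var(g) ≈ Var(b)/v + b² · Var(v̂) / (4v³), where v is the total (school + residual) variance and b and v̂ are treated as independent. The code plugs in the sampling variance of the school-level component for Var(v̂); if you wanted to propagate uncertainty from both components, the same expansion extends along these lines (a sketch only; the [3, 3] and [4, 4] indices assume merDeriv orders parameters as fixed effects, then the school variance, then the residual variance):
vc_full <- vcov(test_mod_0, full = TRUE, ranpar = "var",
                information = "expected")
# Var of the total variance: sum of the component variances plus the covariance
var_total <- vc_full[3, 3] + vc_full[4, 4] + 2 * vc_full[3, 4]
sqrt((b_var / sum_vars) +
     (fixef(test_mod)["Intervention"]^2 * var_total) / (4 * sum_vars^3))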
add_result("Manual using merDeriv (no df adjust)", c(my_g,
my_g - 1.96 * the_se,
my_g + 1.96 * the_se))
tibble_results()
library(lmeresampler)
to_boot <- lmer(Posttest ~ Intervention + Prettest +
                  (1 | School), data = crtData)
each_boot <- function(mod) {
  # Fit the unconditional (denominator) model to the same data as the
  # refitted (resampled) numerator model that lmeresampler passes in
  denom_mod <- lmer(Posttest ~ Intervention + (1 | School),
                    data = model.frame(mod))
  vars <- VarCorr(denom_mod) |> as.data.frame()
  the_sd <- vars$vcov |> sum() |> sqrt()
  the_b <- fixef(mod)["Intervention"]
  the_es <- the_b / the_sd
  c(b = the_b, sd = the_sd, g = the_es)
}
each_boot(to_boot)
b.Intervention sd g.Intervention
3.1097086 4.9132036 0.6329289
how_many_boots <- 5000
tic()
boo_para <- bootstrap(
  model = to_boot,
  .f = each_boot,
  type = "parametric",
  B = how_many_boots
)
toc()
103.5 sec elapsed
beep(3)
tic()
boo_resid <- bootstrap(
  model = to_boot,
  .f = each_boot,
  type = "residual",
  B = how_many_boots
)
toc()
111.16 sec elapsed
beep(2)
tic()
boo_case <- bootstrap(
  model = to_boot,
  .f = each_boot,
  type = "case",
  B = how_many_boots,
  resample = c(TRUE, TRUE)
)
toc()
161.14 sec elapsed
beep(1)
confint(boo_para, type = "perc")
confint(boo_resid, type = "perc")
confint(boo_case, type = "perc")
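Before summarising, it is worth eyeballing the replicate distributions. lmeresampler stores them in the replicates tibble, the same place get_bootstrap_ests() below pulls from (the column name comes from the vector each_boot returns):
boo_para$replicates$g.Intervention |> summary()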
get_bootstrap_ests <- function(obj) {
  mean_est <- obj$replicates$g.Intervention |> mean()
  intervals <- confint(obj, type = "perc") |>
    filter(term == "g.Intervention")
  c(mean_est, intervals$lower, intervals$upper) |> as.numeric()
}
add_result("bootstrap (parametric)", get_bootstrap_ests(boo_para))
add_result("bootstrap (resid)", get_bootstrap_ests(boo_resid))
add_result("bootstrap (case)", get_bootstrap_ests(boo_case))
tibble_results()
Model for the numerator:
bayes_mod_1 <- brm(Posttest ~ Intervention + Prettest +
                     (1 | School), data = crtData)
Compiling Stan program...
Start sampling
(Per-chain sampling progress omitted: 4 chains, 2,000 iterations each with 1,000 warmup; every chain finished in roughly 0.6 seconds.)
Model for the denominator:
bayes_mod_0 <- brm(Posttest ~ Intervention +
                     (1 | School), data = crtData)
Compiling Stan program...
Start sampling
(Per-chain sampling progress omitted again: 4 chains, 2,000 iterations each with 1,000 warmup; every chain finished in well under a second.)
bayes_mod_0 |> get_variables()
[1] "b_Intercept" "b_Intervention" "sd_School__Intercept"
[4] "sigma" "Intercept" "r_School[1,Intercept]"
[7] "r_School[2,Intercept]" "r_School[3,Intercept]" "r_School[4,Intercept]"
[10] "r_School[5,Intercept]" "r_School[6,Intercept]" "r_School[7,Intercept]"
[13] "r_School[8,Intercept]" "r_School[9,Intercept]" "r_School[10,Intercept]"
[16] "r_School[11,Intercept]" "r_School[12,Intercept]" "r_School[13,Intercept]"
[19] "r_School[14,Intercept]" "r_School[15,Intercept]" "r_School[16,Intercept]"
[22] "r_School[17,Intercept]" "r_School[18,Intercept]" "r_School[19,Intercept]"
[25] "r_School[20,Intercept]" "r_School[21,Intercept]" "r_School[22,Intercept]"
[28] "lprior" "lp__" "accept_stat__"
[31] "stepsize__" "treedepth__" "n_leapfrog__"
[34] "divergent__" "energy__"
b_draws <- bayes_mod_1 |>
  spread_draws(b_Intervention)
sd_draws <- bayes_mod_0 |>
  spread_draws(sd_School__Intercept, sigma) |>
  mutate(total_sd = sqrt(sd_School__Intercept^2 + sigma^2))
head(b_draws)
head(sd_draws)
combined_draws <- bind_cols(b_draws |> select(b_Intervention),
                            sd_draws |> select(total_sd)) |>
  mutate(g = b_Intervention / total_sd)
head(combined_draws)
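One caveat on the bind_cols() step: the two models were fitted independently, so pairing draw i of b_Intervention with draw i of total_sd is arbitrary. Because the two posteriors are independent by construction, that pairing still yields valid Monte Carlo draws of g, but the row counts do need to match:
# Both fits used brms defaults: 4 chains x 1,000 post-warmup draws = 4,000 rows
nrow(b_draws) == nrow(sd_draws)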
combined_draws_Bayes_mean <- combined_draws |>
  mean_hdci(g)
combined_draws_Bayes_median <- combined_draws |>
  median_hdci(g)
combined_draws_Bayes_mode <- combined_draws |>
  mode_hdci(g)
add_result("Bayesian HDCI (mean)",
           combined_draws_Bayes_mean[, 1:3] |> as.numeric())
add_result("Bayesian HDCI (median)",
           combined_draws_Bayes_median[, 1:3] |> as.numeric())
add_result("Bayesian HDCI (mode)",
           combined_draws_Bayes_mode[, 1:3] |> as.numeric())
tibble_results() |>
  mutate(across(where(is.numeric), ~ round(.x, 4))) |>
  arrange(lower95, est)