diff --git a/.RData b/.RData new file mode 100644 index 00000000..4f2a53b6 Binary files /dev/null and b/.RData differ diff --git a/.github/workflows/test-coverage.yaml b/.github/workflows/test-coverage.yaml index c6abe713..39e17ce1 100644 --- a/.github/workflows/test-coverage.yaml +++ b/.github/workflows/test-coverage.yaml @@ -22,7 +22,7 @@ jobs: fail-fast: false matrix: config: - - {os: ubuntu-20.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} + - {os: windows-latest, r: 'release'} env: GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 187ec006..c834d2a3 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,7 @@ RoBMA.Rcheck .Rprofile /doc/ /Meta/ +models/MetaRegression/fit_wPSMA_reg10/*.RDS +models/MetaRegression/fit_PSMA_reg10/*.RDS +models/MetaRegression/fit_3PP_reg10/*.RDS models/MetaRegression/* diff --git a/DESCRIPTION b/DESCRIPTION index 1e4b896c..9476ec67 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: RoBMA Title: Robust Bayesian Meta-Analyses -Version: 2.3.2 +Version: 3.0.0 Maintainer: František Bartoš Authors@R: c( person("František", "Bartoš", role = c("aut", "cre"), @@ -21,9 +21,9 @@ Description: A framework for estimating ensembles of meta-analytic models combine the competing meta-analytic models into a model ensemble, weights the posterior parameter distributions based on posterior model probabilities and uses Bayes factors to test for the presence or absence of the - individual components (e.g., effect vs. no effect; Bartoš et al., 2021, - ; Maier, Bartoš & Wagenmakers, in press, - ). Users can define a wide range of non-informative + individual components (e.g., effect vs. no effect; Bartoš et al., 2022, + ; Maier, Bartoš & Wagenmakers, 2022, + ). Users can define a wide range of non-informative or informative prior distributions for the effect size, heterogeneity, and publication bias components (including selection models and PET-PEESE). 
The package provides convenient functions for summary, visualizations, and @@ -40,10 +40,9 @@ NeedsCompilation: yes Depends: R (>= 4.0.0) Imports: - BayesTools (>= 0.2.0), + BayesTools (>= 0.2.14), runjags, rjags, - coda, stats, graphics, mvtnorm, @@ -53,8 +52,11 @@ Imports: ggplot2 Suggests: parallel, - rstan, metaBMA, + metafor, + weightr, + fixest, + metadat, testthat, vdiffr, knitr, diff --git a/NAMESPACE b/NAMESPACE index afbb8c8b..1ffbb770 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -2,6 +2,7 @@ S3method(plot,RoBMA) S3method(print,RoBMA) +S3method(print,marginal_summary.RoBMA) S3method(print,summary.RoBMA) S3method(summary,RoBMA) S3method(update,RoBMA) @@ -12,22 +13,32 @@ export(OR2z) export(RoBMA) export(RoBMA.get_option) export(RoBMA.options) +export(RoBMA.reg) export(check_RoBMA) export(check_setup) +export(check_setup.reg) export(combine_data) +export(contr.meandif) +export(contr.orthonormal) export(d2OR) export(d2logOR) export(d2r) export(d2z) export(diagnostics) +export(diagnostics_autocorrelation) +export(diagnostics_density) +export(diagnostics_trace) export(dwnorm) export(forest) export(interpret) export(is.RoBMA) +export(is.RoBMA.reg) export(logOR2OR) export(logOR2d) export(logOR2r) export(logOR2z) +export(marginal_plot) +export(marginal_summary) export(n_d) export(n_r) export(n_z) @@ -35,6 +46,7 @@ export(plot_models) export(prior) export(prior_PEESE) export(prior_PET) +export(prior_factor) export(prior_informed) export(prior_none) export(prior_weightfunction) @@ -69,6 +81,7 @@ export(z2r) importFrom(BayesTools,is.prior) importFrom(BayesTools,is.prior.PEESE) importFrom(BayesTools,is.prior.PET) +importFrom(BayesTools,is.prior.factor) importFrom(BayesTools,is.prior.none) importFrom(BayesTools,is.prior.point) importFrom(BayesTools,is.prior.simple) diff --git a/NEWS.md b/NEWS.md index 538e43c2..14260114 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,17 @@ +## version 3.0 +### Features +- meta-regression with `RoBMA.reg()` function +- posterior marginal summary and plots for the `RoBMA.reg` models with `marginal_summary()` and `marginal_plot()` functions +- new vignette on hierarchical Bayesian model-averaged meta-analysis +- new vignette on robust Bayesian model-averaged meta-regression +- new vignette based on the AMPPS tutorial +- faster implementation of the JAGS multivariate normal distribution (based on the BUGS JAGS module) +- new `weight` argument in the `RoBMA` and `combine_data` functions for passing custom likelihood weights +- ability to use inverse square root weights in the weighted meta-analysis by setting the `weighted_type = "inverse_sqrt"` argument + +### Changes +- reworked interface for the hierarchical models. Prior distributions are now specified via the `priors_hierarchical` and `priors_hierarchical_null` arguments instead of `priors_rho` and `priors_rho_null`. The model summary now shows a `Hierarchical` component summary. + ## version 2.3.2 ### Fixes - suppressing start-up message diff --git a/R/RoBMA-package.R b/R/RoBMA-package.R index e7d37173..b1d95fbb 100644 --- a/R/RoBMA-package.R +++ b/R/RoBMA-package.R @@ -24,7 +24,7 @@ ##' further questions.
##' ##' @references \insertAllCited{} -##' @importFrom BayesTools is.prior is.prior.none is.prior.point is.prior.simple is.prior.PET is.prior.PEESE is.prior.weightfunction +##' @importFrom BayesTools is.prior is.prior.none is.prior.point is.prior.simple is.prior.factor is.prior.PET is.prior.PEESE is.prior.weightfunction ##' @importFrom Rdpack reprompt ##' @importFrom rlang .data "_PACKAGE" diff --git a/R/RoBMA-reg.R b/R/RoBMA-reg.R new file mode 100644 index 00000000..4def1148 --- /dev/null +++ b/R/RoBMA-reg.R @@ -0,0 +1,351 @@ +#' @title Estimate a Robust Bayesian Meta-Analysis Meta-Regression +#' +#' @description \code{RoBMA.reg} is used to estimate a Robust Bayesian +#' Meta-Analysis Meta-Regression. The interface allows a complete customization of +#' the ensemble with different prior (or list of prior) distributions +#' for each component. +#' +#' @param formula a formula for the meta-regression model +#' @param test_predictors vector of predictor names that will be tested +#' (i.e., assigned both the null and alternative prior distributions). +#' Defaults to \code{TRUE}; all predictors are tested using the default +#' prior distributions (i.e., \code{prior_covariates}, +#' \code{prior_covariates_null}, \code{prior_factors}, and +#' \code{prior_factors_null}). To only estimate +#' and adjust for the effect of predictors use \code{FALSE}. If +#' \code{priors} is specified, any settings in \code{test_predictors} +#' are overridden. +#' @param priors named list of prior distributions for each predictor +#' (with names corresponding to the predictors). It allows users to +#' specify both the null and alternative hypothesis prior distributions +#' for each predictor by assigning the corresponding element of the named +#' list with another named list (with \code{"null"} and +#' \code{"alt"}). +#' If only one prior is specified for a given parameter, it is +#' assumed to correspond to the alternative hypothesis and the default null +#' hypothesis prior distribution is specified (i.e., \code{prior_covariates_null} or +#' \code{prior_factors_null}). +#' If a named list with only one named prior distribution is provided (either +#' \code{"null"} or \code{"alt"}), only this prior distribution is used and no +#' default distribution is filled in. +#' Parameters without specified prior distributions are assumed to be only adjusted +#' for using the default alternative hypothesis prior distributions (i.e., +#' \code{prior_covariates} or \code{prior_factors}). +#' If \code{priors} is specified, \code{test_predictors} is ignored. +#' @param prior_covariates a prior distribution for the regression parameter +#' of continuous covariates on the effect size under the alternative hypothesis +#' (unless set explicitly in \code{priors}). Defaults to a relatively wide normal +#' distribution \code{prior(distribution = "normal", parameters = list(mean = 0, sd = 0.25))}. +#' @param prior_covariates_null a prior distribution for the regression parameter +#' of continuous covariates on the effect size under the null hypothesis +#' (unless set explicitly in \code{priors}). Defaults to a no-effect +#' \code{prior("spike", parameters = list(location = 0))}. +#' @param prior_factors a prior distribution for the regression parameter +#' of categorical covariates on the effect size under the alternative hypothesis +#' (unless set explicitly in \code{priors}).
Defaults to a relatively wide +#' multivariate normal distribution specifying differences from the mean contrasts +#' \code{prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), contrast = "meandif")}. +#' @param prior_factors_null a prior distributions for the regression parameter +#' of categorical covariates on the effect size under the null hypothesis +#' (unless set explicitly in \code{priors}). Defaults to a no effect +#' \code{prior("spike", parameters = list(location = 0))}. +#' @param standardize_predictors whether continuous predictors should be standardized prior to +#' estimating the model. Defaults to \code{TRUE}. +#' @inheritParams RoBMA +#' @inheritParams combine_data +#' +#' @details See [RoBMA()] for more details. +#' +#' Note that these default prior distributions are relatively wide and more informed +#' prior distributions for testing for the presence of moderation should be considered. +#' +#' +#' @references +#' \insertAllCited{} +#' +#' +#' @return \code{RoBMA.reg} returns an object of class 'RoBMA.reg'. +#' +#' @seealso [RoBMA()] [summary.RoBMA()], [update.RoBMA()], [check_setup.reg()] +#' @export +RoBMA.reg <- function( + formula, data, test_predictors = TRUE, study_names = NULL, study_ids = NULL, + transformation = if(any(colnames(data) != "y")) "fishers_z" else "none", + prior_scale = if(any(colnames(data) != "y")) "cohens_d" else "none", + standardize_predictors = TRUE, + effect_direction = "positive", + + # prior specification + priors = NULL, + model_type = NULL, + + priors_effect = prior(distribution = "normal", parameters = list(mean = 0, sd = 1)), + priors_heterogeneity = prior(distribution = "invgamma", parameters = list(shape = 1, scale = .15)), + priors_bias = list( + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.05, 0.10)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.025, 0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.05, 0.5)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1, 1), steps = c(0.025, 0.05, 0.5)), prior_weights = 1/12), + prior_PET(distribution = "Cauchy", parameters = list(0,1), truncation = list(0, Inf), prior_weights = 1/4), + prior_PEESE(distribution = "Cauchy", parameters = list(0,5), truncation = list(0, Inf), prior_weights = 1/4) + ), + priors_effect_null = prior(distribution = "point", parameters = list(location = 0)), + priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = 0)), + priors_bias_null = prior_none(), + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, + + prior_covariates = prior("normal", parameters = list(mean = 0, sd = 0.25)), + prior_covariates_null = prior("spike", parameters = list(location = 0)), + prior_factors = prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), contrast = "meandif"), + prior_factors_null = prior_factor("spike", parameters = list(location = 0), contrast = "meandif"), + + # MCMC fitting settings + chains = 3, sample = 5000, burnin = 2000, adapt = 500, thin 
= 1, parallel = FALSE, + autofit = TRUE, autofit_control = set_autofit_control(), convergence_checks = set_convergence_checks(), + + # additional settings + save = "all", seed = NULL, silent = TRUE, ...){ + + + dots <- .RoBMA_collect_dots(...) + object <- NULL + object$call <- match.call() + + + ### prepare & check the data + object$data <- .combine_data.reg(formula, data, standardize_predictors, transformation, study_names, study_ids) + object$formula <- formula + + # switch between multivariate and weighted models + if(attr(object$data[["outcome"]], "weighted")) + .weighted_warning() + + if(.is_multivariate(object)) + .multivariate_warning() + + + ### check MCMC settings + object$fit_control <- BayesTools::JAGS_check_and_list_fit_settings(chains = chains, adapt = adapt, burnin = burnin, sample = sample, thin = thin, autofit = autofit, parallel = parallel, cores = chains, silent = silent, seed = seed) + object$autofit_control <- BayesTools::JAGS_check_and_list_autofit_settings(autofit_control = autofit_control) + object$convergence_checks <- .check_and_list_convergence_checks(convergence_checks) + + + ### prepare and check the settings + object$priors <- .check_and_list_priors.reg( + priors = priors, data = object[["data"]], model_type = model_type, test_predictors = test_predictors, prior_scale = .transformation_var(prior_scale), + priors_effect_null = priors_effect_null, priors_effect = priors_effect, + priors_heterogeneity_null = priors_heterogeneity_null, priors_heterogeneity = priors_heterogeneity, + priors_bias_null = priors_bias_null, priors_bias = priors_bias, + priors_hierarchical_null = priors_hierarchical_null, priors_hierarchical = priors_hierarchical, + prior_covariates_null = prior_covariates_null, prior_covariates = prior_covariates, + prior_factors_null = prior_factors_null, prior_factors = prior_factors) + object$models <- .make_models.reg(object[["priors"]], .is_multivariate(object), .is_weighted(object), dots[["do_not_fit"]]) + + + ### additional information + object$add_info <- .check_and_list_add_info( + model_type = model_type, + predictors = attr(object[["priors"]], "terms"), + predictors_test = attr(object[["priors"]], "terms_test"), + prior_scale = .transformation_var(prior_scale), + output_scale = .transformation_var(prior_scale), + effect_measure = attr(object$data[["outcome"]], "effect_measure"), + effect_direction = effect_direction, + standardize_predictors = standardize_predictors, + seed = seed, + save = save, + warnings = NULL, + errors = NULL + ) + + # the check requires the 'add_info' object already created + object$add_info[["warnings"]] <- c(.check_effect_direction(object), .check_predictors_scaling(object)) + + + if(dots[["do_not_fit"]]){ + return(object) + } + + + ### fit the models and compute marginal likelihoods + if(!object$fit_control[["parallel"]]){ + + if(dots[["is_JASP"]]){ + .JASP_progress_bar_start(length(object[["models"]])) + } + + for(i in seq_along(object[["models"]])){ + object$models[[i]] <- .fit_RoBMA_model(object, i) + if(dots[["is_JASP"]]){ + .JASP_progress_bar_tick() + } + } + + }else{ + + fitting_order <- .fitting_priority(object[["models"]]) + + cl <- parallel::makePSOCKcluster(floor(RoBMA.get_option("max_cores") / object$fit_control[["chains"]])) + parallel::clusterEvalQ(cl, {library("RoBMA")}) + parallel::clusterExport(cl, "object", envir = environment()) + object$models <- parallel::parLapplyLB(cl, fitting_order, .fit_RoBMA_model, object = object)[order(fitting_order)] + parallel::stopCluster(cl) + + } + + + # create ensemble 
only if at least one model converged if(any(.get_model_convergence(object))){ + + # balance probability of non-converged models + if(object$convergence_checks[["balance_probability"]] && !all(.get_model_convergence(object))){ + object <- .balance_probability(object) + } + + ### compute the model-space results + object$models <- BayesTools::models_inference(object[["models"]]) + object$RoBMA <- .ensemble_inference(object) + object$coefficients <- .compute_coeficients(object[["RoBMA"]]) + } + + + ### collect and print errors and warnings + object$add_info[["errors"]] <- c(object$add_info[["errors"]], .get_model_errors(object)) + object$add_info[["warnings"]] <- c(object$add_info[["warnings"]], .get_model_warnings(object)) + .print_errors_and_warnings(object) + + + ### remove model posteriors if asked to + if(save == "min"){ + object <- .remove_model_posteriors(object) + object <- .remove_model_margliks(object) + } + + + class(object) <- c("RoBMA", "RoBMA.reg") + return(object) +} + + +.combine_data.reg <- function(formula, data, standardize_predictors, transformation, study_names, study_ids){ + + if(!is.language(formula)) + stop("The 'formula' is not specified as a formula.") + if(!is.data.frame(data)) + stop("'data' must be an object of type data.frame.") + BayesTools::check_bool(standardize_predictors, "standardize_predictors") + + + ### deal with the effect sizes + data_outcome <- combine_data( + d = if("d" %in% colnames(data)) data[,"d"], + r = if("r" %in% colnames(data)) data[,"r"], + z = if("z" %in% colnames(data)) data[,"z"], + logOR = if("logOR" %in% colnames(data)) data[,"logOR"], + t = if("t" %in% colnames(data)) data[,"t"], + y = if("y" %in% colnames(data)) data[,"y"], + se = if("se" %in% colnames(data)) data[,"se"], + v = if("v" %in% colnames(data)) data[,"v"], + n = if("n" %in% colnames(data)) data[,"n"], + lCI = if("lCI" %in% colnames(data)) data[,"lCI"], + uCI = if("uCI" %in% colnames(data)) data[,"uCI"], + weight = if("weight" %in% colnames(data)) data[,"weight"], + study_names = study_names, + study_ids = study_ids, + transformation = transformation, + return_all = FALSE) + + ### obtain the predictors part + data_predictors <- data[,!colnames(data) %in% c("d", "r", "z", "logOR", "t", "y", "se", "v", "n", "lCI", "uCI", "weight"), drop = FALSE] + + if(attr(stats::terms(formula), "response") == 1){ + formula[2] <- NULL + } + rhs <- formula[c(1,2)] + model_frame <- stats::model.frame(rhs, data = data_predictors) + + # check that intercept is specified + if(attr(attr(model_frame, "terms"),"intercept") == 0) + stop("Intercept cannot be omitted from the model (you can set the coefficient to zero via 'priors_effect').
") + + # change characters into factors + for(i in seq_along(attr(attr(model_frame, "terms"), "dataClasses"))){ + if(attr(attr(model_frame, "terms"), "dataClasses")[[i]] == "character"){ + model_frame[,names(attr(attr(model_frame, "terms"), "dataClasses"))[i]] <- + as.factor(model_frame[,names(attr(attr(model_frame, "terms"), "dataClasses"))[i]]) + attr(attr(model_frame, "terms"), "dataClasses")[[i]] <- "factor" + } + } + + + model_frame <- as.list(model_frame) + if(length(model_frame) == 0){ + data_predictors <- list() + }else{ + data_predictors <- model_frame[1:length(model_frame)] + } + attr(data_predictors, "variables") <- attr(attr(model_frame, "terms"), "term.labels")[attr(attr(model_frame, "terms"), "order") == 1] + attr(data_predictors, "terms") <- attr(attr(model_frame, "terms"), "term.labels") + attr(data_predictors, "terms_type") <- attr(attr(model_frame, "terms"), "dataClasses") + + + # add additional information about the predictors + data_predictors_info <- list() + to_warn <- NULL + for(i in seq_along(data_predictors)){ + if(attr(data_predictors, "terms_type")[i] == "numeric"){ + + data_predictors_info[[names(data_predictors)[i]]] <- list( + type = "continuous", + mean = mean(data_predictors[[names(data_predictors)[i]]]), + sd = stats::sd(data_predictors[[names(data_predictors)[i]]]) + ) + + if(standardize_predictors){ + data_predictors[[names(data_predictors)[i]]] <- .pred_scale(data_predictors[[names(data_predictors)[i]]], data_predictors_info[[names(data_predictors)[i]]]) + }else if(RoBMA.get_option("check_scaling") && (abs(mean(data_predictors[[names(data_predictors)[i]]])) > 0.01 | abs(1 - stats::sd(data_predictors[[names(data_predictors)[i]]])) > 0.01)){ + to_warn <- c(to_warn, names(data_predictors)[i]) + } + + }else if(attr(data_predictors, "terms_type")[i] == "factor"){ + + data_predictors_info[[names(data_predictors)[i]]] <- list( + type = "factor", + default = levels(data_predictors[[names(data_predictors)[i]]])[1], + levels = levels(data_predictors[[names(data_predictors)[i]]]) + ) + + } + } + attr(data_predictors, "variables_info") <- data_predictors_info + + + output <- list( + outcome = data_outcome, + predictors = data_predictors + ) + + # throw warnings and errors + if(length(to_warn) > 0){ + scaling_warning <- paste0("The continuous predictors ", paste0("'", to_warn, "'", collapse = ", "), " are not scaled. Note that extra care need to be taken when specifying prior distributions for unscaled predictors.") + warning(scaling_warning, immediate. = TRUE, call. = FALSE) + warning("You can suppress this and following warnings via 'RoBMA.options(check_scaling = FALSE)'. To automatically rescale predictors set 'standardize_predictors = TRUE'.", immediate. = TRUE, call. 
= FALSE) + attr(output, "warnings") <- scaling_warning + } + + # check for reserved words + if(any(attr(data_predictors, "terms") %in% .reserved_words())) + stop(paste0("The following variable names are internally reserved keywords and cannot be used: ", + paste0(" '", attr(data_predictors, "terms")[attr(data_predictors, "terms") %in% .reserved_words()], "' ", collapse = ", "))) + + return(output) +} +.pred_scale <- function(x, predictor_info){ + (x - predictor_info[["mean"]]) / predictor_info[["sd"]] +} +.pred_unscale <- function(x, predictor_info){ + x * predictor_info[["sd"]] + predictor_info[["mean"]] +} diff --git a/R/main.R b/R/RoBMA.R similarity index 90% rename from R/main.R rename to R/RoBMA.R index 3a70c537..9e53fb96 100644 --- a/R/main.R +++ b/R/RoBMA.R @@ -62,13 +62,13 @@ #' @param priors_bias_null list of prior weight functions for the \code{omega} parameter #' that will be treated as belonging to the null hypothesis. Defaults no publication #' bias adjustment, \code{prior_none()}. -#' @param priors_rho list of prior distributions for the variance allocation (\code{rho}) -#' parameter that will be treated as belonging to the alternative hypothesis. This setting allows -#' users to fit a three-level meta-analysis when \code{study_ids} are supplied. Note that this is -#' an experimental feature and see News for more details. Defaults to a beta distribution +#' @param priors_hierarchical list of prior distributions for the correlation of random effects +#' (\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +#' users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +#' Note that this is an experimental feature and see News for more details. Defaults to a beta distribution #' \code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}. -#' @param priors_rho_null list of prior distributions for the variance allocation (\code{rho}) -#' parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}. +#' @param priors_hierarchical_null list of prior distributions for the correlation of random effects +#' (\code{rho}) parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}. #' @param chains a number of chains of the MCMC algorithm. #' @param sample a number of sampling iterations of the MCMC algorithm. #' Defaults to \code{5000}. 
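For illustration, a minimal sketch of the renamed hierarchical interface, assuming hypothetical vectors 'd', 'se', and 'study_ids' (the beta prior mirrors the documented default):

fit_hierarchical <- RoBMA(
  d = d, se = se, study_ids = study_ids,   # hypothetical effect sizes, standard errors, and clustering ids
  priors_hierarchical      = prior("beta", parameters = list(alpha = 1, beta = 1)),
  priors_hierarchical_null = NULL
)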
@@ -184,7 +184,7 @@ RoBMA <- function( # data specification d = NULL, r = NULL, logOR = NULL, z = NULL, y = NULL, se = NULL, v = NULL, n = NULL, lCI = NULL, uCI = NULL, t = NULL, study_names = NULL, study_ids = NULL, - data = NULL, + data = NULL, weight = NULL, transformation = if(is.null(y)) "fishers_z" else "none", prior_scale = if(is.null(y)) "cohens_d" else "none", effect_direction = "positive", @@ -206,8 +206,8 @@ RoBMA <- function( priors_effect_null = prior(distribution = "point", parameters = list(location = 0)), priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = 0)), priors_bias_null = prior_none(), - priors_rho = prior("beta", parameters = list(alpha = 1, beta = 1)), - priors_rho_null = NULL, + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, # MCMC fitting settings chains = 3, sample = 5000, burnin = 2000, adapt = 500, thin = 1, parallel = FALSE, @@ -225,19 +225,16 @@ RoBMA <- function( if("data.RoBMA" %in% class(data)){ object$data <- data }else{ - object$data <- combine_data(d = d, r = r, z = z, logOR = logOR, t = t, y = y, se = se, v = v, n = n, lCI = lCI, uCI = uCI, study_names = study_names, study_ids = study_ids, data = data, transformation = transformation) + object$data <- combine_data(d = d, r = r, z = z, logOR = logOR, t = t, y = y, se = se, v = v, n = n, lCI = lCI, uCI = uCI, study_names = study_names, study_ids = study_ids, weight = weight, data = data, transformation = transformation) } # switch between multivariate and weighted models - if(!attr(object$data, "all_independent")){ - if(dots[["weighted"]]){ - .weighted_warning() - attr(object$data, "all_independent") <- TRUE - attr(object$data, "weighted") <- TRUE - }else{ - .multivariate_warning() - } - } + if(attr(object$data, "weighted")) + .weighted_warning() + + if(.is_multivariate(object)) + .multivariate_warning() + ### check MCMC settings object$fit_control <- BayesTools::JAGS_check_and_list_fit_settings(chains = chains, adapt = adapt, burnin = burnin, sample = sample, thin = thin, autofit = autofit, parallel = parallel, cores = chains, silent = silent, seed = seed) @@ -260,10 +257,20 @@ RoBMA <- function( ### prepare and check the settings - object$priors <- .check_and_list_priors(object$add_info[["model_type"]], priors_effect_null, priors_effect, priors_heterogeneity_null, priors_heterogeneity, priors_bias_null, priors_bias, priors_rho_null, priors_rho, object$add_info[["prior_scale"]]) - object$models <- .make_models(object[["priors"]], !attr(object$data, "all_independent"), attr(object$data, "weighted")) + object$priors <- .check_and_list_priors( + model_type = object$add_info[["model_type"]], + priors_effect_null = priors_effect_null, priors_effect = priors_effect, + priors_heterogeneity_null = priors_heterogeneity_null, priors_heterogeneity = priors_heterogeneity, + priors_bias_null = priors_bias_null, priors_bias = priors_bias, + priors_hierarchical_null = priors_hierarchical_null, priors_hierarchical = priors_hierarchical, + prior_scale = object$add_info[["prior_scale"]]) + object$models <- .make_models(object[["priors"]], .is_multivariate(object), .is_weighted(object)) object$add_info$warnings <- c(object$add_info[["warnings"]], .check_effect_direction(object)) + if(dots[["do_not_fit"]]){ + return(object) + } + ### fit the models and compute marginal likelihoods if(!object$fit_control[["parallel"]]){ @@ -363,13 +370,13 @@ RoBMA <- function( #' @param prior_bias_null prior distribution for the publication 
bias adjustment #' component that will be treated as belonging to the null hypothesis. #' Defaults to \code{NULL}. -#' @param prior_rho prior distributions for the variance allocation (\code{rho}) -#' parameter that will be treated as belonging to the alternative hypothesis. This setting allows -#' users to fit a three-level meta-analysis when \code{study_ids} are supplied. Note that this is -#' an experimental feature and see News for more details. Defaults to a beta distribution +#' @param prior_hierarchical prior distribution for the correlation of random effects +#' (\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +#' users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +#' Note that this is an experimental feature and see News for more details. Defaults to a beta distribution #' \code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}. -#' @param prior_rho_null prior distributions for the variance allocation (\code{rho}) -#' parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}. +#' @param prior_hierarchical_null prior distribution for the correlation of random effects +#' (\code{rho}) parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}. #' @param prior_weights either a single value specifying prior model weight #' of a newly specified model using priors argument, or a vector of the #' same length as already fitted models to update their prior weights. @@ -408,8 +415,8 @@ RoBMA <- function( #' @seealso [RoBMA()], [summary.RoBMA()], [prior()], [check_setup()] #' @export update.RoBMA <- function(object, refit_failed = TRUE, - prior_effect = NULL, prior_heterogeneity = NULL, prior_bias = NULL, prior_rho = NULL, prior_weights = NULL, - prior_effect_null = NULL, prior_heterogeneity_null = NULL, prior_bias_null = NULL, prior_rho_null = NULL, + prior_effect = NULL, prior_heterogeneity = NULL, prior_bias = NULL, prior_hierarchical = NULL, prior_weights = NULL, + prior_effect_null = NULL, prior_heterogeneity_null = NULL, prior_bias_null = NULL, prior_hierarchical_null = NULL, study_names = NULL, chains = NULL, adapt = NULL, burnin = NULL, sample = NULL, thin = NULL, autofit = NULL, parallel = NULL, autofit_control = NULL, convergence_checks = NULL, @@ -434,10 +441,13 @@ update.RoBMA <- function(object, refit_failed = TRUE, (!is.null(prior_heterogeneity) | !is.null(prior_heterogeneity_null)) & (!is.null(prior_bias) | !is.null(prior_bias_null))){ + if(is.RoBMA.reg(object)) + stop("Adding a new model to the ensemble is not possible with RoBMA.reg models.") + what_to_do <- "fit_new_model" - new_priors <- .check_and_list_priors(NULL, prior_effect_null, prior_effect, prior_heterogeneity_null, prior_heterogeneity, prior_bias_null, prior_bias, prior_rho_null, prior_rho, object$add_info[["prior_scale"]]) + new_priors <- .check_and_list_priors(NULL, prior_effect_null, prior_effect, prior_heterogeneity_null, prior_heterogeneity, prior_bias_null, prior_bias, prior_hierarchical_null, prior_hierarchical, object$add_info[["prior_scale"]]) - object$models[length(object$models) + 1] <- list(.make_models(new_priors, !attr(object$data, "all_independent"), attr(object$data, "weighted"))[[1]]) + object$models[length(object$models) + 1] <- list(.make_models(new_priors, .is_multivariate(object), .is_weighted(object))[[1]]) if(!is.null(prior_weights)){ object$models[[length(object$models)]]$prior_weights <- prior_weights diff 
--git a/R/check-input-and-settings.R b/R/check-input-and-settings.R index 70eb6947..122ff9ee 100644 --- a/R/check-input-and-settings.R +++ b/R/check-input-and-settings.R @@ -10,7 +10,7 @@ #' #' @return \code{check_setup} invisibly returns list of summary tables. #' -#' @seealso [RoBMA()] +#' @seealso [check_setup.reg()] [RoBMA()] #' @export check_setup <- function( model_type = NULL, @@ -29,15 +29,14 @@ check_setup <- function( priors_effect_null = prior(distribution = "point", parameters = list(location = 0)), priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = 0)), priors_bias_null = prior_none(), - priors_rho = prior("beta", parameters = list(alpha = 1, beta = 1)), - priors_rho_null = NULL, + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, models = FALSE, silent = FALSE){ object <- list() - object$priors <- .check_and_list_priors(tolower(model_type), priors_effect_null, priors_effect, priors_heterogeneity_null, priors_heterogeneity, priors_bias_null, priors_bias, priors_rho, priors_rho_null, object$add_info[["prior_scale"]]) + object$priors <- .check_and_list_priors(model_type, priors_effect_null, priors_effect, priors_heterogeneity_null, priors_heterogeneity, priors_bias_null, priors_bias, priors_hierarchical_null, priors_hierarchical, "d") object$models <- .make_models(object[["priors"]], multivariate = FALSE, weighted = FALSE) - ### model types overview effect <- sapply(object$models, function(model)!.is_component_null(model[["priors"]], "effect")) heterogeneity <- sapply(object$models, function(model)!.is_component_null(model[["priors"]], "heterogeneity")) @@ -108,7 +107,7 @@ check_setup <- function( "Bias" = priors_bias, "prior_prob" = prior_prob ) - class(summary) <- c("BayesTools_table", "BayesTools_ensemble_summary", class(summary)) + class(summary) <- c("BayesTools_table", "BayesTools_ensemble_inference", class(summary)) attr(summary, "type") <- c("integer", rep("prior", 3), "prior_prob") attr(summary, "rownames") <- FALSE attr(summary, "title") <- "Models overview:" @@ -133,6 +132,251 @@ check_setup <- function( } +#' @title Prints summary of \code{"RoBMA.reg"} ensemble implied by the specified priors +#' and formula +#' +#' @description \code{check_setup.reg} prints a summary of the \code{"RoBMA.reg"} ensemble +#' implied by the specified prior distributions. It is useful for checking +#' the ensemble configuration prior to fitting all of the models. +#' +#' @inheritParams check_setup +#' @inheritParams RoBMA.reg +#' +#' @return \code{check_setup.reg} invisibly returns list of summary tables.
+#' +#' @seealso [check_setup()] [RoBMA.reg()] +#' @export +check_setup.reg <- function( + formula, data, test_predictors = TRUE, study_names = NULL, study_ids = NULL, + transformation = if(any(colnames(data) != "y")) "fishers_z" else "none", + prior_scale = if(any(colnames(data) != "y")) "cohens_d" else "none", + standardize_predictors = TRUE, + effect_direction = "positive", + + # prior specification + priors = NULL, + model_type = NULL, + + priors_effect = prior(distribution = "normal", parameters = list(mean = 0, sd = 1)), + priors_heterogeneity = prior(distribution = "invgamma", parameters = list(shape = 1, scale = .15)), + priors_bias = list( + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.05, 0.10)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.025, 0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.05, 0.5)), prior_weights = 1/12), + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1, 1), steps = c(0.025, 0.05, 0.5)), prior_weights = 1/12), + prior_PET(distribution = "Cauchy", parameters = list(0,1), truncation = list(0, Inf), prior_weights = 1/4), + prior_PEESE(distribution = "Cauchy", parameters = list(0,5), truncation = list(0, Inf), prior_weights = 1/4) + ), + priors_effect_null = prior(distribution = "point", parameters = list(location = 0)), + priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = 0)), + priors_bias_null = prior_none(), + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, + + prior_covariates = prior("normal", parameters = list(mean = 0, sd = 0.25)), + prior_covariates_null = prior("spike", parameters = list(location = 0)), + prior_factors = prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), contrast = "meandif"), + prior_factors_null = prior("spike", parameters = list(location = 0)), + models = FALSE, silent = FALSE, ...){ + + + # use 'do_not_fit' with RoBMA.reg + object <- RoBMA.reg( + ### passed arguments + formula = formula, data = data, test_predictors = test_predictors, study_names = study_names, study_ids = study_ids, + transformation = transformation, + prior_scale = prior_scale, + standardize_predictors = standardize_predictors, + effect_direction = effect_direction, + + priors = priors, + model_type = model_type, + + priors_effect = priors_effect, + priors_heterogeneity = priors_heterogeneity, + priors_bias = priors_bias, + priors_effect_null = priors_effect_null, + priors_heterogeneity_null = priors_heterogeneity_null, + priors_bias_null = priors_bias_null, + priors_hierarchical = priors_hierarchical, + priors_hierarchical_null = priors_hierarchical_null, + + prior_covariates = prior_covariates, + prior_covariates_null = prior_covariates_null, + prior_factors = prior_factors, + prior_factors_null = prior_factors_null, + + do_not_fit = TRUE, + + ### fitting settings defaults + # MCMC fitting settings + chains = 3, sample = 5000, burnin = 2000, adapt = 500, thin = 1, parallel = FALSE, + autofit = TRUE, autofit_control = set_autofit_control(), 
convergence_checks = set_convergence_checks(), + + # additional settings + save = "all", seed = NULL, silent = TRUE) + + + ### Components summary + effect <- sapply(object$models, function(model)!.is_component_null(model[["priors"]], "effect")) + heterogeneity <- sapply(object$models, function(model)!.is_component_null(model[["priors"]], "heterogeneity")) + bias <- sapply(object$models, function(model)!.is_component_null(model[["priors"]], "bias")) + + # obtain the parameter types + weightfunctions <- sapply(object$models, function(model)any(sapply(model[["priors"]], is.prior.weightfunction))) + PET <- sapply(object$models, function(model)any(sapply(model[["priors"]], is.prior.PET))) + PEESE <- sapply(object$models, function(model)any(sapply(model[["priors"]], is.prior.PEESE))) + + # number of model types + n_models <- c( + mu = sum(effect), + tau = sum(heterogeneity), + omega = sum(bias) + ) + + # extract model weights + prior_weights <- sapply(object$models, function(m) m$prior_weights) + # standardize model weights + prior_weights <- prior_weights / sum(prior_weights) + # conditional model weights + models_prior <- c( + mu <- sum(prior_weights[effect]), + tau <- sum(prior_weights[heterogeneity]), + omega <- sum(prior_weights[bias]) + ) + + # create overview table + components.tab <- data.frame( + "models" = n_models, + "prior_prob" = models_prior + ) + rownames(components.tab) <- c("Effect", "Heterogeneity", "Bias") + + class(components.tab) <- c("BayesTools_table", "BayesTools_ensemble_inference", class(components.tab)) + attr(components.tab, "type") <- c("n_models", "prior_prob") + attr(components.tab, "rownames") <- TRUE + attr(components.tab, "n_models") <- length(object$models) + attr(components.tab, "title") <- "Components summary:" + attr(components.tab, "footnotes") <- NULL + attr(components.tab, "warnings") <- NULL + + + ### Meta-regression components summary + model_predictors <- lapply(object$models, function(model) model[["terms"]]) + model_predictors_test <- lapply(object$models, function(model) model[["terms_test"]]) + + predictors <- object$add_info[["predictors"]] + predictors_test <- object$add_info[["predictors_test"]] + + # define inference options + components_predictors <- NULL + parameters_predictors <- "mu_intercept" + components_predictors_null <- list() + parameters_predictors_null <- list("mu_intercept" = !effect) + + components_predictors_distributions <- NULL + components_predictors_distributions_null <- list() + + # predictors + for(i in seq_along(predictors_test)){ + components_predictors <- c(components_predictors, .BayesTools_parameter_name(predictors_test[i])) + components_predictors_null[[.BayesTools_parameter_name(predictors_test[i])]] <- + sapply(model_predictors_test, function(x) if(length(x) == 0) TRUE else !(predictors_test[i] %in% x)) + } + + for(i in seq_along(predictors)){ + parameters_predictors <- c(parameters_predictors, .BayesTools_parameter_name(predictors[i])) + parameters_predictors_null[[.BayesTools_parameter_name(predictors[i])]] <- + sapply(model_predictors_test, function(x) if(length(x) == 0) TRUE else !(predictors[i] %in% x)) + } + + # create overview table + if(length(components_predictors_null) > 0){ + + components_predictors.tab <- data.frame( + "models" = sapply(components_predictors_null, sum), + "prior_prob" = sapply(seq_along(components_predictors_null), function(i) sum(prior_weights[components_predictors_null[[i]]])) + ) + rownames(components_predictors.tab) <- .output_parameter_names(components_predictors) + + 
class(components_predictors.tab) <- c("BayesTools_table", "BayesTools_ensemble_inference", class(components_predictors.tab)) + attr(components_predictors.tab, "type") <- c("n_models", "prior_prob") + attr(components_predictors.tab, "rownames") <- TRUE + attr(components_predictors.tab, "n_models") <- length(object$models) + attr(components_predictors.tab, "title") <- "Meta-regression components summary:" + attr(components_predictors.tab, "footnotes") <- NULL + attr(components_predictors.tab, "warnings") <- NULL + + }else{ + + components_predictors.tab <- BayesTools::ensemble_inference_empty_table(title = "Meta-regression components summary:") + components_predictors.tab <- BayesTools::remove_column(components_predictors.tab, 4) + components_predictors.tab <- BayesTools::remove_column(components_predictors.tab, 3) + + } + + + ### store summaries in the object + object$components <- components.tab + object$components_predictors <- components_predictors.tab + + + ### model details + if(models){ + priors_heterogeneity <- sapply(1:length(object$models), function(i)print(object$models[[i]]$priors$tau, silent = TRUE)) + priors_bias <- sapply(1:length(object$models), function(i){ + if(weightfunctions[i]){ + print(object$models[[i]]$priors$omega, silent = TRUE) + }else if(PET[i]){ + print(object$models[[i]]$priors$PET, silent = TRUE) + }else if(PEESE[i]){ + print(object$models[[i]]$priors$PEESE, silent = TRUE) + }else{ + "" + } + }) + prior_weights <- sapply(1:length(object$models), function(i)object$models[[i]]$prior_weights) + prior_prob <- prior_weights / sum(prior_weights) + + summary <- data.frame("Model" = 1:length(object$models)) + for(p in seq_along(parameters_predictors)){ + summary <- cbind(summary, sapply(1:length(object$models), function(i)print(object$models[[i]]$priors$terms[[.output_parameter_names(parameters_predictors[p])]], silent = TRUE))) + colnames(summary)[p+1] <- .output_parameter_names(parameters_predictors[i]) + } + summary <- cbind( + summary, + "Heterogeneity" = priors_heterogeneity, + "Bias" = priors_bias, + "prior_prob" = prior_prob + ) + class(summary) <- c("BayesTools_table", "BayesTools_ensemble_summary", class(summary)) + attr(summary, "type") <- c("integer", rep("prior", 2 + length(parameters_predictors)), "prior_prob") + attr(summary, "rownames") <- FALSE + attr(summary, "title") <- "Models overview:" + attr(summary, "footnotes") <- NULL + attr(summary, "warnings") <- NULL + + object$summary <- summary + } + + + if(!silent){ + cat("Robust Bayesian meta-regression (set-up)\n") + print(components.tab, quote = FALSE, right = TRUE) + + cat("\n") + print(components_predictors.tab, quote = FALSE, right = TRUE) + + if(models){ + cat("\n") + print(summary, quote = FALSE, right = TRUE) + } + } + + return(invisible(object)) +} #' @title Control MCMC fitting process #' @@ -209,6 +453,7 @@ set_convergence_checks <- function(max_Rhat = 1.05, min_ESS = 500, max_error = } + .update_fit_control <- function(old_fit_control, chains, adapt, burnin, sample, thin, autofit, parallel, cores, silent, seed){ if(is.null(chains)){ @@ -333,31 +578,40 @@ set_convergence_checks <- function(max_Rhat = 1.05, min_ESS = 500, max_error = convergence_checks[["balance_probability"]] <- balance_probability return(convergence_checks) } -.check_and_list_add_info <- function(model_type, prior_scale, output_scale, effect_measure, effect_direction, seed, save, warnings, errors){ +.check_and_list_add_info <- function(model_type, predictors = NULL, predictors_test = NULL, prior_scale, output_scale, 
effect_measure, effect_direction, standardize_predictors = NULL, seed, save, warnings, errors){ BayesTools::check_char(effect_direction, "effect_direction", allow_values = c("positive", "negative")) BayesTools::check_real(seed, "seed", allow_NULL = TRUE) BayesTools::check_char(save, "save", allow_values = c("min", "all")) model_type <- .check_and_set_model_type(model_type, prior_scale) + BayesTools::check_char(predictors, "predictors", allow_NULL = TRUE, check_length = 0) + BayesTools::check_char(predictors_test, "predictors_test", allow_NULL = TRUE, check_length = 0) + BayesTools::check_bool(standardize_predictors, "standardize_predictors", allow_NULL = TRUE) if((prior_scale == "y" & effect_measure != "y") | (prior_scale != "y" & effect_measure == "y")) stop("Prior / effect size transformations are not available for unstandardized effect sizes.", call. = FALSE) - return(list( - model_type = model_type, - prior_scale = prior_scale, - output_scale = output_scale, - effect_measure = effect_measure, - effect_direction = effect_direction, - seed = seed, - save = save, - warnings = warnings, - errors = errors - )) + add_info <- list( + model_type = model_type, + predictors = predictors, + predictors_test = predictors_test, + prior_scale = prior_scale, + output_scale = output_scale, + effect_measure = effect_measure, + effect_direction = effect_direction, + standardize_predictors = standardize_predictors, + seed = seed, + save = save, + warnings = warnings, + errors = errors, + version = utils::packageVersion("RoBMA") + ) + + return(add_info) } .check_and_set_model_type <- function(model_type, prior_scale){ - if(!is.null(model_type)){ + if(length(model_type) != 0){ model_type <- tolower(model_type) BayesTools::check_char(model_type, "model_type", allow_values = c("psma", "2w", "pp")) if(prior_scale != "d") @@ -370,12 +624,18 @@ set_convergence_checks <- function(max_Rhat = 1.05, min_ESS = 500, max_error = warnings <- NULL + if(!is.null(object$data[["outcome"]])){ + data <- object$data[["outcome"]] + }else{ + data <- object[["data"]] + } + # check whether majority of effect sizes are in expected direction. throw warning if not. - if(any(sapply(object$priors$omega, function(p)p$distribution) == "one.sided") | - any(grepl("PET", sapply(object$priors$omega, function(p)p$distribution))) | - any(grepl("PEESE", sapply(object$priors$omega, function(p)p$distribution)))){ - if(stats::median(object$data$y) > 0 & object$control$effect_direction == "negative" | - stats::median(object$data$y) < 0 & object$control$effect_direction == "positive"){ + if(any(sapply(object$priors[["bias"]], function(p) p[["distribution"]]) == "one.sided") | + any(grepl("PET", sapply(object$priors[["bias"]], function(p) p[["distribution"]]))) | + any(grepl("PEESE", sapply(object$priors[["bias"]], function(p) p[["distribution"]])))){ + if(stats::median(data[["y"]]) > 0 & object$add_info[["effect_direction"]] == "negative" | + stats::median(data[["y"]]) < 0 & object$add_info[["effect_direction"]] == "positive"){ warnings <- "The majority of effect sizes is in the oposite direction than expected. The direction of effect sizes is important for the one-sided weight functions. Please, check the 'effect_direction' argument in 'RoBMA' fitting function." 
} } @@ -384,14 +644,31 @@ set_convergence_checks <- function(max_Rhat = 1.05, min_ESS = 500, max_error = return(warnings) } +.check_predictors_scaling <- function(object){ + warnings <- NULL + predictors <- object[["data"]][["predictors"]] + + # check that all continuous predictors are centered and scaled + for(i in seq_along(predictors)){ + + if(attr(predictors, "variables_info")[[i]][["type"]] == "continuous"){ + if(!(isTRUE(all.equal(mean(predictors[[i]]), 0)) && isTRUE(all.equal(stats::sd(predictors[[i]]), 1)))){ + warnings <- c(warnings, paste0("The continuous predictor '",names(predictors[i]),"' is not standardized. Be careful about the specified prior distribution and hypothesis test.")) + } + } + + } + + return(warnings) +} # some functions for the JASP implementation .RoBMA_collect_dots <- function(...){ dots <- list(...) - known_dots <- c("is_JASP", "weighted") + known_dots <- c("is_JASP", "weighted", "do_not_fit", "weighted_type") if(any(!names(dots) %in% known_dots)) stop(paste0("Uknown arguments to 'RoBMA': ", paste("'", names(dots)[!names(dots) %in% known_dots], "'", collapse = ", "), "."), call. = FALSE) @@ -405,6 +682,21 @@ set_convergence_checks <- function(max_Rhat = 1.05, min_ESS = 500, max_error = dots[["weighted"]] <- FALSE }else{ BayesTools::check_bool(dots[["weighted"]], "weighted") + + # select weight type + if(is.null(dots[["weighted_type"]])){ + attr(dots[["weighted"]], "type") <- "inverse" + }else{ + BayesTools::check_char(dots[["weighted_type"]], "weighted_type", allow_values = c("inverse", "inverse_sqrt", "custom")) + attr(dots[["weighted"]], "type") <- dots[["weighted_type"]] + dots[["weighted_type"]] <- NULL + } + } + + if(is.null(dots[["do_not_fit"]])){ + dots[["do_not_fit"]] <- FALSE + }else{ + BayesTools::check_bool(dots[["do_not_fit"]], "do_not_fit") } return(dots) diff --git a/R/check-priors-and-models.R b/R/check-priors-and-models.R index 438495b0..cb18b459 100644 --- a/R/check-priors-and-models.R +++ b/R/check-priors-and-models.R @@ -1,5 +1,8 @@ ### functions for creating model objects -.check_and_list_priors <- function(model_type, priors_effect_null, priors_effect, priors_heterogeneity_null, priors_heterogeneity, priors_bias_null, priors_bias, priors_rho_null, priors_rho, prior_scale){ +.check_and_list_priors <- function(model_type, priors_effect_null, priors_effect, priors_heterogeneity_null, priors_heterogeneity, priors_bias_null, priors_bias, priors_hierarchical_null, priors_hierarchical, prior_scale){ + + # format the model-type (in RoBMA.reg, the add_info check is run only after .check_and_list_priors) + model_type <- .check_and_set_model_type(model_type, prior_scale) if(!is.null(model_type) & length(model_type == 1)){ # precanned models @@ -16,11 +19,11 @@ prior_PET(distribution = "Cauchy", parameters = list(0,1), truncation = list(0, Inf), prior_weights = 1/4), prior_PEESE(distribution = "Cauchy", parameters = list(0,5), truncation = list(0, Inf), prior_weights = 1/4) ) - priors_rho <- NULL + priors_hierarchical <- NULL priors_effect_null <- prior(distribution = "point", parameters = list(location = 0)) priors_heterogeneity_null <- prior(distribution = "point", parameters = list(location = 0)) priors_bias_null <- prior_none() - priors_rho_null <- NULL + priors_hierarchical_null <- NULL }else if(model_type == "pp"){ priors_effect <- prior(distribution = "normal", parameters = list(mean = 0, sd = 1)) priors_heterogeneity <- prior(distribution = "invgamma", parameters = list(shape = 1, scale = .15)) @@ -28,11 +31,11 @@ prior_PET(distribution =
"Cauchy", parameters = list(0,1), truncation = list(0, Inf), prior_weights = 1/2), prior_PEESE(distribution = "Cauchy", parameters = list(0,5), truncation = list(0, Inf), prior_weights = 1/2) ) - priors_rho <- NULL + priors_hierarchical <- NULL priors_effect_null <- prior(distribution = "point", parameters = list(location = 0)) priors_heterogeneity_null <- prior(distribution = "point", parameters = list(location = 0)) priors_bias_null <- prior_none() - priors_rho_null <- NULL + priors_hierarchical_null <- NULL }else if(model_type == "2w"){ priors_effect <- prior(distribution = "normal", parameters = list(mean = 0, sd = 1)) priors_heterogeneity <- prior(distribution = "invgamma", parameters = list(shape = 1, scale = .15)) @@ -40,11 +43,11 @@ prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/2), prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, 1), steps = c(0.05, 0.10)), prior_weights = 1/2) ) - priors_rho <- NULL + priors_hierarchical <- NULL priors_effect_null <- prior(distribution = "point", parameters = list(location = 0)) priors_heterogeneity_null <- prior(distribution = "point", parameters = list(location = 0)) priors_bias_null <- prior_none() - priors_rho_null <- NULL + priors_hierarchical_null <- NULL }else{ stop("Unknown 'model_type'.") } @@ -54,14 +57,137 @@ priors$effect <- .check_and_list_component_priors(priors_effect_null, priors_effect, "effect") priors$heterogeneity <- .check_and_list_component_priors(priors_heterogeneity_null, priors_heterogeneity, "heterogeneity") priors$bias <- .check_and_list_component_priors(priors_bias_null, priors_bias, "bias") - priors$rho <- .check_and_list_component_priors(priors_rho_null, priors_rho, "rho") + priors$hierarchical <- .check_and_list_component_priors(priors_hierarchical_null, priors_hierarchical, "hierarchical") return(priors) } +.check_and_list_priors.reg <- function(priors, data, model_type, test_predictors, prior_scale, + priors_effect_null, priors_effect, + priors_heterogeneity_null, priors_heterogeneity, + priors_bias_null, priors_bias, + priors_hierarchical_null, priors_hierarchical, + prior_covariates_null, prior_covariates, + prior_factors_null, prior_factors){ + + priors_main <- .check_and_list_priors( + model_type = model_type, + priors_effect_null = priors_effect_null, + priors_effect = priors_effect, + priors_heterogeneity_null = priors_heterogeneity_null, + priors_heterogeneity = priors_heterogeneity, + priors_bias_null = priors_bias_null, + priors_bias = priors_bias, + priors_hierarchical_null = priors_hierarchical_null, + priors_hierarchical = priors_hierarchical, + prior_scale = prior_scale + ) + + predictors <- attr(data[["predictors"]],"terms") + predictors_type <- attr(data[["predictors"]],"terms_type") + + # check the input + if(!is.prior.simple(prior_covariates_null) || is.prior.factor(prior_covariates_null)) + stop("The default prior for covariates (null) is not a valid prior distribution.", call. = FALSE) + if(!is.prior.simple(prior_covariates) || is.prior.factor(prior_covariates)) + stop("The default prior for covariates is not a valid prior distribution.", call. = FALSE) + if(!is.prior.factor(prior_factors_null) & !is.prior.point(prior_factors_null)) + stop("The default prior for factors (null) is not a valid prior distribution.", call. = FALSE) + if(!is.prior.factor(prior_factors) & !is.prior.point(prior_factors)) + stop("The default prior for factors is not a valid prior distribution.", call. 
= FALSE)
+
+  # check for reserved words
+  if(any(names(priors) %in% .reserved_words()))
+    stop(paste0("The following prior names are internally reserved keywords and cannot be used: ",
+                paste0(" '", names(priors)[names(priors) %in% .reserved_words()], "' ", collapse = ", ")), call. = FALSE)
+
+
+  # complete the prior distribution specification
+  if(is.null(priors)){
+
+    # standardize possible inputs for no predictors testing
+    if(is.null(test_predictors) || isFALSE(test_predictors) || length(test_predictors) == 0){
+      # no testing
+      test_predictors <- character()
+    }else if(isTRUE(test_predictors)){
+      # tests all predictors with default priors
+      test_predictors <- predictors
+    }else{
+      BayesTools::check_char(test_predictors, "test_predictors", check_length = FALSE, allow_values = predictors)
+    }
+
+    # update the predictors specification
+    priors <- list()
+
+    to_test <- predictors[predictors %in% test_predictors]
+    no_test <- predictors[!predictors %in% test_predictors]
+
+    for(i in seq_along(to_test)){
+      priors[[to_test[i]]] <- list(
+        "null" = if(predictors_type[to_test[i]] == "factor") prior_factors_null else prior_covariates_null,
+        "alt"  = if(predictors_type[to_test[i]] == "factor") prior_factors else prior_covariates
+      )
+    }
+    for(i in seq_along(no_test)){
+      priors[[no_test[i]]] <- list(
+        "alt"  = if(predictors_type[no_test[i]] == "factor") prior_factors else prior_covariates
+      )
+    }
+
+  }else{
+
+    priors_by_user  <- priors
+    priors          <- list()
+    test_predictors <- character()
+
+    # reformat and update the user-specified priors
+    for(i in seq_along(predictors)){
+
+      p <- predictors[i]
+
+      if(is.null(priors_by_user[[p]])){
+        # no user specified priors -- default estimation only
+        priors[[p]] <- list(
+          "alt"  = if(predictors_type[p] == "factor") prior_factors else prior_covariates
+        )
+
+      }else if(is.prior(priors_by_user[[p]])){
+        # a single, unnamed prior distribution -- assume it's alternative and perform default test
+        test_predictors <- c(test_predictors, p)
+        priors[[p]] <- list(
+          "null" = if(predictors_type[p] == "factor") prior_factors_null else prior_covariates_null,
+          "alt"  = priors_by_user[[p]]
+        )
+
+      }else if(is.list(priors_by_user[[p]]) && length(priors_by_user[[p]]) == 1 && length(names(priors_by_user[[p]])) == 1 &&
+               names(priors_by_user[[p]]) %in% c("null", "alt") && is.prior(priors_by_user[[p]][[1]])){
+        # a single, named prior distribution -- do not add any additional prior
+        priors[[p]] <- priors_by_user[[p]]
+
+      }else if(is.list(priors_by_user[[p]]) && length(priors_by_user[[p]]) == 2 && length(names(priors_by_user[[p]])) == 2 &&
+               all(names(priors_by_user[[p]]) %in% c("null", "alt")) && all(sapply(priors_by_user[[p]], is.prior))){
+        # both prior distributions are specified by the user
+        test_predictors <- c(test_predictors, p)
+        priors[[p]] <- priors_by_user[[p]]
+
+      }else{
+        stop(paste0("The prior distribution for '",p,"' is specified incorrectly."))
+      }
+    }
+
+  }
+
+
+  # add attributes
+  priors_main$terms <- priors
+  attr(priors_main, "terms")      <- predictors
+  attr(priors_main, "terms_test") <- if(length(test_predictors) == 0) NULL else test_predictors
+
+  return(priors_main)
+}

 .check_and_list_component_priors <- function(priors_null, priors_alt, component){

   # check that at least one prior is specified (either null or alternative)
-  if(component != "rho" && (is.null(priors_null) & is.null(priors_alt)))
+  if(component != "hierarchical" && (is.null(priors_null) & is.null(priors_alt)))
     stop(paste0("At least one prior needs to be specified for the ", component," parameter (either null or alternative)."))

   # create an empty list if user didn't specified priors
@@ -101,7 +227,7 @@

   # check that the passed priors are supported for the component (and replace none placeholders)
-  if(component %in% c("effect", "heterogeneity")){
+  if(component %in% c("effect", "heterogeneity", "hierarchical")){

     for(p in seq_along(priors)){
@@ -126,6 +252,24 @@
           priors[[p]]$truncation[["lower"]] <- 0
           warning(paste0("The range of a prior distribution for ", component, " component cannot be negative. The lower truncation point was set to zero."), immediate. = TRUE)
         }
+      }else if(component == "hierarchical"){
+        if(priors[[p]][["distribution"]] == "point" && abs(priors[[p]]$parameters[["location"]]) > 1){
+          stop("The location of a point prior distribution for the hierarchical correlation must be within the [-1, 1] interval.")
+        }else if(priors[[p]][["distribution"]] == "uniform" && (priors[[p]]$parameters[["a"]] < -1 | priors[[p]]$parameters[["b"]] > 1)){
+          stop("The uniform prior distribution for the hierarchical correlation cannot be defined outside of the [-1, 1] interval.")
+        }
+
+        if(priors[[p]]$truncation[["lower"]] < -1){
+          priors[[p]]$truncation[["lower"]] <- -1
+          warning("The range of a prior distribution for the hierarchical correlation cannot be lower than -1. The lower truncation point was set to -1.", immediate. = TRUE)
+        }
+        if(priors[[p]]$truncation[["upper"]] > 1){
+          priors[[p]]$truncation[["upper"]] <- 1
+          warning("The range of a prior distribution for the hierarchical correlation cannot be higher than 1. The upper truncation point was set to 1.", immediate. = TRUE)
+        }
+        if(priors[[p]]$truncation[["lower"]] > priors[[p]]$truncation[["upper"]]){
+          stop("Invalid lower and upper truncation points for the hierarchical correlation.", call.
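# [Editor's illustration] Examples of hierarchical-correlation priors that would pass the checks
# above (support restricted to the [-1, 1] interval); the specific choices are illustrative only,
# not the package defaults:
#   prior(distribution = "beta",  parameters = list(alpha = 1, beta = 1))   # support on [0, 1]
#   prior(distribution = "point", parameters = list(location = 0))          # fixed at zero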
= TRUE) + } } } @@ -139,30 +283,23 @@ if(!(is.prior.PET(priors[[p]]) | is.prior.PEESE(priors[[p]]) | is.prior.weightfunction(priors[[p]]) | is.prior.none(priors[[p]]))) stop(paste0("'", print(priors[[p]], silent = TRUE),"' prior distribution is not supported for the bias component.")) } - }else if(component == "rho"){ - - for(p in seq_along(priors)){ - - # check for allowed priors - if(!(priors[[p]][["distribution"]] == "beta")) - stop(paste0("'", print(priors[[p]], silent = TRUE),"' prior distribution is not supported for the rho component.")) - } } return(priors) } -.make_models <- function(priors, multivariate, weighted){ + +.make_models <- function(priors, multivariate, weighted){ # create models according to the set priors models <- NULL for(effect in priors[["effect"]]){ for(heterogeneity in priors[["heterogeneity"]]){ for(bias in priors[["bias"]]){ - if(!is.null(priors[["rho"]]) && multivariate){ - for(rho in priors[["rho"]]){ + if(!is.null(priors[["hierarchical"]]) && multivariate){ + for(hierarchical in priors[["hierarchical"]]){ models <- c( models, - list(.make_model(effect, heterogeneity, bias, rho, effect[["prior_weights"]] * heterogeneity[["prior_weights"]] * bias[["prior_weights"]] * rho[["prior_weights"]], multivariate, weighted)) + list(.make_model(effect, heterogeneity, bias, hierarchical, effect[["prior_weights"]] * heterogeneity[["prior_weights"]] * bias[["prior_weights"]] * hierarchical[["prior_weights"]], multivariate, weighted)) ) } }else{ @@ -177,7 +314,7 @@ return(models) } -.make_model <- function(prior_effect, prior_heterogeneity, prior_bias, prior_rho, prior_weights, multivariate, weighted){ +.make_model <- function(prior_effect, prior_heterogeneity, prior_bias, prior_hierarchical, prior_weights, multivariate, weighted){ priors <- list() @@ -191,19 +328,84 @@ priors$omega <- prior_bias } # add 3 level structure only if there is heterogeneity - if(!(prior_heterogeneity[["distribution"]] == "point" && prior_heterogeneity$parameters[["location"]] == 0) && !is.null(prior_rho)){ - priors$rho <- prior_rho + if(!(prior_heterogeneity[["distribution"]] == "point" && prior_heterogeneity$parameters[["location"]] == 0) && !is.null(prior_hierarchical)){ + priors$rho <- prior_hierarchical } model <- list( - priors = priors, + priors = priors, prior_weights = prior_weights, prior_weights_set = prior_weights ) class(model) <- "RoBMA.model" - attr(model, "multivariate") <- multivariate && !is.null(priors$rho) - attr(model, "weighted") <- weighted + attr(model, "multivariate") <- multivariate && !is.null(priors$rho) + attr(model, "weighted") <- weighted + attr(model, "weighted_type") <- attr(weighted, "type") + + return(model) +} + +.make_models.reg <- function(priors, multivariate, weighted, do_not_fit){ + + models_base <- .make_models(priors = priors, multivariate = multivariate, weighted = weighted) + + ### create grid of the models + grid <- list( + model = seq_along(models_base) + ) + + # add prior terms + terms <- attr(priors, "terms") + for(i in seq_along(attr(priors, "terms"))){ + grid[[terms[i]]] <- names(priors[["terms"]][[terms[i]]]) + } + + grid <- do.call(expand.grid, grid) + + if(nrow(grid) > 50 && !do_not_fit) + warning(sprintf("You are about to estimate %i models based on the model formula and prior specification.", nrow(grid)), immediate. = TRUE, call. 
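# [Editor's note] Rough arithmetic behind the warning above: the number of fitted models equals the
# number of base meta-analytic models times 2 for every tested predictor (one "null" and one "alt"
# prior each). For example, assuming a 36-model base ensemble and two tested predictors, the grid
# has 36 * 2 * 2 = 144 rows, which exceeds the 50-model threshold and triggers the warning.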
= FALSE)
+
+  ### create empty model objects for fitting
+  models <- lapply(1:nrow(grid), function(i) .make_model.reg(models_base[[grid[i,1]]], grid[i,-1,drop=FALSE], priors))
+
+  return(models)
+}
+.make_model.reg <- function(model_base, grid_row, priors, multivariate, weighted){
+
+  model_priors  <- model_base[["priors"]]
+  prior_weights <- model_base[["prior_weights"]]
+  terms         <- attr(priors, "terms")
+
+  ### add priors for the terms
+  model_priors[["terms"]] <- list()
+
+  # rename mu to the intercept
+  model_priors[["terms"]][["intercept"]] <- model_priors[["mu"]]
+  model_priors[["mu"]] <- NULL
+
+  terms_test <- NULL
+  for(i in seq_along(terms)){
+    model_priors[["terms"]][[terms[i]]] <- priors[["terms"]][[terms[i]]][[grid_row[,terms[i]]]]
+    prior_weights <- prior_weights * priors[["terms"]][[terms[i]]][[grid_row[,terms[i]]]]$prior_weights
+    model_priors[["terms"]][[terms[i]]][["is_null"]] <- grid_row[,terms[i]] == "null"
+    if(grid_row[,terms[i]] == "alt"){
+      terms_test <- c(terms_test, terms[i])
+    }
+  }
+
+  model <- list(
+    priors            = model_priors,
+    terms             = terms,
+    terms_test        = terms_test,
+    prior_weights     = prior_weights,
+    prior_weights_set = prior_weights
+  )
+
+  class(model) <- "RoBMA.reg.model"
+  attr(model, "multivariate")  <- attr(model_base, "multivariate")
+  attr(model, "weighted")      <- attr(model_base, "weighted")
+  attr(model, "weighted_type") <- attr(model_base, "weighted_type")

   return(model)
 }
diff --git a/R/data.R b/R/datasets.R
similarity index 51%
rename from R/data.R
rename to R/datasets.R
index e2567332..f47646d8 100644
--- a/R/data.R
+++ b/R/datasets.R
@@ -51,3 +51,42 @@
 #' @references
 #' \insertAllCited{}
 "Poulsen2006"
+
+#' @title 881 estimates from 69 studies of a relationship between employment and
+#' educational outcomes collected by \insertCite{kroupova2021student;textual}{RoBMA}
+#'
+#' @description The data set contains partial correlation coefficients, standard errors,
+#' study labels, sample sizes, type of the educational outcome, intensity of the
+#' employment, gender of the student population, study location, study design, whether
+#' the study controlled for endogeneity, and whether the study controlled for motivation.
+#' The original data set including additional variables and the publication can be found
+#' at http://meta-analysis.cz/students.
+#' (Note that some standard errors and employment intensities are missing.)
+#'
+#' @format A data.frame with 11 columns and 881 observations.
+#'
+#' @return a data.frame.
+#'
+#' @references
+#' \insertAllCited{}
+"Kroupova2021"
+
+#' @title 18 studies of a relationship between acculturation mismatch and
+#' intergenerational cultural conflict collected by
+#' \insertCite{lui2015intergenerational;textual}{RoBMA}
+#'
+#' @description The data set contains correlation coefficients r,
+#' sample sizes n, and labels for each study assessing the
+#' relationship between acculturation mismatch (that is the result of the contrast
+#' between the collectivist cultures of Asian and Latin immigrant groups
+#' and the individualist culture in the United States) and intergenerational cultural
+#' conflict \insertCite{lui2015intergenerational}{RoBMA} which was used as an
+#' example in \insertCite{bartos2020adjusting;textual}{RoBMA}.
+#'
+#' @format A data.frame with 3 columns and 18 observations.
+#'
+#' @return a data.frame.
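# [Editor's illustration] A minimal sketch of loading the bundled data sets once the package is
# installed; the calls assume the standard data() mechanism (column names are described in the
# roxygen entries above):
#   data("Kroupova2021", package = "RoBMA")
#   data("Lui2015",      package = "RoBMA")
#   head(Lui2015)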
+#' +#' @references +#' \insertAllCited{} +"Lui2015" diff --git a/R/diagnostics.R b/R/diagnostics.R index 798b2069..05d9917a 100644 --- a/R/diagnostics.R +++ b/R/diagnostics.R @@ -60,9 +60,16 @@ #' of class 'ggplot2' if \code{plot_type = "ggplot2"}. #' #' @seealso [RoBMA()], [summary.RoBMA()] -#' @export +#' @name diagnostics +#' @aliases diagnostics_autocorrelation diagnostics_trace diagnostics_density +#' @export diagnostics +#' @export diagnostics_density +#' @export diagnostics_autocorrelation +#' @export diagnostics_trace + +#' @rdname diagnostics diagnostics <- function(fit, parameter, type, plot_type = "base", show_models = NULL, - lags = 30, title = is.null(show_models) | length(show_models) > 1, ...){ + lags = 30, title = is.null(show_models) | length(show_models) > 1, ...){ # check settings if(!is.RoBMA(fit)) @@ -75,11 +82,11 @@ diagnostics <- function(fit, parameter, type, plot_type = "base", show_models = # deal with bad type names if(substr(type, 1, 1) == "c"){ - type <- "chains" + type <- "trace" }else if(substr(type, 1, 1) == "t"){ - type <- "chains" # for trace + type <- "trace" # for trace }else if(substr(type, 1, 1) == "d"){ - type <- "densities" + type <- "density" }else if(substr(type, 1, 1) == "a"){ type <- "autocorrelation" }else{ @@ -88,11 +95,23 @@ diagnostics <- function(fit, parameter, type, plot_type = "base", show_models = # deal with bad parameter names for PET-PEESE, weightfunction if(tolower(gsub("-", "", gsub("_", "", gsub(".", "", parameter, fixed = TRUE),fixed = TRUE), fixed = TRUE)) %in% c("weightfunction", "weigthfunction", "omega")){ - parameter <- "omega" - }else if(parameter %in% c("mu", "tau", "PET", "PEESE")){ - parameters <- parameter + parameter <- "omega" + parameter_samples <- "omega" + }else if(parameter %in% c("tau", "rho", "PET", "PEESE")){ + parameter <- parameter + parameter_samples <- parameter + }else if(parameter == "mu"){ + parameter <- parameter + parameter_samples <- if(is.RoBMA.reg(fit)) "mu_intercept" else "mu" + }else if(is.RoBMA.reg(fit) && parameter %in% fit$add_info[["predictors"]]){ + parameter <- parameter + parameter_samples <- .BayesTools_parameter_name(parameter) }else{ - stop("The passed parameter is not supported for plotting. See '?plot.RoBMA' for more details.") + if(is.RoBMA.reg(fit)){ + stop(paste0("The passed parameter does not correspond to any of main model parameter ('mu', 'tau', 'omega', 'PET', 'PEESE') or any of the specified predictors: ", paste0("'", fit$add_info[["predictors"]], "'", collapse = ", "), ". See '?plot.RoBMA' for more details.")) + }else{ + stop(paste0("The passed parameter does not correspond to any of main model parameter ('mu', 'tau', 'omega', 'PET', 'PEESE'). See '?plot.RoBMA' for more details.")) + } } # omit the first figure for publication bias weights (it is constants for all interesting weightfunctions) @@ -115,338 +134,83 @@ diagnostics <- function(fit, parameter, type, plot_type = "base", show_models = if(plot_type == "base" & (length(models_ind) > 1 | parameter == "omega")) message("Multiple plots will be produced. 
See '?layout' for help with setting multiple plots.") + dots <- .set_dots_diagnostics(..., type = type, chains = fit[["fit_control"]][["chains"]]) + plots <- list() - for(m in models_ind){ + for(i in models_ind){ - temp_out <- NULL - # add ability to transform the estimates with 'par_transform' - temp_data <- .diagnostics_plot_data(fit, m, parameter, NULL) + model_parameters <- c(names(attr(fit$models[[i]][["fit"]], "prior_list"))) - # deal with no parameter in model - if(is.null(temp_data)){ - if(length(models_ind) == 1){ - message("Selected model does not containt the parameter of interest.") - return(invisible(NULL)) - }else{ - out[m] <- temp_out - next - } - } + if(!parameter_samples %in% model_parameters){ - # make the plots - par_ind <- 1:length(temp_data) - if(!is.null(show_figures)){ - par_ind <- par_ind[show_figures] - } + plots[[i]] <- NULL - for(i in par_ind){ - if(type == "chains"){ - temp_out <- c(temp_out, list(.diagnostics_plot_trace(temp_data[[i]], plot_type, if(title) m else NULL, ...))) - }else if(type == "densities"){ - temp_out <- c(temp_out, list(.diagnostics_plot_density(temp_data[[i]], plot_type, if(title) m else NULL, parameter, ...))) - }else if(type == "autocorrelation"){ - temp_out <- c(temp_out, list(.diagnostics_plot_ac(temp_data[[i]], plot_type, if(title) m else NULL, lags, ...))) - } - } + }else if(inherits(fit$models[[i]][["fit"]], "null_model")){ - if(length(temp_out) == 1){ - temp_out <- temp_out[[1]] - } + plots[[i]] <- NULL - if(length(models_ind) == 1){ - out <- temp_out }else{ - out[m] <- list(temp_out) - } - } - - # return the plots - if(plot_type == "base"){ - return(invisible(NULL)) - }else if(plot_type == "ggplot"){ - return(out) - } -} - - -.diagnostics_plot_trace <- function(plot_data, plot_type, title, ...){ - - if(plot_type == "base"){ - - # save plotting settings - oldpar <- graphics::par(no.readonly = TRUE) - on.exit(graphics::par(mar = oldpar[["mar"]])) - # set up margins - if(length(list(...)) == 0){ - graphics::par(mar = c(4, 4, if(!is.null(title)) 3 else 1, 1)) - }else{ - graphics::par(list(...)) - } - - graphics::plot(NA, type = "n", xlim = range(plot_data$samp$iteration), ylim = range(plot_data$samp$value), - xlab = "", ylab = "", bty = "n", las = 1) - for(i in as.numeric(unique(plot_data$samp$chain))){ - graphics::lines(plot_data$samp$iteration[plot_data$samp$chain == i], plot_data$samp$value[plot_data$samp$chain == i], - col = .diagnostics_color(plot_data$nchains)[i]) - } - if(!is.null(title)){ - graphics::mtext(paste0("Model ",title), side = 3, line = 1, cex = 1.25) - } - graphics::mtext(plot_data$parameter, side = 2, line = 2.5, cex = 1.25) - - graph <- NULL - - }else if(plot_type == "ggplot"){ - - graph <- ggplot2::ggplot( - data = data.frame( - x = plot_data$samp$iteration, - y = plot_data$samp$value, - color = plot_data$samp$chain), - mapping = ggplot2::aes( - x = .data[["x"]], - y = .data[["y"]], - color = .data[["color"]])) + - ggplot2::geom_path() + - ggplot2::scale_color_manual(name = "chain", values = .diagnostics_color(plot_data$nchains)) - temp_x_range <- range(plot_data$samp$iteration) - temp_y_range <- range(plot_data$samp$value) - graph <- graph + ggplot2::scale_x_continuous( - name = "Iterations", - limits = temp_x_range, - breaks = pretty(temp_x_range, n = 3), - labels = pretty(temp_x_range, n = 3) - ) + - ggplot2::scale_y_continuous( - name = plot_data$parameter, - limits = temp_y_range, - breaks = pretty(temp_y_range), - labels = pretty(temp_y_range) - ) - if(!is.null(title)){ - graph <- graph + 
ggplot2::ggtitle(paste0("Model ",title)) - } - } - - return(graph) -} -.diagnostics_plot_density <- function(plot_data, plot_type, title, par, ...){ - - if(plot_type == "base"){ - - # save plotting settings - oldpar <- graphics::par(no.readonly = TRUE) - on.exit(graphics::par(mar = oldpar[["mar"]])) - - # set up margins - if(length(list(...)) == 0){ - graphics::par(mar = c(4, 4, if(!is.null(title)) 3 else 1, 1)) - }else{ - graphics::par(list(...)) - } - with_trunc <- list() - if(!is.infinite(plot_data$lower)){ - with_trunc$from <- plot_data$lower - } - if(!is.infinite(plot_data$upper)){ - with_trunc$to <- plot_data$upper - } - - - temp_den <- vector(mode = "list", length = length(unique(plot_data$samp$chain))) - for(i in as.numeric(unique(plot_data$samp$chain))){ - # deal with first weights if requested - if(all(plot_data$samp$value[plot_data$samp$chain == i] == 1) & par == "omega"){ - temp_den[[i]] <- NULL - }else{ - temp_den[[i]] <- do.call(stats::density, c(list(x = plot_data$samp$value[plot_data$samp$chain == i]), with_trunc)) - } - } - - graphics::plot( - NA, type = "n", - xlim = if(all(sapply(temp_den, is.null))) c(0, 1) else range(sapply(1:length(temp_den), function(i)temp_den[[i]]$x)), - ylim = if(all(sapply(temp_den, is.null))) c(0, 1) else c(0, max(sapply(1:length(temp_den), function(i)temp_den[[i]]$y))), - xlab = "", ylab = "", bty = "n", las = 1) - for(i in 1:length(temp_den)){ - if(is.null(temp_den[[i]]) & par == "omega"){ - graphics::arrows(x0 = 1, y0 = 0, y1 = 1, lwd = 2, lty = 1, col = .diagnostics_color(plot_data$nchains)[i]) - }else{ - graphics::lines(temp_den[[i]], col = .diagnostics_color(plot_data$nchains)[i]) - graphics::polygon( - x = c(if(!is.infinite(plot_data$lower)) plot_data$lower, temp_den[[i]]$x, if(!is.infinite(plot_data$upper)) plot_data$upper), - y = c(if(!is.infinite(plot_data$lower)) 0, temp_den[[i]]$y, if(!is.infinite(plot_data$upper)) 0), - border = .diagnostics_color(plot_data$nchains)[i], - col = scales::alpha(.diagnostics_color(plot_data$nchains)[i], alpha = .5)) + # get the parameter name + args <- dots + args$fit <- fit$models[[i]][["fit"]] + args$parameter <- parameter_samples + args$parameter_names <- if(parameter %in% c("mu", "tau")) .plot.RoBMA_par_names(parameter, fit, fit$add_info[["prior_scale"]])[[1]] + args$type <- type + args$plot_type <- plot_type + args$lags <- lags + args$transformations <- NULL + args$transform_factors <- TRUE + args$short_name <- FALSE + args$parameter_names <- FALSE + args$formula_prefix <- FALSE + + if(!is.null(title) && title){ + args$main <- paste0("Model ", i) } - } - if(!is.null(title)){ - graphics::mtext(paste0("Model ",title), side = 3, line = 1, cex = 1.25) - } - graphics::mtext(if(all(sapply(temp_den, is.null))) "Probability" else "Density", side = 2, line = 2.5, cex = 1.25) - graphics::mtext(plot_data$parameter, side = 1, line = 2.5, cex = 1.25) - - graph <- NULL - - }else if(plot_type == "ggplot"){ - graph <- ggplot2::ggplot( - data = data.frame( - x = plot_data$samp$value, - fill = plot_data$samp$chain), - mapping = ggplot2::aes( - x = .data[["x"]], - fill = .data[["fill"]])) + - ggplot2::geom_density(color = "black", alpha = .5) + - ggplot2::scale_fill_manual(name = "chain", values = .diagnostics_color(plot_data$nchains)) - temp_y_max <- max(ggplot2::ggplot_build(graph)$data[[1]]$density) - temp_x_range <- if(par == "omega") c(0, 1) else range(plot_data$samp$value) - graph <- graph + ggplot2::scale_y_continuous( - name = "Density", - limits = range(pretty(c(0, temp_y_max))), - breaks = pretty(c(0, 
temp_y_max)), - labels = pretty(c(0, temp_y_max)) - ) + - ggplot2::scale_x_continuous( - name = plot_data$parameter, - limits = range(pretty(temp_x_range)), - breaks = pretty(temp_x_range), - labels = pretty(temp_x_range) - ) - if(!is.null(title)){ - graph <- graph + ggplot2::ggtitle(paste0("Model ",title)) + plots[[i]] <- do.call(BayesTools::JAGS_diagnostics, args) } - } - return(graph) -} -.diagnostics_plot_ac <- function(plot_data, plot_type, title, lags = 30, ...){ - ac_dat <- .diagnostics_ac_data(dat = plot_data$samp, lags = lags) + # return the plots if(plot_type == "base"){ - - # save plotting settings - oldpar <- graphics::par(no.readonly = TRUE) - on.exit(graphics::par(mar = oldpar[["mar"]])) - - # set up margins - if(length(list(...)) == 0){ - graphics::par(mar = c(4,4,3,1)) - }else{ - graphics::par(list(...)) - } - - temp_dat <- as.numeric(by(ac_dat$ac, ac_dat$lag, mean)) - temp_dat[is.nan(temp_dat)] <- 1 - graphics::barplot(temp_dat, names.arg = unique(ac_dat$lag), col = "#B2001D", las = 1) - graphics::mtext("Lag", side = 1, line = 2.5, cex = 1.25) - graphics::mtext("Avg. autocorrelation", side = 2, line = 2.5, cex = 1.25) - if(!is.null(title)){ - graphics::mtext(bquote(paste("Model"," ", .(title),":"," ", .(eval(plot_data$parameter)))), side = 3, line = 1, cex = 1.25) - }else{ - graphics::mtext(plot_data$parameter, side = 3, line = 1, cex = 1.25) - } - - graph <- NULL - + return(invisible(plots)) }else if(plot_type == "ggplot"){ - graph <- ggplot2::ggplot( - data = data.frame( - x = ac_dat$lag, - y = ac_dat$ac), - mapping = ggplot2::aes( - x = .data[["x"]], - y = .data[["y"]])) + - ggplot2::geom_bar(linewidth = .5, color = "black", fill = "#B2001D", position = "dodge", stat = "summary", fun = "mean") + - ggplot2::scale_y_continuous(breaks = seq(0, 1, 0.25)) + - ggplot2::labs(x = "Lag", y = "Avg. autocorrelation") - if(!is.null(title)){ - graph <- graph + ggplot2::ggtitle(bquote(paste("Model"," ", .(title),":"," ", .(eval(plot_data$parameter))))) - }else{ - graph <- graph + ggplot2::ggtitle(plot_data$parameter) + if(length(plots) == 1){ + plots <- plots[[1]] } + return(plots) } - - return(graph) } -.diagnostics_ac_data <- function(dat, lags){ - ch <- dat[, grep("chain", colnames(dat))] - nc <- length(unique(ch)) - ac_list <- tapply(dat$value, INDEX = ch, FUN = function(x)stats::acf(x, lag.max = lags, plot = FALSE)$acf[, , 1L], simplify = FALSE) - nl <- lags + 1 - ch <- factor(rep(1:nc, each = nl), labels = paste0("chain:", 1:nc)) - ll <- rep(seq(0, lags), nc) - - return(data.frame(chains = ch, ac = do.call(c, ac_list), lag = ll)) -} -.diagnostics_color <- function(n){ - return(rep_len(c("#E66101", "#998EC3", "#542788", "#F1A340", "#D8DAEB", "#FEE0B6"), n)) +#' @rdname diagnostics +diagnostics_autocorrelation <- function(fit, parameter = NULL, plot_type = "base", show_models = NULL, + lags = 30, title = is.null(show_models) | length(show_models) > 1, ...){ + diagnostics(fit = fit, parameter = parameter, type = "autocorrelation", plot_type = plot_type, show_models = show_models, lags = lags, title = title, ...) 
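# [Editor's illustration] This wrapper and the ones below only fix the 'type' argument of
# diagnostics(); a hypothetical call on a fitted object 'fit' could look like:
#   diagnostics_autocorrelation(fit, parameter = "mu")
#   diagnostics_trace(fit, parameter = "tau", show_models = 2)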
} -.diagnostics_plot_data <- function(fit, model, par, par_transform){ - - if(length(fit$models[[model]]$fit) == 0){ - - return(NULL) - - }else{ - - # do not plot spike priors - if(is.prior.point(fit$models[[model]]$priors[[par]])) - return(NULL) - - samples <- coda::as.array.mcmc.list(fit$models[[model]]$fit$mcmc, drop = FALSE) - if(!any(grepl(par, dimnames(samples)$var))) - return(NULL) - - # create parameter names and get parameter indexes - if(par %in% c("mu", "tau", "PET", "PEESE")){ - ind <- c(1:length(dimnames(samples)$var))[par == dimnames(samples)$var] - par_names <- .plot.RoBMA_par_names(par, fit, fit$add_info$prior_scale) - }else{ - ind <- c(1:length(dimnames(samples)$var))[grepl(par, dimnames(samples)$var)] - ind <- rev(ind) - summary_info <- summary(fit, "individual") - summary_info <- summary_info[["models"]][[model]][["estimates"]] - omega_names <- rownames(summary_info)[grepl(par, rownames(summary_info))] - par_names <- vector("list", length = length(omega_names)) - for(i in 1:length(par_names)){ - par_names[[i]] <- bquote(~omega[~.(substr(omega_names[i],6,nchar(omega_names[i])))]) - } - } +#' @rdname diagnostics +diagnostics_trace <- function(fit, parameter = NULL, plot_type = "base", show_models = NULL, + title = is.null(show_models) | length(show_models) > 1, ...){ + diagnostics(fit = fit, parameter = parameter, type = "trace", plot_type = plot_type, show_models = show_models, title = title, ...) +} - plot_data <- list() - for(i in 1:length(ind)){ - plot_data[[dimnames(samples)$var[ind[i]]]] <- list( - samp = data.frame( - value = as.vector(samples[,ind[i],]), - parameter = dimnames(samples)$var[ind[i]], - chain = as.factor(c(unlist(sapply(1:dim(samples)[3], function(x)rep(x,dim(samples)[1]))))), - iteration = rep(1:dim(samples)[1], dim(samples)[3]) - ), - nchains = dim(samples)[3], - nparams = 1, - warmup = 0, - parameter = par_names[[i]], - lower = if(par == "omega") 0 else fit$models[[model]]$priors[[par]]$truncation[["lower"]], - upper = if(par == "omega") 1 else fit$models[[model]]$priors[[par]]$truncation[["upper"]] - ) - } +#' @rdname diagnostics +diagnostics_density <- function(fit, parameter = NULL, plot_type = "base", show_models = NULL, + title = is.null(show_models) | length(show_models) > 1, ...){ + diagnostics(fit = fit, parameter = parameter, type = "density", plot_type = plot_type, show_models = show_models, title = title, ...) +} - # TODO: implement later - # transform the values if requested - # if(par_transform){ - # if(par %in% c("mu", "theta") & fit$add_info$effect_size %in% c("r", "OR")){ - # plot_data[[1]]$samp$value <- .transform(plot_data[[1]]$samp$value, fit$add_info$effect_size, fit$add_info$transformation) - # } - # } +.set_dots_diagnostics <- function(..., type, chains){ + dots <- list(...) 
+ if(is.null(dots[["col"]])){ + dots[["col"]] <- if(type == "autocorrelation") "black" else rev(scales::viridis_pal()(chains)) } - return(plot_data) + return(dots) } diff --git a/R/fit-and-marglik.R b/R/fit-and-marglik.R index 8067223f..5054f076 100644 --- a/R/fit-and-marglik.R +++ b/R/fit-and-marglik.R @@ -18,27 +18,76 @@ # don't sample the complete null model if(!.is_model_constant(priors)){ + if(attr(model, "multivariate")){ + object[["data"]] <- .order_data.mv(object[["data"]], inherits(model, "RoBMA.reg.model")) + } + + # deal with regression vs basic models + if(inherits(model, "RoBMA.reg.model")){ + data_outcome <- object[["data"]][["outcome"]] + fit_priors <- priors[names(priors) != "terms"] + formula_list <- .generate_model_formula_list(object[["formula"]]) + formula_data_list <- .generate_model_formula_data_list(object[["data"]]) + formula_prior_list <- .generate_model_formula_prior_list(priors) + }else if(inherits(model, "RoBMA.model")){ + data_outcome <- object[["data"]] + fit_priors <- priors + formula_list <- NULL + formula_data_list <- NULL + formula_prior_list <- NULL + } + + # deal with multivariate vs univariate models if(attr(model, "multivariate")){ # generate the model syntax - model_syntax <- .generate_model_syntax.mv(priors, add_info[["effect_direction"]], add_info[["prior_scale"]], add_info[["effect_measure"]], object[["data"]]) + model_syntax <- .generate_model_syntax.mv( + priors = fit_priors, + effect_direction = add_info[["effect_direction"]], + priors_scale = add_info[["prior_scale"]], + effect_measure = add_info[["effect_measure"]], + data = data_outcome, + regression = inherits(model, "RoBMA.reg.model") + ) # remove unnecessary objects from data to mitigate warnings - fit_data <- .fit_data.mv(object[["data"]], priors, add_info[["effect_direction"]], add_info[["prior_scale"]]) + fit_data <- .fit_data.mv( + data = data_outcome, + priors = fit_priors, + effect_direction = add_info[["effect_direction"]], + prior_scale = add_info[["prior_scale"]] + ) + }else{ # generate the model syntax - model_syntax <- .generate_model_syntax(priors, add_info[["effect_direction"]], add_info[["prior_scale"]], add_info[["effect_measure"]], attr(model, "weighted")) + model_syntax <- .generate_model_syntax( + priors = fit_priors, + effect_direction = add_info[["effect_direction"]], + priors_scale = add_info[["prior_scale"]], + effect_measure = add_info[["effect_measure"]], + weighted = attr(model, "weighted"), + regression = inherits(model, "RoBMA.reg.model") + ) # remove unnecessary objects from data to mitigate warnings - fit_data <- .fit_data(object[["data"]], priors, add_info[["effect_direction"]], add_info[["prior_scale"]], attr(model, "weighted")) + fit_data <- .fit_data( + data = data_outcome, + priors = fit_priors, + effect_direction = add_info[["effect_direction"]], + prior_scale = add_info[["prior_scale"]], + weighted = attr(model, "weighted"), + weighted_type = attr(model, "weighted_type") + ) } - # fit the model fit <- BayesTools::JAGS_fit( model_syntax = model_syntax, data = fit_data, - prior_list = priors, + prior_list = fit_priors, + formula_list = formula_list, + formula_data_list = formula_data_list, + formula_prior_list = formula_prior_list, chains = fit_control[["chains"]], adapt = fit_control[["adapt"]], burnin = fit_control[["burnin"]], @@ -73,7 +122,7 @@ has_posterior <- TRUE check_fit <- BayesTools::JAGS_check_convergence( fit = fit, - prior_list = priors, + prior_list = attr(fit, "prior_list"), max_Rhat = convergence_checks[["max_Rhat"]], min_ESS = 
convergence_checks[["min_ESS"]], max_error = convergence_checks[["max_error"]], @@ -92,16 +141,19 @@ if(length(fit) != 0){ marglik <- BayesTools::JAGS_bridgesampling( - fit = fit, - data = fit_data, - prior_list = priors, - log_posterior = if(attr(model, "multivariate")) .marglik_function.mv else .marglik_function, - maxiter = 50000, - silent = fit_control[["silent"]], - priors = priors, - effect_direction = add_info[["effect_direction"]], - prior_scale = add_info[["prior_scale"]], - effect_measure = add_info[["effect_measure"]] + fit = fit, + data = fit_data, + prior_list = fit_priors, + formula_list = formula_list, + formula_data_list = formula_data_list, + formula_prior_list = formula_prior_list, + log_posterior = if(attr(model, "multivariate")) .marglik_function.mv else .marglik_function, + maxiter = 50000, + silent = fit_control[["silent"]], + priors = priors, + effect_direction = add_info[["effect_direction"]], + prior_scale = add_info[["prior_scale"]], + effect_measure = add_info[["effect_measure"]] ) # deal with failed marginal likelihoods @@ -124,29 +176,58 @@ }else{ - fit_data <- .fit_data(object[["data"]], priors, add_info[["effect_direction"]], add_info[["prior_scale"]], attr(model, "weighted")) + # deal with regression vs basic models + if(inherits(model, "RoBMA.reg.model")){ + + # check that all terms but intercept are spikes at zero + if(any(sapply(priors[["terms"]][names(priors[["terms"]]) != "intercept"], function(prior) prior$parameters[["location"]] != 0))) + stop("All constant model can include only non-zero intercept parameter.") + + data_outcome <- object[["data"]][["outcome"]] + const_location <- priors[["terms"]][["intercept"]]$parameters[["location"]] + fit_priors <- c(priors[names(priors) != "terms"], priors[["terms"]]) + names(fit_priors)[names(fit_priors) %in% names(priors[["terms"]])] <- paste0("mu_", names(fit_priors)[names(fit_priors) %in% names(priors[["terms"]])]) + fit_priors <- .add_priors_levels(fit_priors, object[["data"]][["predictors"]]) + }else if(inherits(model, "RoBMA.model")){ + data_outcome <- object[["data"]] + fit_priors <- priors + const_location <- priors$mu$parameters[["location"]] + } + + if(fit_priors[["tau"]]$parameters[["location"]] != 0) + stop("All constant model cannot include non zero heterogeneity parameter.") + + fit_data <- .fit_data( + data = data_outcome, + priors = priors, + effect_direction = add_info[["effect_direction"]], + prior_scale = add_info[["prior_scale"]], + weighted = attr(model, "weighted"), + weighted_type = attr(model, "weighted_type") + ) converged <- TRUE has_posterior <- FALSE fit <- list() - attr(fit, "prior_list") <- priors + attr(fit, "prior_list") <- fit_priors class(fit) <- "null_model" marglik <- list() + # weighted vs unweighted models if(attr(model, "weighted")){ - marglik$logml <- sum(stats::dnorm(fit_data[["y"]], priors$mu$parameters[["location"]], fit_data[["se"]], log = TRUE) * fit_data[["weight"]]) + marglik$logml <- sum(stats::dnorm(fit_data[["y"]], const_location, fit_data[["se"]], log = TRUE) * fit_data[["weight"]]) }else{ - marglik$logml <- sum(stats::dnorm(fit_data[["y"]], priors$mu$parameters[["location"]], fit_data[["se"]], log = TRUE)) + marglik$logml <- sum(stats::dnorm(fit_data[["y"]], const_location, fit_data[["se"]], log = TRUE)) } - class(marglik) <- "bridge" + class(marglik) <- "bridge" } # add model summaries if(has_posterior){ - fit_summary <- BayesTools::runjags_estimates_table(fit = fit, warnings = warnings) + fit_summary <- 
suppressMessages(BayesTools::runjags_estimates_table(fit = fit, warnings = warnings, transform_factors = TRUE, formula_prefix = FALSE)) if(add_info[["prior_scale"]] != "y"){ - fit_summaries <- .runjags_summary_list(fit, priors, add_info[["prior_scale"]], warnings) + fit_summaries <- .runjags_summary_list(fit, attr(fit, "prior_list"), add_info[["prior_scale"]], warnings) }else{ fit_summaries <- NULL } @@ -182,7 +263,7 @@ } # tools -.fit_data <- function(data, priors, effect_direction, prior_scale, weighted){ +.fit_data <- function(data, priors, effect_direction, prior_scale, weighted, weighted_type){ # unlist the data.frame original_measure <- attr(data, "original_measure") @@ -205,7 +286,7 @@ # add weights proportional to the number of estimates from a study if(weighted){ - fit_data$weight <- .get_id_weights(data) + fit_data$weight <- .get_id_weights(data, weighted_type) } return(fit_data) @@ -237,33 +318,66 @@ } + ### add the multivariate part if(effect_direction == "negative"){ fit_data$y_v <- - data[!is.na(data[,"study_ids"]),"y"] }else{ fit_data$y_v <- data[!is.na(data[,"study_ids"]),"y"] } - fit_data$se_v <- data[!is.na(data[,"study_ids"]),"se"] fit_data$se2_v <- data[!is.na(data[,"study_ids"]),"se"]^2 - fit_data$K_v <- length(fit_data[["y_v"]]) + fit_data$K_v <- length(fit_data[["y_v"]]) # add critical y-values if(!is.null(priors[["omega"]])){ - fit_data$crit_y_v <- t(.get_cutoffs(fit_data[["y_v"]], fit_data[["se_v"]], priors[["omega"]], original_measure[!is.na(data[,"study_ids"])], effect_measure)) + fit_data$crit_y_v <- t(.get_cutoffs(fit_data[["y_v"]], data[!is.na(data[,"study_ids"]),"se"], priors[["omega"]], original_measure[!is.na(data[,"study_ids"])], effect_measure)) + }else if(!is.null(priors[["PET"]])){ + fit_data$se_v <- data[!is.na(data[,"study_ids"]),"se"] } fit_data$indx_v <- c((1:fit_data[["K_v"]])[!duplicated(data[!is.na(data[,"study_ids"]),"study_ids"])][-1] - 1, fit_data[["K_v"]]) - ### add the multivariate part return(fit_data) } -.generate_model_syntax <- function(priors, effect_direction, priors_scale, effect_measure, weighted){ +.order_data.mv <- function(data, regression){ + # prepares data in a better order for the subsequent vectorization of multivariate distributions + + if(regression){ + ids <- data[["outcome"]]$study_ids + }else{ + ids <- data$study_ids + } + + # first independent and then dependent estimates + ordering <- order(ifelse(is.na(ids), -1, ids)) + + # re-order the data set and predictors + if(regression){ + data[["outcome"]] <- data[["outcome"]][ordering,] + for(i in seq_along(data[["predictors"]])){ + data[["predictors"]][[i]] <- data[["predictors"]][[i]][ordering] + } + }else{ + data <- data[ordering,] + } + + return(data) +} +.generate_model_syntax <- function(priors, effect_direction, priors_scale, effect_measure, weighted, regression){ model_syntax <- "model{\n" ### prior transformations # the precise transformation for heterogeneity is not used due the inability to re-scale large variances # instead, approximate linear scaling is employed in the same way as in metaBMA package - model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "mu", "mu_transformed")) + # deal with mu as a vector or scalar based on whether it is regression or not + if(regression){ + model_syntax <- paste0(model_syntax, "for(i in 1:K){\n") + model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "mu[i]", "mu_transformed[i]")) + model_syntax <- paste0(model_syntax, "}\n") + }else{ + model_syntax <- paste0(model_syntax, 
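# [Editor's sketch] For regression models the effect is a per-study vector, so the scaling above is
# wrapped in a loop over studies. Assuming prior_scale equals the effect measure (so the scaling is
# the identity), the emitted JAGS fragment would look roughly like:
#   for(i in 1:K){
#     mu_transformed[i] = mu[i]
#   }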
.JAGS_scale(priors_scale, effect_measure, "mu", "mu_transformed")) + } + model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "tau", "tau_transformed")) if(!is.null(priors[["PET"]])){ model_syntax <- paste0(model_syntax, paste0("PET_transformed = PET\n")) @@ -277,10 +391,15 @@ model_syntax <- paste0(model_syntax, "for(i in 1:K){\n") # marginalized random effects and the effect size - prec <- "1 / ( pow(se[i],2) + pow(tau_transformed,2) )" - reg_std <- "pow( pow(se[i],2) + pow(tau_transformed,2), 1/2)" + prec <- "1 / ( pow(se[i],2) + pow(tau_transformed,2) )" + + # deal with mu as a vector or scalar based on whether it is regression or not + if(regression){ + eff <- ifelse(effect_direction == "negative", "-1 * mu_transformed[i]", "mu_transformed[i]") + }else{ + eff <- ifelse(effect_direction == "negative", "-1 * mu_transformed", "mu_transformed") + } - eff <- ifelse(effect_direction == "negative", "-1 * mu_transformed", "mu_transformed") # add PET/PEESE if(!is.null(priors[["PET"]])){ eff <- paste0("(", eff, " + PET_transformed * se[i])") @@ -312,14 +431,21 @@ return(model_syntax) } -.generate_model_syntax.mv <- function(priors, effect_direction, priors_scale, effect_measure, data){ +.generate_model_syntax.mv <- function(priors, effect_direction, priors_scale, effect_measure, data, regression){ model_syntax <- "model{\n" ### prior transformations # the precise transformation for heterogeneity is not used due the inability to re-scale large variances # instead, approximate linear scaling is employed in the same way as in metaBMA package - model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "mu", "mu_transformed")) + # deal with mu as a vector or scalar based on whether it is regression or not + if(regression){ + model_syntax <- paste0(model_syntax, "for(i in 1:(K+K_v)){\n") + model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "mu[i]", "mu_transformed[i]")) + model_syntax <- paste0(model_syntax, "}\n") + }else{ + model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "mu", "mu_transformed")) + } model_syntax <- paste0(model_syntax, .JAGS_scale(priors_scale, effect_measure, "tau", "tau_transformed")) if(!is.null(priors[["PET"]])){ @@ -336,7 +462,13 @@ # marginalized random effects and the effect size prec <- "1 / ( pow(se[i],2) + pow(tau_transformed,2) )" - eff <- ifelse(effect_direction == "negative", "-1 * mu_transformed", "mu_transformed") + # deal with mu as a vector or scalar based on whether it is regression or not + if(regression){ + eff <- ifelse(effect_direction == "negative", "-1 * mu_transformed[i]", "mu_transformed[i]") + }else{ + eff <- ifelse(effect_direction == "negative", "-1 * mu_transformed", "mu_transformed") + } + # add PET/PEESE if(!is.null(priors[["PET"]])){ eff <- paste0("(", eff, " + PET_transformed * se[i])") @@ -362,7 +494,12 @@ model_syntax <- paste0(model_syntax, paste0("tau_transformed2 = pow(tau_transformed, 2)\n")) # create the mean vector - eff_v <- ifelse(effect_direction == "negative", "-1 * mu_transformed", "mu_transformed") + # deal with mu as a vector or scalar based on whether it is regression or not + if(regression){ + eff_v <- ifelse(effect_direction == "negative", "-1 * mu_transformed[K+i]", "mu_transformed[K+i]") + }else{ + eff_v <- ifelse(effect_direction == "negative", "-1 * mu_transformed", "mu_transformed") + } model_syntax <- paste0(model_syntax, "for(i in 1:K_v){\n") if(!is.null(priors[["PET"]])){ eff_v <- paste0(eff_v, " + PET_transformed * 
se_v[i]") @@ -569,20 +706,32 @@ # selection models > random effects | PET/PEESE > non-null models fitting_difficulty <- sapply(models, function(model){ - diffuculty <- 0 + difficulty <- 0 - if(is.prior.simple(model$priors[["mu"]])){ - diffuculty <- diffuculty + 1 + if(inherits(model, "RoBMA.model")){ + if(is.prior.simple(model$priors[["mu"]])){ + difficulty <- difficulty + 1 + } + }else if(inherits(model, "RoBMA.reg.model")){ + difficulty <- difficulty + sum(sapply(model$priors[["terms"]], function(prior){ + if(is.prior.point(prior)){ + return(0) + }else if(is.prior.factor(prior)){ + return(1.5) + }else if(is.prior.simple(prior)){ + return(1) + } + })) } if(is.prior.simple(model$priors[["tau"]])){ - diffuculty <- diffuculty + 3 + difficulty <- difficulty + 3 } if(is.null(model$priors[["PET"]])){ - diffuculty <- diffuculty + 1 + difficulty <- difficulty + 1 }else if(is.null(model$priors[["PEESE"]])){ - diffuculty <- diffuculty + 1 + difficulty <- difficulty + 1 }else if(is.null(model$priors[["omega"]])){ - diffuculty <- diffuculty + 5 + difficulty <- difficulty + 5 } }) @@ -606,6 +755,15 @@ ) ) } + for(i in seq_along(priors[!names(priors) %in% c("mu", "tau", "omega", "PET", "PEESE")])){ + transformations[[names(priors[!names(priors) %in% c("mu", "tau", "omega", "PET", "PEESE")])[i]]] <- list( + "fun" = .transform_mu, + "arg" = list( + "from" = priors_scale, + "to" = measure + ) + ) + } if("tau" %in% names(priors) && ((is.prior.point(priors[["mu"]]) && priors[["mu"]][["parameters"]][["location"]] != 0) || !is.prior.point(priors[["tau"]]))){ transformations[["tau"]] <- list( "fun" = .scale, @@ -632,36 +790,85 @@ transformations <- NULL } - summary_list[[measure]] <- BayesTools::runjags_estimates_table( - fit = fit, - transformations = transformations, - warnings = warnings, - footnotes = .scale_note(priors_scale, measure), - ) + summary_list[[measure]] <- suppressMessages(BayesTools::runjags_estimates_table( + fit = fit, + transformations = transformations, + transform_factors = TRUE, + formula_prefix = FALSE, + warnings = warnings, + footnotes = .scale_note(priors_scale, measure) + )) } return(summary_list) } -.get_id_weights <- function(data){ +.get_id_weights <- function(data, type){ - weights <- rep(NA, nrow(data)) + weight <- rep(NA, nrow(data)) # create table of number of estimates per study - ids_weights <- data.frame( - id = names(table(data[,"study_ids"])), - weight = 1/as.vector(table(data[,"study_ids"])) - ) + if(!is.null(data[,"weight"]) && !anyNA(data[,"weight"])){ + weight <- data[,"weight"] + }else{ + ids_weight <- data.frame( + id = names(table(data[,"study_ids"])), + weight = switch( + type, + "inverse" = 1/as.vector(table(data[,"study_ids"])), + "inverse_sqrt" = 1/sqrt(as.vector(table(data[,"study_ids"]))) + ) + ) + + # fill their weights + for(i in seq_along(ids_weight$id)){ + weight[!is.na(data[,"study_ids"]) & data[,"study_ids"] == ids_weight$id[i]] <- ids_weight$weight[ids_weight$id == ids_weight$id[i]] + } - # fill their weights - for(i in seq_along(ids_weights$id)){ - weights[!is.na(data[,"study_ids"]) & data[,"study_ids"] == ids_weights$id[i]] <- ids_weights$weight[ids_weights$id == ids_weights$id[i]] + # assign all remaining studies weight 1 + weight[is.na(weight)] <- 1 } - # assign all remaining studies weight 1 - weights[is.na(weights)] <- 1 - return(weights) + return(weight) } +.add_priors_levels <- function(priors, data){ + + for(v in names(data)[sapply(data, function(d) is.factor(d))]){ + attr(priors[[.BayesTools_parameter_name(v)]], "levels") <- 
length(levels(data[[v]])) + attr(priors[[.BayesTools_parameter_name(v)]], "level_names") <- levels(data[[v]]) + attr(priors[[.BayesTools_parameter_name(v)]], "parameter") <- "mu" + } + + return(priors) +} + +.generate_model_formula_list <- function(formula){ + + # remove the left hand side + if(attr(stats::terms(formula), "response") == 1){ + formula[2] <- NULL + } + formula <- list("mu" = formula) + + return(formula) +} +.generate_model_formula_data_list <- function(data){ + + if(length(data[["predictors"]]) == 0){ + data <- list("mu" = data.frame(matrix(ncol = 0, nrow = nrow(data[["outcome"]])))) + }else{ + data <- list("mu" = data.frame(data[["predictors"]])) + } + + return(data) +} +.generate_model_formula_prior_list <- function(priors){ + + priors <- list("mu" = priors[["terms"]]) + + return(priors) +} + # JAGS tools for model building and marginal likelihood .JAGS_transformation <- function(from, to, from_par, to_par_name){ diff --git a/R/inference-and-model-averaging.R b/R/inference-and-model-averaging.R index c37a3b3a..84144ef7 100644 --- a/R/inference-and-model-averaging.R +++ b/R/inference-and-model-averaging.R @@ -46,7 +46,7 @@ effect <- sapply(models, function(model)!.is_component_null(model[["priors"]], "effect")) heterogeneity <- sapply(models, function(model)!.is_component_null(model[["priors"]], "heterogeneity")) bias <- sapply(models, function(model)!.is_component_null(model[["priors"]], "bias")) - multivariate <- sapply(models, function(model)!.is_component_null(model[["priors"]], "multivariate")) + hierarchical <- sapply(models, function(model)!.is_component_null(model[["priors"]], "hierarchical")) # obtain the parameter types weightfunctions <- sapply(models, function(model)any(sapply(model[["priors"]], is.prior.weightfunction))) @@ -59,9 +59,11 @@ components_null <- list("Effect" = !effect, "Heterogeneity" = !heterogeneity, "Bias" = !bias) parameters_null <- list("mu" = !effect, "tau" = !heterogeneity) - if(any(multivariate)){ + if(any(hierarchical)){ + components <- c(components, "Hierarchical") parameters <- c(parameters, "rho") - parameters_null <- c(parameters_null, "rho" = list(!multivariate)) + components_null <- c(components_null, "Hierarchical" = list(!hierarchical)) + parameters_null <- c(parameters_null, "rho" = list(!hierarchical)) } if(any(weightfunctions)){ components <- c(components, "bias.selection-models") @@ -86,6 +88,117 @@ parameters_null <- c(parameters_null, "PEESE" = list(!PEESE)) } + # deal with meta-regression + if(!is.null(object[["formula"]])){ + # use the intercept for the effect + parameters[parameters == "mu"] <- "mu_intercept" + names(parameters_null)[names(parameters_null) == "mu"] <- "mu_intercept" + + # add the terms + model_predictors <- lapply(models, function(model) model[["terms"]]) + model_predictors_test <- lapply(models, function(model) model[["terms_test"]]) + + predictors <- object$add_info[["predictors"]] + predictors_test <- object$add_info[["predictors_test"]] + + # define inference options + components_predictors <- NULL + parameters_predictors <- "mu_intercept" + components_predictors_null <- list() + parameters_predictors_null <- list("mu_intercept" = !effect) + + components_predictors_distributions <- NULL + components_predictors_distributions_null <- list() + + + # predictors + for(i in seq_along(predictors_test)){ + components_predictors <- c(components_predictors, .BayesTools_parameter_name(predictors_test[i])) + components_predictors_null[[.BayesTools_parameter_name(predictors_test[i])]] <- + 
sapply(model_predictors_test, function(x) if(length(x) == 0) TRUE else !(predictors_test[i] %in% x)) + } + + for(i in seq_along(predictors)){ + parameters_predictors <- c(parameters_predictors, .BayesTools_parameter_name(predictors[i])) + parameters_predictors_null[[.BayesTools_parameter_name(predictors[i])]] <- + sapply(model_predictors_test, function(x) if(length(x) == 0) TRUE else !(predictors[i] %in% x)) + } + + + # get models inference + if(is.null(components_predictors)){ + inference_predictors <- NULL + }else{ + inference_predictors <- BayesTools::ensemble_inference( + model_list = models, + parameters = components_predictors, + is_null_list = components_predictors_null, + conditional = FALSE + ) + } + # deal with the possibility of only null models models + if(all(sapply(parameters_predictors_null, all))){ + inference_predictors_conditional <- NULL + }else{ + inference_predictors_conditional <- BayesTools::ensemble_inference( + model_list = models, + parameters = parameters_predictors[!sapply(parameters_predictors_null, all)], + is_null_list = parameters_predictors_null[!sapply(parameters_predictors_null, all)], + conditional = TRUE + ) + } + + # get model-averaged posteriors + if(is.null(parameters_predictors)){ + posteriors_predictors <- NULL + }else{ + posteriors_predictors <- BayesTools::mix_posteriors( + model_list = models, + parameters = parameters_predictors, + is_null_list = parameters_predictors_null, + seed = object$add_info[["seed"]], + conditional = FALSE + ) + posteriors_predictors <- BayesTools::transform_factor_samples(posteriors_predictors) + } + + # deal with the possibility of only null models models + if(all(sapply(parameters_predictors_null, all))){ + posteriors_predictors_conditional <- NULL + }else{ + posteriors_predictors_conditional <- BayesTools::mix_posteriors( + model_list = models, + parameters = parameters_predictors[!sapply(parameters_predictors_null, all)], + is_null_list = parameters_predictors_null[!sapply(parameters_predictors_null, all)], + seed = object$add_info[["seed"]], + conditional = TRUE + ) + posteriors_predictors_conditional <- BayesTools::transform_factor_samples(posteriors_predictors_conditional) + } + + # create marginal estimates and summary + if(all(sapply(parameters_predictors_null, all))){ + inference_marginal <- NULL + }else{ + inference_marginal <- BayesTools::marginal_inference( + model_list = models, + marginal_parameters = parameters_predictors[!sapply(parameters_predictors_null, all)], + parameters = parameters_predictors, + is_null_list = parameters_predictors_null, + formula = object[["formula"]], + seed = object$add_info[["seed"]], + silent = TRUE + ) + } + }else{ + # create empty objects in case of no predictors + inference_predictors <- NULL + inference_predictors_conditional <- NULL + posteriors_predictors <- NULL + posteriors_predictors_conditional <- NULL + inference_marginal <- NULL + } + ### get models inference inference <- BayesTools::ensemble_inference( model_list = models, @@ -127,20 +240,48 @@ ) } + # rename mu_intercept back to mu + if(any(names(posteriors) == "mu_intercept")){ + attr(posteriors[["mu_intercept"]], "parameter") <- "mu" + names(posteriors)[names(posteriors) == "mu_intercept"] <- "mu" + } + if(any(names(posteriors_conditional) == "mu_intercept")){ + attr(posteriors_conditional[["mu_intercept"]], "parameter") <- "mu" + names(posteriors_conditional)[names(posteriors_conditional) == "mu_intercept"] <- "mu" + } # return the results output <- list( - inference = inference, - inference_conditional = 
inference_conditional, - posteriors = posteriors, - posteriors_conditional = posteriors_conditional + inference = inference, + inference_conditional = inference_conditional, + inference_predictors = inference_predictors, + inference_predictors_conditional = inference_predictors_conditional, + inference_marginal = inference_marginal, + posteriors = posteriors, + posteriors_conditional = posteriors_conditional, + posteriors_predictors = posteriors_predictors, + posteriors_predictors_conditional = posteriors_predictors_conditional ) return(output) } .compute_coeficients <- function(RoBMA){ + if(!is.null(RoBMA[["posteriors_predictors"]])){ + coefs <- do.call(c, unname(lapply(RoBMA[["posteriors_predictors"]], function(posterior){ + if(inherits(posterior, "mixed_posteriors.factor")){ + out <- apply(posterior, 2, mean) + names(out) <- .output_parameter_names(names(out)) + }else{ + out <- mean(posterior) + names(out) <- .output_parameter_names(attr(posterior,"parameter")) + } + return(out) + }))) + }else{ + coefs <- c("mu" = mean(RoBMA$posteriors[["mu"]])) + } return(c( - "mu" = mean(RoBMA$posteriors[["mu"]]), + coefs, "tau" = mean(RoBMA$posteriors[["tau"]]), "rho" = if(!is.null(RoBMA$posteriors[["rho"]])) mean(RoBMA$posteriors[["rho"]]), if(!is.null(RoBMA$posteriors[["omega"]])) apply(RoBMA$posteriors[["omega"]], 2, mean), diff --git a/R/marginal.R b/R/marginal.R new file mode 100644 index 00000000..64339321 --- /dev/null +++ b/R/marginal.R @@ -0,0 +1,232 @@ +#' @title Summarize marginal estimates of a fitted RoBMA regression object +#' +#' @description \code{marginal_summary} creates summary tables for +#' marginal estimates of a RoBMA regression model. +#' +#' @param object a fitted RoBMA regression object +#' @param conditional show the conditional estimates (assuming that the +#' alternative is true). +#' @inheritParams summary.RoBMA +#' +#' +#' @return \code{marginal_summary} returns a list of tables of class 'BayesTools_table'. 
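# [Editor's illustration] A hypothetical call, assuming 'fit' was produced by RoBMA.reg() with at
# least one tested predictor (the formula and data names below are placeholders, not package
# examples):
#   fit <- RoBMA.reg(~ measure, data = df)
#   marginal_summary(fit, conditional = TRUE)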
+#' +#' @seealso [RoBMA()], [summary.RoBMA()], [diagnostics()], [check_RoBMA()] +#' @export +marginal_summary <- function(object, conditional = FALSE, + output_scale = NULL, probs = c(.025, .975), logBF = FALSE, BF01 = FALSE){ + + if(sum(.get_model_convergence(object)) == 0) + stop("There is no converged model in the ensemble.") + if(!is.RoBMA.reg(object)) + stop("'marginal_summary' function is available only for RoBMA regression models") + if(!is.null(.check_predictors_scaling(object))) + stop("'marginal_summary' function requires standardized predictors") + + BayesTools::check_bool(conditional, "conditional") + BayesTools::check_char(output_scale, "output_scale", allow_NULL = TRUE) + BayesTools::check_real(probs, "probs", allow_NULL = TRUE, check_length = 0) + BayesTools::check_bool(BF01, "BF01") + BayesTools::check_bool(logBF, "logBF") + + # apply version changes to RoBMA object + object <- .update_object(object) + + # check the scales + if(is.null(output_scale)){ + output_scale <- object$add_info[["output_scale"]] + }else if(object$add_info[["output_scale"]] == "y" & .transformation_var(output_scale) != "y"){ + stop("Models estimated using the generall effect size scale 'y' / 'none' cannot be transformed to a different effect size scale.") + }else{ + output_scale <- .transformation_var(output_scale) + } + + + # transform the estimates if needed + if(object$add_info[["output_scale"]] != output_scale){ + object <- .transform_posterior(object, object$add_info[["output_scale"]], output_scale) + } + + # obtain table + estimates <- BayesTools::marginal_estimates_table( + samples = object$RoBMA[["inference_marginal"]][["averaged"]], + inference = object$RoBMA[["inference_marginal"]][["inference"]], + parameters = names(object$RoBMA[["inference_marginal"]][["inference"]]), + logBF = logBF, + BF01 = BF01, + formula_prefix = FALSE, + probs = probs, + title = "Model-averaged marginal estimates:", + footnotes = .scale_note(object$add_info[["prior_scale"]], output_scale), + warnings = .collect_errors_and_warnings(object) + ) + + + # create the output object + output <- list( + call = object[["call"]], + title = "Robust Bayesian meta-analysis", + estimates = estimates + ) + + if(conditional){ + + estimates_conditional <- BayesTools::marginal_estimates_table( + samples = object$RoBMA[["inference_marginal"]][["conditional"]], + inference = object$RoBMA[["inference_marginal"]][["inference"]], + parameters = names(object$RoBMA[["inference_marginal"]][["inference"]]), + logBF = logBF, + BF01 = BF01, + formula_prefix = FALSE, + probs = probs, + title = "Conditional marginal estimates:", + footnotes = .scale_note(object$add_info[["prior_scale"]], output_scale), + warnings = .collect_errors_and_warnings(object) + ) + + output$estimates_conditional <- estimates_conditional + } + + + class(output) <- "marginal_summary.RoBMA" + + return(output) +} + + +#' @title Prints marginal_summary object for RoBMA method +#' +#' @param x a summary of a RoBMA object +#' @param ... additional arguments +#' +#' +#' @return \code{print.marginal_summary.RoBMA} invisibly returns the print statement. 
+#'
+#' @seealso [RoBMA()]
+#' @export
+print.marginal_summary.RoBMA <- function(x, ...){
+
+  cat("Call:\n")
+  print(x[["call"]])
+
+  cat("\n")
+  cat(x[["title"]])
+
+  cat("\n")
+  print(x[["estimates"]])
+
+  if(!is.null(x[["estimates_conditional"]])){
+    cat("\n")
+    print(x[["estimates_conditional"]])
+  }
+
+
+  return(invisible())
+}
+
+
+#' @title Plots marginal estimates of a fitted RoBMA regression object
+#'
+#' @description \code{marginal_plot} allows visualizing prior and
+#' posterior distributions of marginal estimates of a RoBMA regression model.
+#'
+#' @param x a fitted RoBMA regression object
+#' @param parameter regression parameter to be plotted
+#' @param conditional whether conditional marginal estimates should be
+#' plotted. Defaults to \code{FALSE} which plots the model-averaged
+#' estimates.
+#' @inheritParams plot.RoBMA
+#'
+#'
+#' @return \code{marginal_plot} returns either \code{NULL} if \code{plot_type = "base"}
+#' or an object of class 'ggplot2' if \code{plot_type = "ggplot2"}.
+#'
+#' @seealso [RoBMA()]
+#' @export
+marginal_plot <- function(x, parameter, conditional = FALSE, plot_type = "base", prior = FALSE, output_scale = NULL, dots_prior = NULL, ...){
+
+  # check whether plotting is possible
+  if(sum(.get_model_convergence(x)) == 0)
+    stop("There is no converged model in the ensemble.")
+  if(!is.RoBMA.reg(x))
+    stop("'marginal_plot' function is available only for RoBMA regression models")
+  if(!is.null(.check_predictors_scaling(x)))
+    stop("'marginal_plot' function requires standardized predictors")
+
+  # check settings
+  BayesTools::check_char(parameter, "parameter", allow_values = x$add_info[["predictors"]])
+  BayesTools::check_bool(conditional, "conditional")
+  BayesTools::check_char(plot_type, "plot_type", allow_values = c("base", "ggplot"))
+  BayesTools::check_char(output_scale, "output_scale", allow_NULL = TRUE)
+  BayesTools::check_bool(prior, "prior")
+
+  # apply version changes to RoBMA object
+  x <- .update_object(x)
+
+
+  ### manage transformations
+  # get the settings
+  results_scale <- x$add_info[["output_scale"]]
+  if(is.null(output_scale)){
+    output_scale <- x$add_info[["output_scale"]]
+  }else{
+    output_scale <- .transformation_var(output_scale)
+  }
+  # transform the estimates if needed
+  if(x$add_info[["output_scale"]] != output_scale){
+    x <- .transform_posterior(x, x$add_info[["output_scale"]], output_scale)
+  }
+
+
+  # choose the samples
+  if(conditional){
+    samples <- x$RoBMA[["inference_marginal"]][["conditional"]]
+  }else{
+    samples <- x$RoBMA[["inference_marginal"]][["averaged"]]
+  }
+
+  dots       <- .set_dots_plot(..., n_levels = length(samples[[.BayesTools_parameter_name(parameter)]]))
+  dots_prior <- .set_dots_prior_marginal(dots_prior, n_levels = length(samples[[.BayesTools_parameter_name(parameter)]]))
+
+  # prepare the argument call
+  args                          <- dots
+  args$samples                  <- samples
+  args$parameter                <- .BayesTools_parameter_name(parameter)
+  args$plot_type                <- plot_type
+  args$prior                    <- prior
+  args$n_points                 <- 1000
+  args$transformation           <- NULL
+  args$transformation_arguments <- NULL
+  args$transformation_settings  <- FALSE
+  args$par_name                 <- .plot.RoBMA_par_names(parameter, x, output_scale)[[1]]
+  args$dots_prior               <- dots_prior
+
+  plot <- suppressMessages(do.call(BayesTools::plot_marginal, args))
+
+
+  # return the plots
+  if(plot_type == "base"){
+    return(invisible(plot))
+  }else if(plot_type == "ggplot"){
+    return(plot)
+  }
+}
+
+
+.set_dots_prior_marginal <- function(dots_prior, n_levels){
+
+  if(is.null(dots_prior)){
+    dots_prior <-
list() + } + + if(is.null(dots_prior[["col"]]) & n_levels == 1){ + dots_prior[["col"]] <- "black" + }else if(is.null(dots_prior[["col"]]) & n_levels > 1){ + dots_prior[["col"]] <- grDevices::palette.colors(n = n_levels + 1, palette = "Okabe-Ito")[-1] + } + if(is.null(dots_prior[["lty"]])){ + dots_prior[["lty"]] <- 2 + } + + return(dots_prior) +} diff --git a/R/plots.R b/R/plots.R index 3a251a36..0b82e07e 100644 --- a/R/plots.R +++ b/R/plots.R @@ -97,11 +97,23 @@ plot.RoBMA <- function(x, parameter = "mu", # deal with bad parameter names for PET-PEESE, weightfunction if(tolower(gsub("-", "", gsub("_", "", gsub(".", "", parameter, fixed = TRUE),fixed = TRUE), fixed = TRUE)) %in% c("weightfunction", "weigthfunction", "omega")){ - parameter <- "omega" + parameter <- "omega" + parameter_samples <- "omega" }else if(tolower(gsub("-", "", gsub("_", "", gsub(".", "", parameter, fixed = TRUE),fixed = TRUE), fixed = TRUE)) == "petpeese"){ - parameter <- "PETPEESE" - }else if(!parameter %in% c("mu", "tau", "rho")){ - stop("The passed parameter is not supported for plotting. See '?plot.RoBMA' for more details.") + parameter <- "PETPEESE" + parameter_samples <- "PETPEESE" + }else if(parameter %in% c("mu", "tau", "rho")){ + parameter <- parameter + parameter_samples <- parameter + }else if(is.RoBMA.reg(x) && parameter %in% x$add_info[["predictors"]]){ + parameter <- parameter + parameter_samples <- .BayesTools_parameter_name(parameter) + }else{ + if(is.RoBMA.reg(x)){ + stop(paste0("The passed parameter does not correspond to any of main model parameter ('mu', 'tau', 'omega', 'PET', 'PEESE') or any of the specified predictors: ", paste0("'", x$add_info[["predictors"]], "'", collapse = ", "), ". See '?plot.RoBMA' for more details.")) + }else{ + stop(paste0("The passed parameter does not correspond to any of main model parameter ('mu', 'tau', 'omega', 'PET', 'PEESE'). See '?plot.RoBMA' for more details.")) + } } @@ -124,7 +136,7 @@ plot.RoBMA <- function(x, parameter = "mu", }else if(parameter == "rho"){ # rho is scale invariant transformation <- NULL - }else if(parameter == "mu"){ + }else if(parameter == "mu" | parameter %in% x$add_info[["predictors"]]){ transformation <- eval(parse(text = paste0(".", results_scale, "2", output_scale))) }else if(parameter == "tau"){ transformation <- eval(parse(text = paste0(".scale_", results_scale, "2", output_scale))) @@ -174,10 +186,14 @@ plot.RoBMA <- function(x, parameter = "mu", ) }else{ - if(conditional){ + if(conditional & parameter %in% c("mu", "tau", "rho", "PET", "PEESE", "PETPEESE", "omega")){ samples <- x[["RoBMA"]][["posteriors_conditional"]] - }else{ + }else if(conditional & parameter %in% x$add_info[["predictors"]]){ + samples <- x[["RoBMA"]][["posteriors_predictors_conditional"]] + }else if(!conditional & parameter %in% c("mu", "tau", "rho", "PET", "PEESE", "PETPEESE", "omega")){ samples <- x[["RoBMA"]][["posteriors"]] + }else if(!conditional & parameter %in% x$add_info[["predictors"]]){ + samples <- x[["RoBMA"]][["posteriors_predictors"]] } } @@ -206,10 +222,31 @@ plot.RoBMA <- function(x, parameter = "mu", }else if(is.null(samples[["mu"]]) || is.null(samples[["PET"]]) && is.null(samples[["PEESE"]])){ stop("The ensemble does not contain any posterior samples model-averaged across the PET-PEESE publication bias adjustment. 
Please, verify that you specified at least one PET-PEESE publication bias adjustment.") } + }else if(parameter %in% x$add_info[["predictors"]]){ + if(conditional && is.null(samples[[parameter_samples]])){ + stop(sprintf("The ensemble does not contain any posterior samples model-averaged across the models assuming the presence of the '%1$s' predictor. Please, verify that you specified at least one model assuming the presence of '%1$s' predictor.", parameter)) + }else if(is.null(samples[[parameter_samples]])){ + stop(sprintf("The ensemble does not contain any posterior samples model-averaged across the '%1$s' predictor. Please, verify that you specified at least one model containing the '%1$s' predictor.", parameter)) + } + } + + + if(parameter %in% x$add_info[["predictors"]]){ + if(inherits(samples[[parameter_samples]], "mixed_posteriors.factor")){ + if(attr(samples[[parameter_samples]],"orthonormal") || attr(samples[[parameter_samples]],"meandif")){ + n_levels <- length(attr(samples[[parameter_samples]],"level_names")) + }else if(attr(samples[[parameter_samples]],"treatment")){ + n_levels <- length(attr(x$add_info[["predictors"]],"level_names")) - 1 + } + }else{ + n_levels <- 1 + } + }else{ + n_levels <- 1 } - dots <- .set_dots_plot(...) + dots <- .set_dots_plot(..., n_levels = n_levels) dots_prior <- .set_dots_prior(dots_prior) if(parameter == "PETPEESE" & show_data){ @@ -223,7 +260,7 @@ plot.RoBMA <- function(x, parameter = "mu", # prepare the argument call args <- dots args$samples <- samples - args$parameter <- parameter + args$parameter <- parameter_samples args$plot_type <- plot_type args$prior <- prior args$n_points <- 1000 @@ -233,10 +270,11 @@ plot.RoBMA <- function(x, parameter = "mu", args$transformation_arguments <- NULL args$transformation_settings <- FALSE args$rescale_x <- rescale_x - args$par_name <- if(parameter %in% c("mu", "tau")) .plot.RoBMA_par_names(parameter, x, output_scale)[[1]] + args$par_name <- if(parameter %in% c("mu", "tau", x$add_info[["predictors"]])) .plot.RoBMA_par_names(parameter, x, output_scale)[[1]] args$dots_prior <- dots_prior - plot <- do.call(BayesTools::plot_posterior, args) + # suppress messages about transformations + plot <- suppressMessages(do.call(BayesTools::plot_posterior, args)) if(parameter == "PETPEESE" & show_data){ @@ -321,7 +359,13 @@ forest <- function(x, conditional = FALSE, plot_type = "base", output_scale = NU }else{ samples_mu <- x[["RoBMA"]][["posteriors"]][["mu"]] } - data <- x[["data"]] + + if(is.RoBMA.reg(x)){ + data <- x[["data"]][["outcome"]] + }else{ + data <- x[["data"]] + } + ### manage transformations @@ -557,29 +601,57 @@ plot_models <- function(x, parameter = "mu", conditional = FALSE, output_scale = ### prepare input - if(conditional){ + if(is.RoBMA.reg(x) && parameter == "mu"){ - model_list <- x[["models"]] - samples <- x[["RoBMA"]][["posteriors_conditional"]] - inference <- x[["RoBMA"]][["inference_conditional"]] + if(conditional){ - # check whether the input exists - if(parameter == "mu" && is.null(samples[["mu"]])) - stop("The ensemble does not contain any posterior samples model-averaged across the models assuming the presence of the effect. Please, verify that you specified at least one model assuming the presence of the effect.") - if(parameter == "tau" && is.null(samples[["tau"]])) - stop("The ensemble does not contain any posterior samples model-averaged across the models assuming the presence of the heterogeneity. 
Please, verify that you specified at least one model assuming the presence of the heterogeneity.") + model_list <- x[["models"]] + samples <- x[["RoBMA"]][["posteriors_predictors_conditional"]] + inference <- x[["RoBMA"]][["inference_conditional"]] + + # check whether the input exists + if(parameter == "mu_intercept" && is.null(samples[["mu_intercept"]])) + stop("The ensemble does not contain any posterior samples model-averaged across the models assuming the presence of the effect. Please, verify that you specified at least one model assuming the presence of the effect.") + + }else{ + + model_list <- x[["models"]] + samples <- x[["RoBMA"]][["posteriors_predictors"]] + inference <- x[["RoBMA"]][["inference"]] + + } + + parameter <- "mu_intercept" + names(samples)[names(samples) == "mu_intercept"] <- "mu_intercept" + names(inference)[names(inference) == "Effect"] <- "mu_intercept" }else{ - model_list <- x[["models"]] - samples <- x[["RoBMA"]][["posteriors"]] - inference <- x[["RoBMA"]][["inference"]] + if(conditional){ + + model_list <- x[["models"]] + samples <- x[["RoBMA"]][["posteriors_conditional"]] + inference <- x[["RoBMA"]][["inference_conditional"]] + # check whether the input exists + if(parameter == "mu" && is.null(samples[["mu"]])) + stop("The ensemble does not contain any posterior samples model-averaged across the models assuming the presence of the effect. Please, verify that you specified at least one model assuming the presence of the effect.") + if(parameter == "tau" && is.null(samples[["tau"]])) + stop("The ensemble does not contain any posterior samples model-averaged across the models assuming the presence of the heterogeneity. Please, verify that you specified at least one model assuming the presence of the heterogeneity.") + + }else{ + + model_list <- x[["models"]] + samples <- x[["RoBMA"]][["posteriors"]] + inference <- x[["RoBMA"]][["inference"]] + + } + + # deal with the non-matching names + names(inference)[names(inference) == "Effect"] <- "mu" + names(inference)[names(inference) == "Heterogeneity"] <- "tau" } - # deal with the non-matching names - names(inference)[names(inference) == "Effect"] <- "mu" - names(inference)[names(inference) == "Heterogeneity"] <- "tau" dots <- list(...) @@ -611,11 +683,13 @@ plot_models <- function(x, parameter = "mu", conditional = FALSE, output_scale = -.set_dots_plot <- function(...){ +.set_dots_plot <- function(..., n_levels = 1){ dots <- list(...) 
- if(is.null(dots[["col"]])){ + if(is.null(dots[["col"]]) & n_levels == 1){ dots[["col"]] <- "black" + }else if(is.null(dots[["col"]]) & n_levels > 1){ + dots[["col"]] <- grDevices::palette.colors(n = n_levels + 1, palette = "Okabe-Ito")[-1] } if(is.null(dots[["col.fill"]])){ dots[["col.fill"]] <- "#4D4D4D4C" # scales::alpha("grey30", .30) @@ -643,7 +717,7 @@ plot_models <- function(x, parameter = "mu", conditional = FALSE, output_scale = } .plot.RoBMA_par_names <- function(par, fit, output_scale){ - if(par == "mu"){ + if(par %in% c("mu", "mu_intercept")){ par_names <- list(switch( output_scale, @@ -721,6 +795,17 @@ plot_models <- function(x, parameter = "mu", conditional = FALSE, output_scale = "y" = expression("PEESE") )) + }else{ + + par_names <- list(switch( + output_scale, + "r" = bquote(.(par)~(rho)), + "d" = bquote(.(par)~("Cohen's"~italic(d))), + "z" = bquote(.(par)~("Fisher's"~italic(z))), + "logOR" = bquote(.(par)~("log"(italic("OR")))), + "OR" = bquote(.(par)~(italic("OR"))), + "y" = bquote(.(par)) + )) } return(par_names) diff --git a/R/priors.R b/R/priors.R index 452f2aa9..0f089629 100644 --- a/R/priors.R +++ b/R/priors.R @@ -8,6 +8,11 @@ prior <- BayesTools::prior #' @export prior_none <- BayesTools::prior_none +#' @name prior_factor +#' @inherit BayesTools::prior_factor +#' @export +prior_factor <- BayesTools::prior_factor + #' @name prior_PET #' @inherit BayesTools::prior_PET #' @export @@ -32,3 +37,52 @@ prior_weightfunction <- BayesTools::prior_weightfunction #' \insertAllCited{} #' @export prior_informed <- BayesTools::prior_informed + +#' @title Orthornomal contrast matrix +#' +#' @description Return a matrix of orthornomal contrasts. +#' Code is based on \code{stanova::contr.bayes} and corresponding to description +#' by \insertCite{rouder2012default;textual}{BayesTools} +#' +#' @param n a vector of levels for a factor, or the number of levels +#' @param contrasts logical indicating whether contrasts should be computed +#' +#' @examples +#' contr.orthonormal(c(1, 2)) +#' contr.orthonormal(c(1, 2, 3)) +#' +#' @references +#' \insertAllCited{} +#' +#' @return A matrix with n rows and k columns, with k = n - 1 if \code{contrasts = TRUE} and k = n +#' if \code{contrasts = FALSE}. +#' +#' @export +contr.orthonormal <- BayesTools::contr.orthonormal + +#' @title Mean difference contrast matrix +#' +#' @description Return a matrix of mean difference contrasts. +#' This is an adjustment to the \code{contr.orthonormal} that ascertains that the prior +#' distributions on difference between the gran mean and factor level are identical independent +#' of the number of factor levels (which does not hold for the orthonormal contrast). Furthermore, +#' the contrast is re-scaled so the specified prior distribution exactly corresponds to the prior +#' distribution on difference between each factor level and the grand mean -- this is approximately +#' twice the scale of \code{contr.orthonormal}. +#' +#' @param n a vector of levels for a factor, or the number of levels +#' @param contrasts logical indicating whether contrasts should be computed +#' +#' @examples +#' contr.meandif(c(1, 2)) +#' contr.meandif(c(1, 2, 3)) +#' +#' @references +#' \insertAllCited{} +#' +#' @return A matrix with n rows and k columns, with k = n - 1 if \code{contrasts = TRUE} and k = n +#' if \code{contrasts = FALSE}. 
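To make the two contrast codings concrete, a small illustrative sketch mirroring the roxygen examples above (output values omitted; see the function documentation for details):

``` r
library(RoBMA)

# orthonormal contrasts: mutually orthogonal columns of unit length
contr.orthonormal(c(1, 2, 3))

# mean-difference contrasts: rescaled so that the implied prior on the
# difference between each factor level and the grand mean matches the
# specified prior (roughly twice the scale of the orthonormal coding)
contr.meandif(c(1, 2, 3))
```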
+#' +#' @export +contr.meandif <- BayesTools::contr.meandif + diff --git a/R/summary.R b/R/summary.R index c04522f5..d36c0eee 100644 --- a/R/summary.R +++ b/R/summary.R @@ -28,7 +28,7 @@ print.RoBMA <- function(x, ...){ #' of the individual models (\code{"individual"}). Can be abbreviated to first letters. #' @param conditional show the conditional estimates (assuming that the #' alternative is true). Defaults to \code{FALSE}. Only available for -#' \code{type == "conditional"}. +#' \code{type == "ensemble"}. #' @param output_scale transform the meta-analytic estimates to a different #' scale. Defaults to \code{NULL} which returns the same scale as the model was estimated on. #' @param probs quantiles of the posterior samples to be displayed. @@ -88,6 +88,7 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, # apply version changes to RoBMA object object <- .update_object(object) + # check the scales if(is.null(output_scale)){ output_scale <- object$add_info[["output_scale"]] }else if(object$add_info[["output_scale"]] == "y" & .transformation_var(output_scale) != "y"){ @@ -103,7 +104,6 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, type <- "diagnostics" } - object[["add_info"]][["warnings"]] if(substr(type,1,1) == "e"){ @@ -115,7 +115,7 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, # obtain components overview components <- BayesTools::ensemble_inference_table( inference = object$RoBMA[["inference"]], - parameters = names(object$RoBMA[["inference"]])[names(object$RoBMA[["inference"]]) %in% c("Effect", "Heterogeneity", "Bias")], + parameters = names(object$RoBMA[["inference"]])[names(object$RoBMA[["inference"]]) %in% c("Effect", "Heterogeneity", "Bias", "Hierarchical")], logBF = logBF, BF01 = BF01, title = "Components summary:" @@ -152,8 +152,7 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, ) } - - ### return results + # create the output object output <- list( call = object[["call"]], title = "Robust Bayesian meta-analysis", @@ -165,6 +164,66 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, output$estimates_conditional <- estimates_conditional } + # add meta-regression summaries + if(is.RoBMA.reg(object)){ + + # rename the inference components + for(i in seq_along(object$RoBMA[["inference_predictors"]])){ + attr(object$RoBMA[["inference_predictors"]][[i]], "parameter_name") <- gsub("(mu) ", "", attr(object$RoBMA[["inference_predictors"]][[i]], "parameter_name"), fixed = TRUE) + } + + if(!is.null(object$RoBMA[["inference_predictors"]])){ + output$components_predictors <- BayesTools::ensemble_inference_table( + inference = object$RoBMA[["inference_predictors"]], + parameters = names(object$RoBMA[["inference_predictors"]]), + logBF = logBF, + BF01 = BF01, + title = "Meta-regression components summary:" + ) + } + + if(!is.null(object$RoBMA[["posteriors_predictors"]])){ + # obtain estimates tables + output$estimates_predictors <- BayesTools::ensemble_estimates_table( + samples = object$RoBMA[["posteriors_predictors"]], + parameters = names(object$RoBMA[["posteriors_predictors"]]), + probs = probs, + title = "Model-averaged meta-regression estimates:", + formula_prefix = FALSE, + footnotes = .scale_note(object$add_info[["prior_scale"]], output_scale), + warnings = .collect_errors_and_warnings(object) + ) + } + + + # deal with possibly empty table in case of no alternative models + if(conditional){ + 
if(is.null(object$RoBMA[["posteriors_predictors_conditional"]])){ + estimates_predictors_conditional <- data.frame(matrix(nrow = 0, ncol = length(probs) + 2)) + colnames(estimates_predictors_conditional) <- c("Mean", "Median", probs) + class(estimates_predictors_conditional) <- c("BayesTools_table", "BayesTools_ensemble_summary", class(estimates_predictors_conditional)) + attr(estimates_predictors_conditional, "type") <- rep("estimate", ncol(estimates_predictors_conditional)) + attr(estimates_predictors_conditional, "rownames") <- TRUE + attr(estimates_predictors_conditional, "title") <- "Conditional meta-regression estimates:" + attr(estimates_predictors_conditional, "footnotes") <- .scale_note(object$add_info[["prior_scale"]], output_scale) + attr(estimates_predictors_conditional, "warnings") <- .collect_errors_and_warnings(object) + }else{ + estimates_predictors_conditional <- BayesTools::ensemble_estimates_table( + samples = object$RoBMA[["posteriors_predictors_conditional"]], + parameters = names(object$RoBMA[["posteriors_predictors_conditional"]]), + probs = probs, + title = "Conditional meta-regression estimates:", + formula_prefix = FALSE, + footnotes = .scale_note(object$add_info[["prior_scale"]], output_scale), + warnings = .collect_errors_and_warnings(object) + ) + } + output$estimates_predictors_conditional <- estimates_predictors_conditional + } + + } + + class(output) <- "summary.RoBMA" attr(output, "type") <- "ensemble" @@ -172,15 +231,23 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, }else if(substr(type,1,1) == "m"){ - components <- names(object$RoBMA[["inference"]])[names(object$RoBMA[["inference"]]) %in% c("Effect", "Heterogeneity", "Bias")] + components <- names(object$RoBMA[["inference"]])[names(object$RoBMA[["inference"]]) %in% c("Effect", "Heterogeneity", "Bias", "Hierarchical")] + parameters <- list() if(any(components == "Effect")){ - parameters[["Effect"]] <- "mu" + if(.is_regression(object)){ + parameters[["intercept"]] <- "mu_intercept" + for(i in seq_along(object$add_info[["predictors"]])){ + parameters[[object$add_info[["predictors"]][i]]] <- .BayesTools_parameter_name(object$add_info[["predictors"]][i]) + } + }else{ + parameters[["Effect"]] <- "mu" + } } if(any(components == "Heterogeneity")){ parameters[["Heterogeneity"]] <- "tau" - if(!attr(object$data, "all_independent")){ - parameters[["Var. allocation"]] <- "rho" + if(.is_multivariate(object)){ + parameters[["Hierarchical"]] <- "rho" } } if(any(components == "Bias")){ @@ -210,15 +277,23 @@ summary.RoBMA <- function(object, type = "ensemble", conditional = FALSE, }else if(substr(type,1,1) == "d"){ - components <- names(object$RoBMA[["inference"]])[names(object$RoBMA[["inference"]]) %in% c("Effect", "Heterogeneity", "Bias")] + components <- names(object$RoBMA[["inference"]])[names(object$RoBMA[["inference"]]) %in% c("Effect", "Heterogeneity", "Bias", "Hierarchical")] + parameters <- list() if(any(components == "Effect")){ - parameters[["Effect"]] <- "mu" + if(.is_regression(object)){ + parameters[["intercept"]] <- "mu_intercept" + for(i in seq_along(object$add_info[["predictors"]])){ + parameters[[object$add_info[["predictors"]][i]]] <- .BayesTools_parameter_name(object$add_info[["predictors"]][i]) + } + }else{ + parameters[["Effect"]] <- "mu" + } } if(any(components == "Heterogeneity")){ parameters[["Heterogeneity"]] <- "tau" - if(!attr(object$data, "all_independent")){ - parameters[["Var. 
allocation"]] <- "rho" + if(.is_multivariate(object)){ + parameters[["Hierarchical"]] <- "rho" } } if(any(components == "Bias")){ @@ -313,14 +388,29 @@ print.summary.RoBMA <- function(x, ...){ cat("\n") print(x[["components"]]) + if(!is.null(x[["components_predictors"]])){ + cat("\n") + print(x[["components_predictors"]]) + } + cat("\n") print(x[["estimates"]]) + if(!is.null(x[["estimates_predictors"]])){ + cat("\n") + print(x[["estimates_predictors"]]) + } + if(!is.null(x[["estimates_conditional"]])){ cat("\n") print(x[["estimates_conditional"]]) } + if(!is.null(x[["estimates_predictors_conditional"]])){ + cat("\n") + print(x[["estimates_predictors_conditional"]]) + } + return(invisible()) }else if(attr(x, "type") == "models"){ @@ -360,13 +450,22 @@ print.summary.RoBMA <- function(x, ...){ #' @param x an object to test #' #' -#' @return \code{is.RoBMA} returns a boolean. +#' @return returns a boolean. #' -#' @export +#' @name is.RoBMA +#' @export is.RoBMA +#' @export is.RoBMA.reg + +#' @rdname is.RoBMA is.RoBMA <- function(x){ inherits(x, "RoBMA") } +#' @rdname is.RoBMA +is.RoBMA.reg <- function(x){ + inherits(x, "RoBMA.reg") +} + #' @title Interprets results of a RoBMA model. diff --git a/R/tools.R b/R/tools.R index 38c20f14..4da2eade 100644 --- a/R/tools.R +++ b/R/tools.R @@ -17,7 +17,16 @@ check_RoBMA <- function(fit){ .is_model_constant <- function(priors){ # checks whether there is at least one non-nill prior - return(all(sapply(priors, function(prior) is.prior.point(prior) | is.prior.none(prior))) & is.null(priors[["omega"]])) + if(is.null(priors[["terms"]])){ + # in simple models + return(all(sapply(priors, function(prior) is.prior.point(prior) | is.prior.none(prior))) & is.null(priors[["omega"]])) + }else{ + # in regression models + non_terms <- all(sapply(priors[names(priors) != "terms"], function(prior) is.prior.point(prior) | is.prior.none(prior))) & is.null(priors[["omega"]]) + terms <- all(sapply(priors[["terms"]], function(prior) is.prior.point(prior))) + return(non_terms && terms) + } + return() } .remove_model_posteriors <- function(object){ for(i in seq_along(object[["models"]])){ @@ -121,11 +130,15 @@ check_RoBMA <- function(fit){ } .is_component_null <- function(priors, component){ if(component == "effect"){ - return(priors[["mu"]][["is_null"]]) + if(is.null(priors[["terms"]])){ + return(priors[["mu"]][["is_null"]]) + }else{ + return(priors[["terms"]][["intercept"]][["is_null"]]) + } }else if(component == "heterogeneity"){ return(priors[["tau"]][["is_null"]]) - }else if(component == "multivariate"){ - if(priors[["tau"]][["is_null"]] || is.null(priors[["rho"]])){ + }else if(component == "hierarchical"){ + if((is.prior.point(priors[["tau"]]) && priors[["tau"]]$parameters[["location"]] == 0) || is.null(priors[["rho"]])){ return(TRUE) }else{ return(priors[["rho"]][["is_null"]]) @@ -143,17 +156,113 @@ check_RoBMA <- function(fit){ } } .multivariate_warning <- function(){ - warning("You are about to estimate multivariate models. Note that this is an extremely computationaly expensive experimental feature.", immediate. = TRUE) + warning("You are about to estimate multivariate models. Note that this is an extremely computationaly expensive experimental feature.", immediate. = TRUE, call. = FALSE) } -.weighted_warning <- function(){ - warning("You are about to estimate weighted models. Note that this is an experimental feature.", immediate. = TRUE) +.weighted_warning <- function(){ + warning("You are about to estimate weighted models. 
Note that this is an experimental feature.", immediate. = TRUE, call. = FALSE) } .update_object <- function(object){ - # 2.1 -> 2.2 - if(is.null(attr(object$data, "all_independent"))){ - attr(object$data, "all_independent") <- TRUE + # no package version number saved prior to 2.4 + if(!all("version" %in% names(object[["add_info"]]))){ + + # 2.1 -> 2.2 + if(is.null(object[["formula"]]) && is.null(attr(object$data, "all_independent"))){ + attr(object$data, "all_independent") <- TRUE + } + + object[["add_info"]][["version"]] <- list(c(2,2,0)) + } + + # 2.2 -> 2.3 + if(.object_version_older(object, "2.2.0")){ + + if(is.null(object[["formula"]]) && !is.null(attr(object$data, "all_independent")) && is.null(attr(object$data, "weighted"))){ + attr(object$data, "weighted") <- FALSE + } + + object[["add_info"]][["version"]] <- list(c(2,3,0)) + } + + # 2.3 -> 2.4 + if(.object_version_older(object, "2.3.0")){ + + if(!all(c("predictors", "predictors_test", "standardize_predictors") %in% names(object[["add_info"]]))){ + object[["add_info"]][["predictors"]] <- NULL + object[["add_info"]][["predictors_test"]] <- NULL + object[["add_info"]][["standardize_predictors"]] <- NULL + object[["add_info"]] <- object[["add_info"]][c( + "model_type", + "predictors", + "predictors_test", + "prior_scale", + "output_scale", + "effect_measure", + "effect_direction", + "standardize_predictors", + "seed", + "save", + "warnings", + "errors" + )] + } + if(!all("version" %in% names(object[["add_info"]]))){ + object[["add_info"]][["version"]] <- utils::packageVersion("RoBMA") + } + if(!all(c("inference_predictors", "inference_predictors_conditional", "posteriors_predictors", "posteriors_predictors_conditional") %in% names(object[["RoBMA"]]))){ + object[["RoBMA"]] <- list( + "inference" = object[["RoBMA"]][["inference"]], + "inference_conditional" = object[["RoBMA"]][["inference_conditional"]], + "inference_predictors" = NULL, + "inference_predictors_conditional" = NULL, + "posteriors" = object[["RoBMA"]][["posteriors"]], + "posteriors_conditional" = object[["RoBMA"]][["posteriors_conditional"]], + "posteriors_predictors" = NULL, + "posteriors_predictors_conditional" = NULL + ) + } + names(object[["priors"]])[names(object[["priors"]]) == "rho"] <- "hierarchical" + object[["add_info"]][["version"]] <- list(c(2,4,0)) } return(object) } +.object_version_older <- function(object, version){ + + object <- unlist(object[["add_info"]][["version"]]) + current <- as.numeric(unlist(strsplit(version, ".", fixed = TRUE))) + + if(length(object) < 3 | length(current) < 3){ + return(object[1] <= current[1] && object[2] <= current[2]) + }else{ + return(object[1] <= current[1] && object[2] <= current[2] && object[3] <= current[3]) + } +} + +.is_multivariate <- function(object){ + if(.is_regression(object)){ + return(!attr(object[["data"]][["outcome"]], "all_independent")) + }else{ + return(!attr(object[["data"]], "all_independent")) + } +} +.is_weighted <- function(object){ + if(.is_regression(object)){ + return(attr(object[["data"]][["outcome"]], "weighted")) + }else{ + return(attr(object[["data"]], "weighted")) + } +} +.is_regression <- function(object){ + return(!is.null(object[["formula"]])) +} + +.BayesTools_parameter_name <- function(parameter){ + return(BayesTools::JAGS_parameter_names(parameter, formula_parameter = "mu")) +} +.output_parameter_names <- function(parameter){ + return(BayesTools::format_parameter_names(parameter, formula_parameters = "mu", formula_prefix = FALSE)) +} +.reserved_words <- function() c("intercept", 
"Intercept", "terms", "mu", "tau", "theta", "omega", "rho", "eta", "PET", "PEESE", + "weightfunction", "weigthfunction", "PET-PEESE", "PETPEESE", + "d", "t", "r", "z", "y", "logOR", "lCI", "uCI", "v", "se", "n", "weight") diff --git a/R/transformations.R b/R/transformations.R index f0f627af..29c29834 100644 --- a/R/transformations.R +++ b/R/transformations.R @@ -31,6 +31,8 @@ #' \code{"cohens_d"}, correlation coefficient \code{"r"} and \code{"logOR"}. #' Supplying \code{"none"} will treat the effect sizes as unstandardized and #' refrain from any transformations. +#' @param weight specifies likelihood weights of the individual estimates. +#' Notes that this is an untested experimental feature. #' @param return_all whether data frame containing all filled values should be #' returned. Defaults to \code{FALSE} #' @@ -62,22 +64,23 @@ #' #' @seealso [RoBMA()], [check_setup()], [effect_sizes()], [standard_errors()], and [sample_sizes()] #' @export -combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, y = NULL, se = NULL, v = NULL, n = NULL, lCI = NULL, uCI = NULL, study_names = NULL, study_ids = NULL, data = NULL, transformation = "fishers_z", return_all = FALSE){ +combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, y = NULL, se = NULL, v = NULL, n = NULL, lCI = NULL, uCI = NULL, study_names = NULL, study_ids = NULL, weight = NULL, data = NULL, transformation = "fishers_z", return_all = FALSE){ # settings & input check BayesTools::check_char(transformation, "transformation") BayesTools::check_bool(return_all, "return_all") - BayesTools::check_real(d[!is.na(d)], "d", allow_NULL = TRUE, check_length = FALSE) - BayesTools::check_real(r[!is.na(r)], "r", allow_NULL = TRUE, check_length = FALSE, lower = -1, upper = 1, allow_bound = FALSE) - BayesTools::check_real(z[!is.na(z)], "z", allow_NULL = TRUE, check_length = FALSE) - BayesTools::check_real(logOR[!is.na(logOR)], "logOR", allow_NULL = TRUE, check_length = FALSE, allow_bound = FALSE) - BayesTools::check_real(t[!is.na(t)], "t", allow_NULL = TRUE, check_length = FALSE) - BayesTools::check_real(y[!is.na(y)], "y", allow_NULL = TRUE, check_length = FALSE) - BayesTools::check_real(se[!is.na(se)], "se", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) - BayesTools::check_real(v[!is.na(v)], "v", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) - BayesTools::check_int( n[!is.na(n)], "n", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) - BayesTools::check_real(lCI[!is.na(lCI)], "lCI", allow_NULL = TRUE, check_length = FALSE) - BayesTools::check_real(uCI[!is.na(uCI)], "uCI", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(d[!is.na(d)], "d", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(r[!is.na(r)], "r", allow_NULL = TRUE, check_length = FALSE, lower = -1, upper = 1, allow_bound = FALSE) + BayesTools::check_real(z[!is.na(z)], "z", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(logOR[!is.na(logOR)], "logOR", allow_NULL = TRUE, check_length = FALSE, allow_bound = FALSE) + BayesTools::check_real(t[!is.na(t)], "t", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(y[!is.na(y)], "y", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(se[!is.na(se)], "se", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) + BayesTools::check_real(v[!is.na(v)], "v", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) + 
BayesTools::check_int( n[!is.na(n)], "n", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) + BayesTools::check_real(lCI[!is.na(lCI)], "lCI", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(uCI[!is.na(uCI)], "uCI", allow_NULL = TRUE, check_length = FALSE) + BayesTools::check_real(weight[!is.na(weight)], "weight", allow_NULL = TRUE, check_length = FALSE, lower = 0, allow_bound = FALSE) BayesTools::check_char(study_names[!is.na(study_names)], "study_names", allow_NULL = TRUE, check_length = FALSE) @@ -92,7 +95,7 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, original_measure <- NULL } - input_variables <- c("d", "r", "z", "logOR", "y", "se", "v", "n", "lCI", "uCI", "t", "study_names", "study_ids") + input_variables <- c("d", "r", "z", "logOR", "y", "se", "v", "n", "lCI", "uCI", "t", "study_names", "study_ids", "weight") if(!is.null(data)){ if(!is.data.frame(data)) @@ -101,7 +104,7 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, stop(paste0("The following variables do not correspond to any effect size/variability measure: ", paste(colnames(data)[!colnames(data) %in% input_variables], collapse = ", "))) data <- data[,colnames(data) %in% input_variables] }else{ - data <- data.frame(do.call(cbind, list(d = d, r = r, z = z, logOR = logOR, t = t, y = y, se = se, v = v, n = n, lCI = lCI, uCI = uCI, study_names = study_names, study_ids = study_ids))) + data <- data.frame(do.call(cbind, list(d = d, r = r, z = z, logOR = logOR, t = t, y = y, se = se, v = v, n = n, lCI = lCI, uCI = uCI, study_names = study_names, study_ids = study_ids, weight = weight))) } if(is.null(original_measure)){ @@ -117,7 +120,7 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, } ### into numeric - for(var in c("d", "r", "z", "logOR", "y", "se", "v", "n", "lCI", "uCI", "t")){ + for(var in c("d", "r", "z", "logOR", "y", "se", "v", "n", "lCI", "uCI", "t", "weight")){ data[,var] <- as.numeric(as.character(data[,var])) } @@ -126,7 +129,8 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, y = rep(NA, nrow(data)), se = rep(NA, nrow(data)), study_names = rep(NA, nrow(data)), - study_ids = rep(NA, nrow(data)) + study_ids = rep(NA, nrow(data)), + weight = rep(NA, nrow(data)) ) ### check for sufficient input @@ -188,6 +192,11 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, # assign factor levels data[,"study_ids"] <- as.integer(as.factor(data[,"study_ids"])) + # add weights if missing + if(all(is.na(data[,"weight"]))){ + data[,"weight"] <- NA + } + ### deal with general 'unstandardized' input if(!anyNA(data[,"y"])){ @@ -211,10 +220,11 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, output$se <- data[,"se"] output$study_names <- data[,"study_names"] output$study_ids <- data[,"study_ids"] + output$weight <- data[,"weight"] attr(output, "effect_measure") <- transformation attr(output, "original_measure") <- original_measure attr(output, "all_independent") <- all(is.na(data[,"study_ids"])) - attr(output, "weighted") <- FALSE + attr(output, "weighted") <- !all(is.na(data[,"weight"])) class(output) <- c(class(output), "data.RoBMA") return(output) @@ -367,10 +377,11 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, output$se <- data[,"se"] output$study_names <- data[,"study_names"] output$study_ids <- data[,"study_ids"] + output$weight <- data[,"weight"] attr(output, 
"effect_measure") <- transformation attr(output, "original_measure") <- original_measure attr(output, "all_independent") <- all(is.na(data[,"study_ids"])) - attr(output, "weighted") <- FALSE + attr(output, "weighted") <- !all(is.na(data[,"weight"])) class(output) <- c(class(output), "data.RoBMA") if(anyNA(data[,"se"]) | anyNA(data[,"se"])){ @@ -413,6 +424,51 @@ combine_data <- function(d = NULL, r = NULL, z = NULL, logOR = NULL, t = NULL, } } + for(type in c("posteriors_predictors", "posteriors_predictors_conditional")){ + + for(i in seq_along(object$RoBMA[[type]])){ + + if(inherits(object$RoBMA[[type]][[i]], "mixed_posteriors.factor")){ + for(j in 1:ncol(object$RoBMA[[type]][[i]])){ + object$RoBMA[[type]][[i]][,j] <- .transform_mu( + object$RoBMA[[type]][[i]][,j], + current_scale, + output_scale + ) + } + }else if(inherits(object$RoBMA[[type]][[i]], "mixed_posteriors.simple")){ + object$RoBMA[[type]][[i]] <- .transform_mu( + object$RoBMA[[type]][[i]], + current_scale, + output_scale + ) + } + + } + + } + + for(type in c("conditional", "averaged")){ + + for(i in seq_along(object$RoBMA$inference_marginal[[type]])){ + + for(j in seq_along(object$RoBMA$inference_marginal[[type]][[i]])){ + object$RoBMA$inference_marginal[[type]][[i]][[j]] <- .transform_mu( + object$RoBMA$inference_marginal[[type]][[i]][[j]], + current_scale, + output_scale + ) + attr(object$RoBMA$inference_marginal[[type]][[i]][[j]], "prior_samples") <- .transform_mu( + attr(object$RoBMA$inference_marginal[[type]][[i]][[j]], "prior_samples"), + current_scale, + output_scale + ) + } + + } + } + + object$add_info[["output_scale"]] <- output_scale return(object) diff --git a/R/utilities.R b/R/utilities.R index a61b1054..10d69535 100644 --- a/R/utilities.R +++ b/R/utilities.R @@ -62,6 +62,7 @@ assign("RoBMA_version", "notset", envir = RoBMA.private) assign("min_jags_major", 4, envir = RoBMA.private) assign("max_jags_major", 4, envir = RoBMA.private) assign("max_cores", parallel::detectCores(logical = TRUE) - 1, envir = RoBMA.private) +assign("check_scaling", TRUE, envir = RoBMA.private) # check proper BayesTools package version .check_BayesTools <- function(){ @@ -79,15 +80,16 @@ assign("max_cores", parallel::detectCores(logical = TRUE) - 1, envir = Ro BayesTools_required <- switch( paste0(RoBMA.version, collapse = "."), - "2.1.1" = c("0.1.3", "0.1.3"), - "2.1.2" = c("0.1.3", "0.1.3"), - "2.2.0" = c("0.1.3", "0.1.3"), - "2.2.1" = c("0.2.3", "999.999.999"), - "2.2.2" = c("0.2.3", "999.999.999"), - "2.2.3" = c("0.2.3", "999.999.999"), - "2.3.0" = c("0.2.3", "999.999.999"), - "2.3.1" = c("0.2.3", "999.999.999"), - "2.3.2" = c("0.2.3", "999.999.999"), + "2.1.1" = c("0.1.3", "0.1.3"), + "2.1.2" = c("0.1.3", "0.1.3"), + "2.2.0" = c("0.1.3", "0.1.3"), + "2.2.1" = c("0.2.3", "999.999.999"), + "2.2.2" = c("0.2.3", "999.999.999"), + "2.2.3" = c("0.2.3", "999.999.999"), + "2.3.0" = c("0.2.3", "999.999.999"), + "2.3.1" = c("0.2.3", "999.999.999"), + "2.3.2" = c("0.2.3", "999.999.999"), + "3.0.0" = c("0.2.14", "999.999.999"), stop("New RoBMA version needs to be defined in '.check_BayesTools' function!") ) diff --git a/README.Rmd b/README.Rmd index ed19db13..d981b244 100644 --- a/README.Rmd +++ b/README.Rmd @@ -36,18 +36,21 @@ fit <- readRDS(file = "models/README/Bem2011.RDS") This package estimates an ensemble of meta-analytic models (assuming either the presence or absence of effect, heterogeneity, and publication bias) and uses Bayesian model averaging to combine them. 
The ensemble uses Bayes factors to test for the presence of absence of the individual components (e.g., effect vs. no effect) and model-averages parameter estimates based on posterior model probabilities. The user can define a wide range of non-informative or informative priors for the effect size, heterogeneity, and publication bias components (including selection, PET, and PEESE style models). The package provides convenient functions for summary, visualizations, and fit diagnostics. -See our new pre-print @bartos2021no (https://doi.org/10.31234/osf.io/kvsp7) for the description of the newest version, RoBMA-PSMA, or our previous paper introducing the method @maier2020robust (https://doi.org/10.31234/osf.io/u4cns). The previous version of the methods is also implemented within the the user-friendly graphical user interface of JASP [@jasp14] and accompanied by a tutorial paper with more examples [@bartos2020adjusting] (https://doi.org/10.31234/osf.io/75bqn). +See our new manuscript @bartos2021no (https://doi.org/10.1002/jrsm.1594) for the description of the newest version, RoBMA-PSMA, or our previous paper introducing the method @maier2020robust (https://doi.org/10.1037/met0000405). The previous version of the methods is also implemented within the the user-friendly graphical user interface of JASP [@jasp14] and accompanied by a tutorial paper with more examples [@bartos2020adjusting] (https://doi.org/10.1177/25152459221109259). We also prepared multiple vignettes that illustrate functionality of the package: + - [Tutorial: Adjusting for publication bias in JASP and R - Selection models, PET-PEESE, and Robust Bayesian meta-analysis](https://fbartos.github.io/RoBMA/articles/Tutorial.html) - [Reproducing Bayesian model-averaged meta-analysis (BMA)](https://fbartos.github.io/RoBMA/articles/ReproducingBMA.html) - - [Fitting custom meta-analytic ensembles](https://fbartos.github.io/RoBMA/articles/CustomEnsembles.html) + - [Hierarchical Bayesian model-averaged meta-analysis](https://fbartos.github.io/RoBMA/articles/HierarchicalBMA.html) - [Informed Bayesian model-averaged meta-analysis in medicine](https://fbartos.github.io/RoBMA/articles/MedicineBMA.html) + - [Fitting custom meta-analytic ensembles](https://fbartos.github.io/RoBMA/articles/CustomEnsembles.html) + - [Robust Bayesian model-averaged meta-regression](https://fbartos.github.io/RoBMA/articles/MetaRegression.html) ## Updates -The package was updated to version 2.0 to provides Bayesian model-averaging across selection models and PET-PEESE (as described in @bartos2021no at ). +The package was updated to version 2.0 to provides Bayesian model-averaging across selection models and PET-PEESE (as described in @bartos2021no at ). ### Backwards Compatibility @@ -61,7 +64,12 @@ to install the previous version if needed. 
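The plotting changes earlier in this diff extend `plot.RoBMA()` to individual predictors and introduce `marginal_plot()`. A brief sketch of the corresponding calls, assuming a hypothetical fitted `RoBMA.reg` object `fit` with a moderator named `mod`:

``` r
# posterior distribution of the regression coefficient(s) for the moderator
plot(fit, parameter = "mod", prior = TRUE)

# model-averaged marginal means of the moderator levels, with priors overlaid
marginal_plot(fit, parameter = "mod", prior = TRUE, plot_type = "ggplot")
```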
### News -The 2.0 version brings several updates to the package: +The 3.0 version brings several **experimental** features to the package: + +- meta-regression models via the `RoBMA.reg()` function +- marginal summaries and plots of the regression models via the `marginal_summary()` and `marginal_plot()` function + +The 2.0 version brought several updates to the package: - naming of the arguments specifying prior distributions for the different parameters/components of the models changed (`priors_mu` -> `priors_effect`, `priors_tau` -> `priors_heterogeneity`, and `priors_omega` -> `priors_bias`), - prior distributions for specifying weight functions now use a dedicated function (`prior(distribution = "two.sided", parameters = ...)` -> `prior_weightfunction(distribution = "two.sided", parameters = ...)`), @@ -85,7 +93,7 @@ install.packages("RoBMA") and the development version of the package can be installed from GitHub: ``` r -devtools::install_github("fbartos/RoBMA") +devtools::install_github("FBartos/RoBMA") ``` ## Example diff --git a/README.md b/README.md index 1441bcfa..70015ee4 100644 --- a/README.md +++ b/README.md @@ -25,30 +25,37 @@ heterogeneity, and publication bias components (including selection, PET, and PEESE style models). The package provides convenient functions for summary, visualizations, and fit diagnostics. -See our new pre-print Bartoš et al. (2021) -() for the description of the -newest version, RoBMA-PSMA, or our previous paper introducing the method -Maier et al. (in press) (). The -previous version of the methods is also implemented within the the -user-friendly graphical user interface of JASP (JASP Team, 2020) and -accompanied by a tutorial paper with more examples (Bartoš et al., in -press) (). +See our new manuscript Bartoš, Maier, et al. (2022b) +() for the description of the newest +version, RoBMA-PSMA, or our previous paper introducing the method Maier +et al. (2022) (). The previous +version of the methods is also implemented within the the user-friendly +graphical user interface of JASP (JASP Team, 2020) and accompanied by a +tutorial paper with more examples (Bartoš, Maier, et al., 2022a) +(). We also prepared multiple vignettes that illustrate functionality of the package: -- [Reproducing Bayesian model-averaged meta-analysis - (BMA)](https://fbartos.github.io/RoBMA/articles/ReproducingBMA.html) -- [Fitting custom meta-analytic - ensembles](https://fbartos.github.io/RoBMA/articles/CustomEnsembles.html) -- [Informed Bayesian model-averaged meta-analysis in - medicine](https://fbartos.github.io/RoBMA/articles/MedicineBMA.html) +- [Tutorial: Adjusting for publication bias in JASP and R - Selection + models, PET-PEESE, and Robust Bayesian + meta-analysis](https://fbartos.github.io/RoBMA/articles/Tutorial.html) +- [Reproducing Bayesian model-averaged meta-analysis + (BMA)](https://fbartos.github.io/RoBMA/articles/ReproducingBMA.html) +- [Hierarchical Bayesian model-averaged + meta-analysis](https://fbartos.github.io/RoBMA/articles/HierarchicalBMA.html) +- [Informed Bayesian model-averaged meta-analysis in + medicine](https://fbartos.github.io/RoBMA/articles/MedicineBMA.html) +- [Fitting custom meta-analytic + ensembles](https://fbartos.github.io/RoBMA/articles/CustomEnsembles.html) +- [Robust Bayesian model-averaged + meta-regression](https://fbartos.github.io/RoBMA/articles/MetaRegression.html) ## Updates The package was updated to version 2.0 to provides Bayesian model-averaging across selection models and PET-PEESE (as described in -Bartoš et al. 
(2021) at ). +Bartoš, Maier, et al. (2022b) at ). ### Backwards Compatibility @@ -65,41 +72,47 @@ projects.) ### News -The 2.0 version brings several updates to the package: - -- naming of the arguments specifying prior distributions for the - different parameters/components of the models changed (`priors_mu` - -\> `priors_effect`, `priors_tau` -\> `priors_heterogeneity`, and - `priors_omega` -\> `priors_bias`), -- prior distributions for specifying weight functions now use a - dedicated function - (`prior(distribution = "two.sided", parameters = ...)` -\> - `prior_weightfunction(distribution = "two.sided", parameters = ...)`), -- new dedicated function for specifying no publication bias adjustment - component / no heterogeneity component (`prior_none()`), -- new dedicated functions for specifying models with the PET and PEESE - publication bias adjustments - (`prior_PET(distribution = "Cauchy", parameters = ...)` and - `prior_PEESE(distribution = "Cauchy", parameters = ...)`), -- new default prior distribution specification for the publication - bias adjustment part of the models (corresponding to the RoBMA-PSMA - model from Bartoš et al. (2021)), -- new `model_type` argument allowing to specify different “pre-canned” - models (`"PSMA"` = RoBMA-PSMA, `"PP"` = RoBMA-PP, `"2w"` = - corresponding to Maier et al. (in press)), -- `combine_data` function allows combination of different effect sizes - / variability measures into a common effect size measure (also used - from within the `RoBMA` function) -- better and improved automatic fitting procedure now enabled by - default (can be turned of with `autofit = FALSE`) -- prior distributions can be specified on the different scale than the - supplied effect sizes (the package fits the model on Fisher’s z - scale and back transforms the results back to the scale that was - used for prior distributions specification, Cohen’s d by default, - but both of them can be overwritten with the `prior_scale` and - `transformation` arguments), -- new prior distributions, e.g., beta or fixed weight functions, -- and plenty of small changes to the arguments, output, and etc… +The 3.0 version brings several **experimental** features to the package: + +- meta-regression models via the `RoBMA.reg()` function +- marginal summaries and plots of the regression models via the + `marginal_summary()` and `marginal_plot()` function + +The 2.0 version brought several updates to the package: + +- naming of the arguments specifying prior distributions for the + different parameters/components of the models changed (`priors_mu` -\> + `priors_effect`, `priors_tau` -\> `priors_heterogeneity`, and + `priors_omega` -\> `priors_bias`), +- prior distributions for specifying weight functions now use a + dedicated function + (`prior(distribution = "two.sided", parameters = ...)` -\> + `prior_weightfunction(distribution = "two.sided", parameters = ...)`), +- new dedicated function for specifying no publication bias adjustment + component / no heterogeneity component (`prior_none()`), +- new dedicated functions for specifying models with the PET and PEESE + publication bias adjustments + (`prior_PET(distribution = "Cauchy", parameters = ...)` and + `prior_PEESE(distribution = "Cauchy", parameters = ...)`), +- new default prior distribution specification for the publication bias + adjustment part of the models (corresponding to the RoBMA-PSMA model + from Bartoš, Maier, et al. 
(2022b)), +- new `model_type` argument allowing to specify different “pre-canned” + models (`"PSMA"` = RoBMA-PSMA, `"PP"` = RoBMA-PP, `"2w"` = + corresponding to Maier et al. (2022)), +- `combine_data` function allows combination of different effect sizes / + variability measures into a common effect size measure (also used from + within the `RoBMA` function) +- better and improved automatic fitting procedure now enabled by default + (can be turned of with `autofit = FALSE`) +- prior distributions can be specified on the different scale than the + supplied effect sizes (the package fits the model on Fisher’s z scale + and back transforms the results back to the scale that was used for + prior distributions specification, Cohen’s d by default, but both of + them can be overwritten with the `prior_scale` and `transformation` + arguments), +- new prior distributions, e.g., beta or fixed weight functions, +- and plenty of small changes to the arguments, output, and etc… ## Installation @@ -113,18 +126,18 @@ install.packages("RoBMA") and the development version of the package can be installed from GitHub: ``` r -devtools::install_github("fbartos/RoBMA") +devtools::install_github("FBartos/RoBMA") ``` ## Example To illustrate the functionality of the package, we fit the RoBMA-PSMA -model from the example in Bartoš et al. (2021) to adjust for publication -bias in the infamous Bem (2011) “Feeling the future” pre-cognition -study. The RoBMA-PSMA model combines six selection models and PET-PEESE -to adjust for publication bias. As in the pre-print, we analyze the data -as described by Bem et al. (2011) in his reply to methodological -critiques. +model from the example in Bartoš, Maier, et al. (2022b) to adjust for +publication bias in the infamous Bem (2011) “Feeling the future” +pre-cognition study. The RoBMA-PSMA model combines six selection models +and PET-PEESE to adjust for publication bias. As in the pre-print, we +analyze the data as described by Bem et al. (2011) in his reply to +methodological critiques. First, we load the package and the data set included in the package. @@ -132,7 +145,6 @@ First, we load the package and the data set included in the package. library(RoBMA) #> Loading required namespace: runjags #> Loading required namespace: mvtnorm -#> module RoBMA loaded data("Bem2011", package = "RoBMA") Bem2011 @@ -153,27 +165,22 @@ models (the new default settings of RoBMA fitting function). These models represent all possible combinations of prior distributions for the following components: -- effect size (the mean parameter - ![\\mu](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Cmu "\mu")) - - a spike at zero, representing the null hypothesis of the absence - of effect - - a standard normal distribution, representing the alternative - hypothesis of the presence of effect -- heterogeneity (the heterogeneity parameter - ![\\tau](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctau "\tau")) - - a spike at zero, representing the null hypothesis of the absence - of heterogeneity (i.e., fixed effect meta-analysis) - - an inverse gamma distribution with shape = 1 and scale = 0.15, - based on Erp et al. 
(2017), representing the alternative - hypothesis of the presence of heterogeneity (i.e., random effect - meta-analysis) -- publication bias - - no prior distribution, representing the absence of publication - bias - - eight prior distributions specifying two two-sided weight - functions, four one-sided weight functions, and PET and PEESE - publication bias adjustment, representing the presence of - publication bias +- effect size (the mean parameter $\mu$) + - a spike at zero, representing the null hypothesis of the absence of + effect + - a standard normal distribution, representing the alternative + hypothesis of the presence of effect +- heterogeneity (the heterogeneity parameter $\tau$) + - a spike at zero, representing the null hypothesis of the absence of + heterogeneity (i.e., fixed effect meta-analysis) + - an inverse gamma distribution with shape = 1 and scale = 0.15, based + on Erp et al. (2017), representing the alternative hypothesis of the + presence of heterogeneity (i.e., random effect meta-analysis) +- publication bias + - no prior distribution, representing the absence of publication bias + - eight prior distributions specifying two two-sided weight functions, + four one-sided weight functions, and PET and PEESE publication bias + adjustment, representing the presence of publication bias The prior odds of the components are by default set to make all three model categories equally likely a priory (0.5 prior probability of the @@ -195,27 +202,22 @@ inclusion Bayes factor of the ensemble components representing the alternative hypothesis of the presence of the effect, heterogeneity, and publication bias, We can see the data show very weak evidence, barely worth mentioning, against the presence of the effect -(![\\text{BF}\_{10} = 0.479](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctext%7BBF%7D_%7B10%7D%20%3D%200.479 "\text{BF}_{10} = 0.479") --\> -![\\text{BF}\_{01} = 2.09](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctext%7BBF%7D_%7B01%7D%20%3D%202.09 "\text{BF}_{01} = 2.09")), -moderate evidence for the absence of heterogeneity -(![\\text{BF}\_{\\text{rf}} = 0.143](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctext%7BBF%7D_%7B%5Ctext%7Brf%7D%7D%20%3D%200.143 "\text{BF}_{\text{rf}} = 0.143") --\> -![BF\_{\\text{fr}} = 7.00](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;BF_%7B%5Ctext%7Bfr%7D%7D%20%3D%207.00 "BF_{\text{fr}} = 7.00")), -and strong evidence for the presence of publication bias -(![\\text{BF}\_{\\text{pb}} = 16.32](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctext%7BBF%7D_%7B%5Ctext%7Bpb%7D%7D%20%3D%2016.32 "\text{BF}_{\text{pb}} = 16.32")). +($\text{BF}_{10} = 0.479$ -\> $\text{BF}_{01} = 2.09$), moderate +evidence for the absence of heterogeneity +($\text{BF}_{\text{rf}} = 0.143$ -\> $BF_{\text{fr}} = 7.00$), and +strong evidence for the presence of publication bias +($\text{BF}_{\text{pb}} = 16.32$). The second table shows model-averaged estimates weighted by the individual models’ posterior probabilities. The mean estimate -![\\mu =0.037](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Cmu%20%3D0.037 "\mu =0.037"), -95% CI \[-0.041, 0.213\], is very close to zero, corresponding to the a -priory expected absence of pre-cognition. 
The heterogeneity estimate -![\\tau](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctau "\tau") -has most of its probability mass around zero due to the higher support -of models assuming absence of the heterogeneity. The parameters omega, -representing the publication weights at each *p*-value interval are -decreasing with increasing *p*-values, showing the publication bias, as -well as the non zero PET and PEESE estimates. +$\mu =0.037$, 95% CI \[-0.041, 0.213\], is very close to zero, +corresponding to the a priory expected absence of pre-cognition. The +heterogeneity estimate $\tau$ has most of its probability mass around +zero due to the higher support of models assuming absence of the +heterogeneity. The parameters omega, representing the publication +weights at each *p*-value interval are decreasing with increasing +*p*-values, showing the publication bias, as well as the non zero PET +and PEESE estimates. ``` r summary(fit) @@ -248,13 +250,10 @@ summary(fit) We can visualize the estimated mean and heterogeneity parameters using the `plot.RoBMA()` function. The arrows in both figures represent the -point probability mass at -![\\mu = 0](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Cmu%20%3D%200 "\mu = 0") -and -![\\tau = 0](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Ctau%20%3D%200 "\tau = 0"), -corresponding to the null hypotheses of the absence of effect and -heterogeneity, both increasing in the posterior model probability from -0.5 to 0.676 and 0.875 respectively. +point probability mass at $\mu = 0$ and $\tau = 0$, corresponding to the +null hypotheses of the absence of effect and heterogeneity, both +increasing in the posterior model probability from 0.5 to 0.676 and +0.875 respectively. ``` r plot(fit, parameter = "mu", xlim = c(-0.5, 0.5)) @@ -318,11 +317,9 @@ We can also visualize the MCMC diagnostics using the diagnostics function. The function can display the chains `type = "chain"` / posterior sample densities `type = "densities"`, and averaged auto-correlations `type = "autocorrelation"`. Here, we request the -chains trace plot of the -![\\mu](https://latex.codecogs.com/png.image?%5Cdpi%7B110%7D&space;%5Cbg_white&space;%5Cmu "\mu") -parameter of the most complex model by setting `show_models = 36` (the -model numbers can be obtained from the summary function with -`type = "models"` argument). +chains trace plot of the $\mu$ parameter of the most complex model by +setting `show_models = 36` (the model numbers can be obtained from the +summary function with `type = "models"` argument). ``` r diagnostics(fit, parameter = "mu", type = "chains", show_models = 36) @@ -345,20 +342,20 @@ line-spacing="2">
-Bartoš, F., Maier, M., Quintana, D. S., & Wagenmakers, E.-J. (in press).
-Adjusting for publication bias in JASP & R – selection models,
-PET-PEESE, and robust Bayesian meta-analysis. *Advances in Methods and
-Practices in Psychological Science*.
-
+Bartoš, F., Maier, M., Quintana, D. S., & Wagenmakers, E.-J.
+(2022a). Adjusting for publication bias in JASP and R — Selection
+models, PET-PEESE, and robust Bayesian meta-analysis. *Advances in
+Methods and Practices in Psychological Science*, *5*(3), 1–19.
+
Bartoš, F., Maier, M., Wagenmakers, E.-J., Doucouliagos, H., & Stanley, -T. D. (2021). *Robust Bayesian meta-analysis: Model-averaging across -complementary publication bias adjustment methods*. PsyArXiv. - +T. D. (2022b). Robust Bayesian meta-analysis: Model-averaging across +complementary publication bias adjustment methods. *Research Synthesis +Methods*.
@@ -396,9 +393,9 @@ JASP Team. (2020). *JASP (Version 0.14)*.
-Maier, M., Bartoš, F., & Wagenmakers, E.-J. (in press). Robust Bayesian +Maier, M., Bartoš, F., & Wagenmakers, E.-J. (2022). Robust Bayesian meta-analysis: Addressing publication bias with model-averaging. -*Psychological Methods*. +*Psychological Methods*.
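A minimal sketch, assuming the `fit` object estimated earlier in this README, of the calls that the walkthrough above describes but does not show as code: listing the individual models to look up their numbers, plotting the heterogeneity parameter, and requesting the averaged autocorrelation diagnostics. The `parameter = "tau"` call mirrors the `parameter = "mu"` example and is an assumption based on the description of the heterogeneity figure.

``` r
# overview of the individual models; the model numbers index the `show_models` argument
summary(fit, type = "models")

# posterior distribution of the heterogeneity parameter
# (the arrow marks the point probability mass at tau = 0)
plot(fit, parameter = "tau")

# averaged autocorrelation of the mu parameter in the most complex model
diagnostics(fit, parameter = "mu", type = "autocorrelation", show_models = 36)
```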
diff --git a/data/Kroupova2021.RData b/data/Kroupova2021.RData new file mode 100644 index 00000000..d8bd4aca Binary files /dev/null and b/data/Kroupova2021.RData differ diff --git a/data/Lui2015.RData b/data/Lui2015.RData new file mode 100644 index 00000000..b94dbedd Binary files /dev/null and b/data/Lui2015.RData differ diff --git a/inst/REFERENCES.bib b/inst/REFERENCES.bib index 2a99d052..7ea22434 100644 --- a/inst/REFERENCES.bib +++ b/inst/REFERENCES.bib @@ -47,27 +47,29 @@ @article{erp2017estimates @article{maier2020robust, title = {Robust {B}ayesian Meta-Analysis: Addressing Publication Bias with Model-Averaging}, author = {Maier, Maximilian and Barto{\v{s}}, Franti{\v{s}}ek and Wagenmakers, Eric-Jan}, - year = {in press}, + year = {2022}, journal = {Psychological Methods}, - doi = {10.31234/osf.io/u4cns} + doi = {10.1037/met0000405} } -@unpublished{bartos2021no, +@article{bartos2021no, title = {Robust {B}ayesian meta-analysis: {M}odel-averaging across complementary publication bias adjustment methods}, author = {Barto{\v{s}}, Franti{\v{s}}ek and Maier, Maximilian and Wagenmakers, Eric-Jan and Doucouliagos, Hristos and Stanley, Tom D.}, - year = {2021}, - publisher = {PsyArXiv}, - note = {preprint at \url{https://doi.org/10.31234/osf.io/kvsp7}}, - doi = {10.31234/osf.io/kvsp7} + year = {2022}, + journal = {Research Synthesis Methods}, + doi = {10.1002/jrsm.1594} } @article{bartos2020adjusting, - title = {Adjusting for publication bias in {JASP} & {R} -- selection models, {PET-PEESE}, and robust {B}ayesian meta-analysis}, - author = {Barto{\v{s}}, Franti{\v{s}}ek and Maier, Maximilian and Quintana, Daniel S and Wagenmakers, Eric-Jan}, - year = {in press}, - journal = {Advances in Methods and Practices in Psychological Science}, - doi = {10.31234/osf.io/75bqn} + title = {Adjusting for publication bias in {JASP} and {R} — {S}election models, {PET-PEESE}, and robust {B}ayesian meta-analysis}, + author = {Barto{\v{s}}, Franti{\v{s}}ek and Maier, Maximilian, and Quintana, D.S., and Wagenmakers, Eric-Jan}, + year = {2022}, + journal = {Advances in Methods and Practices in Psychological Science}, + doi = {10.1177/25152459221109259}, + volume = {5}, + number = {3}, + pages = {1--19} } @misc{jasp14, @@ -192,3 +194,218 @@ @article{vevea1995general pages = {419--435}, doi = {10.1007/BF02294384} } + +@article{kroupova2021student, + title = {Student employment and education: {A} meta-analysis}, + author = {Kroupova, Katerina and Havranek, Tomas and Irsova, Zuzana}, + year = 2021, + journal = {CEPR Discussion Paper}, + url = {https://ssrn.com/abstract=3928863} +} + +@article{fixest, + title = {Efficient estimation of maximum likelihood models with multiple fixed-effects: the {R} package {FENmlm}}, + author = {Laurent Berg\'e}, + year = {2018}, + journal = {CREA Discussion Papers}, + number = {13}, +} + +@article{hinne2019conceptual, + title = {A conceptual introduction to {B}ayesian model averaging}, + author = {Hinne, Max and Gronau, Quentin Frederik and van den Bergh, Don and Wagenmakers, Eric-Jan}, + year = 2020, + journal = {Advances in Methods and Practices in Psychological Science}, + volume = 3, + number = 2, + pages = {200--215}, + doi = {10.1177/2515245919898657} +} + +@article{hoeting1999bayesian, + title = {Bayesian model averaging: a tutorial}, + author = {Hoeting, Jennifer A and Madigan, David and Raftery, Adrian E and Volinsky, Chris T}, + year = 1999, + journal = {Statistical Science}, + volume = 14, + number = 4, + pages = {382--401}, + doi = {10.1214/SS\%2F1009212519} +} + 
+@book{leamer1978specification, + title = {Specification searches: {A}d hoc inference with nonexperimental data}, + author = {Leamer, Edward E}, + volume = {53}, + year = {1978}, + publisher = {Wiley New York} +} + +@article{kass1995bayes, + title = {{B}ayes factors}, + author = {Kass, R. E. and Raftery, A. E.}, + year = 1995, + journal = {Journal of the American Statistical Association}, + volume = 90, + number = 430, + pages = {773--795}, + doi = {10.1080/01621459.1995.10476572} +} + +@article{rouder2019teaching, + title = {Teaching {B}ayes’ theorem: Strength of evidence as predictive accuracy}, + author = {Rouder, Jeffrey N and Morey, Richard D}, + year = 2019, + journal = {The American Statistician}, + publisher = {Taylor \& Francis}, + volume = 73, + number = 2, + pages = {186--190}, + doi = {10.1080/00031305.2017.1341334} +} + +@article{wrinch1921on, + title = {On certain fundamental principles of scientific inquiry}, + author = {Wrinch, D. and Jeffreys, H.}, + year = 1921, + journal = {Philosophical Magazine}, + volume = 42, + pages = {369--390}, + doi = {10.1080/14786442108633773} +} + +@article{maier2022nudge, + title = {No evidence for nudging after adjusting for publication bias}, + author = {Maier, Maximilian and Barto{\v{s}}, Franti{\v{s}}ek and Stanley, T. D. and Shanks, David R. and Harris, Adam J. L. and Wagenmakers, Eric-Jan}, + year = {2022}, + journal = {Proceedings of the National Academy of Sciences}, + volume = {119}, + number = {31}, + doi = {10.1073/pnas.2200300119} +} + +@unpublished{maier2022publication, + title = {Publication bias in research on construal level theory}, + author = {Maier, Maximilian and Barto{\v{s}}, Franti{\v{s}}ek and Oh, Megan and Wagenmakers, Eric-Jan and Shanks, David and Harris, Adam}, + year = {2022}, + publisher = {PsyArXiv}, + url = {https://doi.org/10.31234/osf.io/r8nyu}, + note = {Preprint available at https://doi.org/10.31234/osf.io/r8nyu} +} + +@unpublished{bartos2022adjusting, + title = {Adjusting for publication bias reveals mixed evidence for the impact of cash transfers on subjective well-being and mental health}, + author = {Barto{\v{s}}, Franti{\v{s}}ek and Maier, Maximilian and Stanley, T. D. and Wagenmakers, Eric-Jan}, + year = {2022}, + url = {https://doi.org/10.31234/osf.io/d9vcg}, + note = {Preprint available at https://doi.org/10.31234/osf.io/d9vcg} +} + +@article{ibrahim2000power, + title = {Power prior distributions for regression models}, + author = {Ibrahim, Joseph G and Chen, Ming-Hui}, + journal = {Statistical Science}, + pages = {46--60}, + year = {2000}, + doi = {10.1214/ss/1009212673} +} + +@book{asimov1988prelude, + title = {Prelude to Foundation}, + author = {Asimov, Isaac}, + year = {1988}, + ISBN = {0-385-23313-2}, + note = {p. 
172}, + place = {New York}, + publisher = {Doubleday Foundation} +} + +@article{rouder2012default, + title = {Default {B}ayes factors for model selection in regression}, + author = {Rouder, Jeffrey N and Morey, Richard D.}, + journal = {Multivariate Behavioral Research}, + doi = {10.1080/00273171.2012.734737}, + number = 6, + volume = 47, + pages = {877--903}, + year = {2012} +} + +@unpublished{wagenmakers2021history, + title = {History and nature of the {J}effreys-{L}indley paradox}, + author = {Wagenmakers, Eric-Jan and Ly, Alexander}, + journal = {arXiv}, + year = {2021}, + url = {https://arxiv.org/abs/2111.10191}, + note = {Preprint available at https://arxiv.org/abs/2111.10191} +} + +@article{metafor, + title = {Conducting meta-analyses in {R} with the {metafor} package}, + author = {Wolfgang, Viechtbauer}, + year = 2010, + journal = {Journal of Statistical Software}, + volume = 36, + number = 3, + pages = {1--48}, + url = {https://www.jstatsoft.org/v36/i03/} +} + +@article{konstantopoulos2011fixed, + title = {Fixed effects and variance components estimation in three-level meta-analysis}, + author = {Konstantopoulos, Spyros}, + journal = {Research Synthesis Methods}, + volume = {2}, + number = {1}, + pages = {61--76}, + year = {2011}, + doi = {10.1002/jrsm.35⁠}, + publisher = {Wiley Online Library} +} + + +@misc{metadat, + title = {{m}etadat: {M}eta-analysis datasets}, + author = {Thomas, White and Daniel, Noble and Alistair, Senior and W. Kyle, Hamilton and Wolfgang, Viechtbauer}, + year = 2019, + url = {https://cran.r-project.org/package=metadat} +} + +@article{lui2015intergenerational, + title = {Intergenerational cultural conflict, mental health, and educational outcomes among Asian and {L}atino/a {A}mericans: {Q}ualitative and meta-analytic review}, + author = {Lui, P Priscilla}, + year = {2015}, + journal = {Psychological Bulletin}, + publisher = {American Psychological Association}, + volume = {141}, + number = {2}, + doi = {10.1037/a0038449}, + pages = {404--446} +} + +@article{iyengar1988selection, + title = {Selection models and the file drawer problem}, + author = {Iyengar, Satish and Greenhouse, Joel B}, + year = 1988, + journal = {Statistical Science}, + pages = {109--117}, + doi = {10.1214/ss/1177013012}, + volume = {3}, + number = {1} +} + +@article{weightr, + title = {{weightr}: {E}stimating weight-function models for publication bias}, + author = {Coburn, Kathleen M and Vevea, Jack L and Coburn, Maintainer Kathleen M}, + year = 2019, + note = {R package version 2.0.2}, + journal = {https://CRAN.R-project.org/package=weightr} +} + +@misc{RoBMA, + title = {{RoBMA}: {A}n {R} package for robust {B}ayesian meta-analyses}, + author = {Barto{\v{s}}, Franti{\v{s}}ek and Maier, Maximilian}, + year = {2020}, + note = {R package}, + url = {https://CRAN.R-project.org/package=RoBMA}, +} diff --git a/man/Anderson2010.Rd b/man/Anderson2010.Rd index ceb95d26..dd93990c 100644 --- a/man/Anderson2010.Rd +++ b/man/Anderson2010.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/data.R +% Please edit documentation in R/datasets.R \docType{data} \name{Anderson2010} \alias{Anderson2010} diff --git a/man/Bem2011.Rd b/man/Bem2011.Rd index 9361627a..fa9380b4 100644 --- a/man/Bem2011.Rd +++ b/man/Bem2011.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/data.R +% Please edit documentation in R/datasets.R \docType{data} \name{Bem2011} \alias{Bem2011} diff --git a/man/Kroupova2021.Rd 
b/man/Kroupova2021.Rd new file mode 100644 index 00000000..3be22fac --- /dev/null +++ b/man/Kroupova2021.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datasets.R +\docType{data} +\name{Kroupova2021} +\alias{Kroupova2021} +\title{881 estimates from 69 studies of a relationship between employment and +educational outcomes collected by \insertCite{kroupova2021student;textual}{RoBMA}} +\format{ +A data.frame with 11 columns and 881 observations. +} +\usage{ +Kroupova2021 +} +\value{ +a data.frame. +} +\description{ +The data set contains partial correlation coefficients, standard errors, +study labels, samples sizes, type of the educational outcome, intensity of the +employment, gender of the student population, study location, study design, whether +the study controlled for endogenity, and whether the study controlled for motivation. +The original data set including additional variables and the publication can be found +at http://meta-analysis.cz/students. +(Note that some standard errors and employment intensities are missing.) +} +\references{ +\insertAllCited{} +} +\keyword{datasets} diff --git a/man/Lui2015.Rd b/man/Lui2015.Rd new file mode 100644 index 00000000..e2d3f5c8 --- /dev/null +++ b/man/Lui2015.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datasets.R +\docType{data} +\name{Lui2015} +\alias{Lui2015} +\title{18 studies of a relationship between acculturation mismatch and +intergenerational cultural conflict collected by +\insertCite{lui2015intergenerational;textual}{RoBMA}} +\format{ +A data.frame with 3 columns and 18 observations. +} +\usage{ +Lui2015 +} +\value{ +a data.frame. +} +\description{ +The data set contains correlation coefficients r, +sample sizes n, and labels for each study assessing the +relationship between acculturation mismatch (that is the result of the contrast +between the collectivist cultures of Asian and Latin immigrant groups +and the individualist culture in the United States) and intergenerational cultural +conflict \insertCite{lui2015intergenerational}{RoBMA} which was used as an +example in \insertCite{bartos2020adjusting;textual}{RoBMA}. 
+} +\references{ +\insertAllCited{} +} +\keyword{datasets} diff --git a/man/Poulsen2006.Rd b/man/Poulsen2006.Rd index 46ca5aab..8a529d2e 100644 --- a/man/Poulsen2006.Rd +++ b/man/Poulsen2006.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/data.R +% Please edit documentation in R/datasets.R \docType{data} \name{Poulsen2006} \alias{Poulsen2006} diff --git a/man/RoBMA.Rd b/man/RoBMA.Rd index fcf5765e..8758048a 100644 --- a/man/RoBMA.Rd +++ b/man/RoBMA.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/main.R +% Please edit documentation in R/RoBMA.R \name{RoBMA} \alias{RoBMA} \title{Estimate a Robust Bayesian Meta-Analysis} @@ -19,6 +19,7 @@ RoBMA( study_names = NULL, study_ids = NULL, data = NULL, + weight = NULL, transformation = if (is.null(y)) "fishers_z" else "none", prior_scale = if (is.null(y)) "cohens_d" else "none", effect_direction = "positive", @@ -44,8 +45,8 @@ RoBMA( priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = 0)), priors_bias_null = prior_none(), - priors_rho = prior("beta", parameters = list(alpha = 1, beta = 1)), - priors_rho_null = NULL, + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, chains = 3, sample = 5000, burnin = 2000, @@ -95,6 +96,9 @@ studies being independent.} an alternative input entry to specifying the \code{d}, \code{r}, \code{y}, etc... directly. I.e., you cannot pass the a data.frame and reference to the columns.} +\item{weight}{specifies likelihood weights of the individual estimates. +Notes that this is an untested experimental feature.} + \item{transformation}{transformation to be applied to the supplied effect sizes before fitting the individual models. Defaults to \code{"fishers_z"}. We highly recommend using \code{"fishers_z"} @@ -167,14 +171,14 @@ a point null hypotheses at zero (a fixed effect meta-analytic models), that will be treated as belonging to the null hypothesis. Defaults no publication bias adjustment, \code{prior_none()}.} -\item{priors_rho}{list of prior distributions for the variance allocation (\code{rho}) -parameter that will be treated as belonging to the alternative hypothesis. This setting allows -users to fit a three-level meta-analysis when \code{study_ids} are supplied. Note that this is -an experimental feature and see News for more details. Defaults to a beta distribution +\item{priors_hierarchical}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +Note that this is an experimental feature and see News for more details. Defaults to a beta distribution \code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}.} -\item{priors_rho_null}{list of prior distributions for the variance allocation (\code{rho}) -parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}.} +\item{priors_hierarchical_null}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the null hypothesis. 
Defaults to \code{NULL}.} \item{chains}{a number of chains of the MCMC algorithm.} diff --git a/man/RoBMA.reg.Rd b/man/RoBMA.reg.Rd new file mode 100644 index 00000000..45219569 --- /dev/null +++ b/man/RoBMA.reg.Rd @@ -0,0 +1,275 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/RoBMA-reg.R +\name{RoBMA.reg} +\alias{RoBMA.reg} +\title{Estimate a Robust Bayesian Meta-Analysis Meta-Regression} +\usage{ +RoBMA.reg( + formula, + data, + test_predictors = TRUE, + study_names = NULL, + study_ids = NULL, + transformation = if (any(colnames(data) != "y")) "fishers_z" else "none", + prior_scale = if (any(colnames(data) != "y")) "cohens_d" else "none", + standardize_predictors = TRUE, + effect_direction = "positive", + priors = NULL, + model_type = NULL, + priors_effect = prior(distribution = "normal", parameters = list(mean = 0, sd = 1)), + priors_heterogeneity = prior(distribution = "invgamma", parameters = list(shape = 1, + scale = 0.15)), + priors_bias = list(prior_weightfunction(distribution = "two.sided", parameters = + list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, + 1), steps = c(0.05, 0.1)), prior_weights = 1/12), prior_weightfunction(distribution = + "one.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = + 1/12), prior_weightfunction(distribution = "one.sided", parameters = list(alpha = + c(1, 1, 1), steps = c(0.025, 0.05)), prior_weights = 1/12), + + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, + 1), steps = c(0.05, 0.5)), prior_weights = 1/12), prior_weightfunction(distribution = + "one.sided", parameters = list(alpha = c(1, 1, 1, 1), steps = c(0.025, 0.05, 0.5)), + prior_weights = 1/12), prior_PET(distribution = "Cauchy", parameters = list(0, 1), + truncation = list(0, Inf), prior_weights = 1/4), prior_PEESE(distribution = "Cauchy", + parameters = list(0, 5), truncation = list(0, Inf), prior_weights = 1/4)), + priors_effect_null = prior(distribution = "point", parameters = list(location = 0)), + priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = + 0)), + priors_bias_null = prior_none(), + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, + prior_covariates = prior("normal", parameters = list(mean = 0, sd = 0.25)), + prior_covariates_null = prior("spike", parameters = list(location = 0)), + prior_factors = prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), + contrast = "meandif"), + prior_factors_null = prior_factor("spike", parameters = list(location = 0), contrast = + "meandif"), + chains = 3, + sample = 5000, + burnin = 2000, + adapt = 500, + thin = 1, + parallel = FALSE, + autofit = TRUE, + autofit_control = set_autofit_control(), + convergence_checks = set_convergence_checks(), + save = "all", + seed = NULL, + silent = TRUE, + ... +) +} +\arguments{ +\item{formula}{a formula for the meta-regression model} + +\item{data}{a data object created by the \code{combine_data} function. This is +an alternative input entry to specifying the \code{d}, \code{r}, \code{y}, etc... +directly. I.e., you cannot pass the a data.frame and reference to the columns.} + +\item{test_predictors}{vector of predictor names that will be test +(i.e., assigned both the null and alternative prior distributions). 
+Defaults to \code{TRUE}, all predictors are tested using the default +prior distributions (i.e., \code{prior_covariates}, +\code{prior_covariates_null}, \code{prior_factors}, and +\code{prior_factors_null}). To only estimate +and adjust for the effect of predictors use \code{FALSE}. If +\code{priors} is specified, any settings in \code{test_predictors} +is overridden.} + +\item{study_names}{an optional argument with the names of the studies} + +\item{study_ids}{an optional argument specifying dependency between the +studies (for using a multilevel model). Defaults to \code{NULL} for +studies being independent.} + +\item{transformation}{transformation to be applied to the supplied +effect sizes before fitting the individual models. Defaults to +\code{"fishers_z"}. We highly recommend using \code{"fishers_z"} +transformation since it is the only variance stabilizing measure +and does not bias PET and PEESE style models. The other options are +\code{"cohens_d"}, correlation coefficient \code{"r"} and \code{"logOR"}. +Supplying \code{"none"} will treat the effect sizes as unstandardized and +refrain from any transformations.} + +\item{prior_scale}{a scale used to define priors. Defaults to \code{"cohens_d"}. +Other options are \code{"fishers_z"}, correlation coefficient \code{"r"}, +and \code{"logOR"}. The prior scale does not need to match the effect sizes measure - +the samples from prior distributions are internally transformed to match the +\code{transformation} of the data. The \code{prior_scale} corresponds to +the scale of default output, but can be changed within the summary function.} + +\item{standardize_predictors}{whether continuous predictors should be standardized prior to +estimating the model. Defaults to \code{TRUE}.} + +\item{effect_direction}{the expected direction of the effect. The one-sided +selection sets the weights omega to 1 to significant results in the expected +direction. Defaults to \code{"positive"} (another option is \code{"negative"}).} + +\item{priors}{named list of prior distributions for each predictor +(with names corresponding to the predictors). It allows users to +specify both the null and alternative hypothesis prior distributions +for each predictor by assigning the corresponding element of the named +list with another named list (with \code{"null"} and +\code{"alt"}). +If only one prior is specified for a given parameter, it is +assumed to correspond to the alternative hypotheses and the default null +hypothesis is specified (i.e., \code{prior_covariates_null} or +\code{prior_factors_null}). +If a named list with only one named prior distribution is provided (either +\code{"null"} or \code{"alt"}), only this prior distribution is used and no +default distribution is filled in. +Parameters without specified prior distributions are assumed to be only adjusted +for using the default alternative hypothesis prior distributions (i.e., +\code{prior_covariates} or \code{prior_factors}). +If \code{priors} is specified, \code{test_predictors} is ignored.} + +\item{model_type}{string specifying the RoBMA ensemble. Defaults to \code{NULL}. +The other options are \code{"PSMA"}, \code{"PP"}, and \code{"2w"} which override +settings passed to the \code{priors_effect}, \code{priors_heterogeneity}, +\code{priors_effect}, \code{priors_effect_null}, \code{priors_heterogeneity_null}, +\code{priors_bias_null}, and \code{priors_effect}. 
See details for more information +about the different model types.} + +\item{priors_effect}{list of prior distributions for the effect size (\code{mu}) +parameter that will be treated as belonging to the alternative hypothesis. Defaults to +a standard normal distribution +\code{prior(distribution = "normal", parameters = list(mean = 0, sd = 1))}.} + +\item{priors_heterogeneity}{list of prior distributions for the heterogeneity \code{tau} +parameter that will be treated as belonging to the alternative hypothesis. Defaults to +\code{prior(distribution = "invgamma", parameters = list(shape = 1, scale = .15))} that +is based on heterogeneities estimates from psychology \insertCite{erp2017estimates}{RoBMA}.} + +\item{priors_bias}{list of prior distributions for the publication bias adjustment +component that will be treated as belonging to the alternative hypothesis. +Defaults to \code{list( +prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1), + steps = c(0.05)), prior_weights = 1/12), +prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, 1), + steps = c(0.05, 0.10)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1), + steps = c(0.05)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), + steps = c(0.025, 0.05)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), + steps = c(0.05, 0.5)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1, 1), + steps = c(0.025, 0.05, 0.5)), prior_weights = 1/12), +prior_PET(distribution = "Cauchy", parameters = list(0,1), truncation = list(0, Inf), + prior_weights = 1/4), +prior_PEESE(distribution = "Cauchy", parameters = list(0,5), truncation = list(0, Inf), + prior_weights = 1/4) +)}, corresponding to the RoBMA-PSMA model introduce by \insertCite{bartos2021no;textual}{RoBMA}.} + +\item{priors_effect_null}{list of prior distributions for the effect size (\code{mu}) +parameter that will be treated as belonging to the null hypothesis. Defaults to +a point null hypotheses at zero, +\code{prior(distribution = "point", parameters = list(location = 0))}.} + +\item{priors_heterogeneity_null}{list of prior distributions for the heterogeneity \code{tau} +parameter that will be treated as belonging to the null hypothesis. Defaults to +a point null hypotheses at zero (a fixed effect meta-analytic models), +\code{prior(distribution = "point", parameters = list(location = 0))}.} + +\item{priors_bias_null}{list of prior weight functions for the \code{omega} parameter +that will be treated as belonging to the null hypothesis. Defaults no publication +bias adjustment, \code{prior_none()}.} + +\item{priors_hierarchical}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +Note that this is an experimental feature and see News for more details. Defaults to a beta distribution +\code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}.} + +\item{priors_hierarchical_null}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the null hypothesis. 
Defaults to \code{NULL}.} + +\item{prior_covariates}{a prior distributions for the regression parameter +of continuous covariates on the effect size under the alternative hypothesis +(unless set explicitly in \code{priors}). Defaults to a relatively wide normal +distribution \code{prior(distribution = "normal", parameters = list(mean = 0, sd = 0.25))}.} + +\item{prior_covariates_null}{a prior distributions for the regression parameter +of continuous covariates on the effect size under the null hypothesis +(unless set explicitly in \code{priors}). Defaults to a no effect +\code{prior("spike", parameters = list(location = 0))}.} + +\item{prior_factors}{a prior distributions for the regression parameter +of categorical covariates on the effect size under the alternative hypothesis +(unless set explicitly in \code{priors}). Defaults to a relatively wide +multivariate normal distribution specifying differences from the mean contrasts +\code{prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), contrast = "meandif")}.} + +\item{prior_factors_null}{a prior distributions for the regression parameter +of categorical covariates on the effect size under the null hypothesis +(unless set explicitly in \code{priors}). Defaults to a no effect +\code{prior("spike", parameters = list(location = 0))}.} + +\item{chains}{a number of chains of the MCMC algorithm.} + +\item{sample}{a number of sampling iterations of the MCMC algorithm. +Defaults to \code{5000}.} + +\item{burnin}{a number of burnin iterations of the MCMC algorithm. +Defaults to \code{2000}.} + +\item{adapt}{a number of adaptation iterations of the MCMC algorithm. +Defaults to \code{500}.} + +\item{thin}{a thinning of the chains of the MCMC algorithm. Defaults to +\code{1}.} + +\item{parallel}{whether the individual models should be fitted in parallel. +Defaults to \code{FALSE}. The implementation is not completely stable +and might cause a connection error.} + +\item{autofit}{whether the model should be fitted until the convergence +criteria (specified in \code{autofit_control}) are satisfied. Defaults to +\code{TRUE}.} + +\item{autofit_control}{allows to pass autofit control settings with the +\code{\link[=set_autofit_control]{set_autofit_control()}} function. See \code{?set_autofit_control} for +options and default settings.} + +\item{convergence_checks}{automatic convergence checks to assess the fitted +models, passed with \code{\link[=set_convergence_checks]{set_convergence_checks()}} function. See +\code{?set_convergence_checks} for options and default settings.} + +\item{save}{whether all models posterior distributions should be kept +after obtaining a model-averaged result. Defaults to \code{"all"} which +does not remove anything. Set to \code{"min"} to significantly reduce +the size of final object, however, some model diagnostics and further +manipulation with the object will not be possible.} + +\item{seed}{a seed to be set before model fitting, marginal likelihood +computation, and posterior mixing for reproducibility of results. Defaults +to \code{NULL} - no seed is set.} + +\item{silent}{whether all print messages regarding the fitting process +should be suppressed. Defaults to \code{TRUE}. Note that \code{parallel = TRUE} +also suppresses all messages.} + +\item{...}{additional arguments.} +} +\value{ +\code{RoBMA.reg} returns an object of class 'RoBMA.reg'. +} +\description{ +\code{RoBMA} is used to estimate a Robust Bayesian +Meta-Analysis. 
The interface allows a complete customization of +the ensemble with different prior (or list of prior) distributions +for each component. +} +\details{ +See \code{\link[=RoBMA]{RoBMA()}} for more details. + +Note that these default prior distributions are relatively wide and more informed +prior distributions for testing for the presence of moderation should be considered. +} +\references{ +\insertAllCited{} +} +\seealso{ +\code{\link[=RoBMA]{RoBMA()}} \code{\link[=summary.RoBMA]{summary.RoBMA()}}, \code{\link[=update.RoBMA]{update.RoBMA()}}, \code{\link[=check_setup.reg]{check_setup.reg()}} +} diff --git a/man/check_setup.Rd b/man/check_setup.Rd index 9c772130..7725efcb 100644 --- a/man/check_setup.Rd +++ b/man/check_setup.Rd @@ -27,8 +27,8 @@ check_setup( priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = 0)), priors_bias_null = prior_none(), - priors_rho = prior("beta", parameters = list(alpha = 1, beta = 1)), - priors_rho_null = NULL, + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, models = FALSE, silent = FALSE ) @@ -86,14 +86,14 @@ a point null hypotheses at zero (a fixed effect meta-analytic models), that will be treated as belonging to the null hypothesis. Defaults no publication bias adjustment, \code{prior_none()}.} -\item{priors_rho}{list of prior distributions for the variance allocation (\code{rho}) -parameter that will be treated as belonging to the alternative hypothesis. This setting allows -users to fit a three-level meta-analysis when \code{study_ids} are supplied. Note that this is -an experimental feature and see News for more details. Defaults to a beta distribution +\item{priors_hierarchical}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +Note that this is an experimental feature and see News for more details. Defaults to a beta distribution \code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}.} -\item{priors_rho_null}{list of prior distributions for the variance allocation (\code{rho}) -parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}.} +\item{priors_hierarchical_null}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}.} \item{models}{should the models' details be printed.} @@ -108,5 +108,5 @@ implied by the specified prior distributions. It is useful for checking the ensemble configuration prior to fitting all of the models. 
} \seealso{ -\code{\link[=RoBMA]{RoBMA()}} +\code{\link[=check_setup.reg]{check_setup.reg()}} \code{\link[=RoBMA]{RoBMA()}} } diff --git a/man/check_setup.reg.Rd b/man/check_setup.reg.Rd new file mode 100644 index 00000000..00b4f5ee --- /dev/null +++ b/man/check_setup.reg.Rd @@ -0,0 +1,215 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/check-input-and-settings.R +\name{check_setup.reg} +\alias{check_setup.reg} +\title{Prints summary of \code{"RoBMA.reg"} ensemble implied by the specified priors +and formula} +\usage{ +check_setup.reg( + formula, + data, + test_predictors = TRUE, + study_names = NULL, + study_ids = NULL, + transformation = if (any(colnames(data) != "y")) "fishers_z" else "none", + prior_scale = if (any(colnames(data) != "y")) "cohens_d" else "none", + standardize_predictors = TRUE, + effect_direction = "positive", + priors = NULL, + model_type = NULL, + priors_effect = prior(distribution = "normal", parameters = list(mean = 0, sd = 1)), + priors_heterogeneity = prior(distribution = "invgamma", parameters = list(shape = 1, + scale = 0.15)), + priors_bias = list(prior_weightfunction(distribution = "two.sided", parameters = + list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/12), + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, + 1), steps = c(0.05, 0.1)), prior_weights = 1/12), prior_weightfunction(distribution = + "one.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = + 1/12), prior_weightfunction(distribution = "one.sided", parameters = list(alpha = + c(1, 1, 1), steps = c(0.025, 0.05)), prior_weights = 1/12), + + prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, + 1), steps = c(0.05, 0.5)), prior_weights = 1/12), prior_weightfunction(distribution = + "one.sided", parameters = list(alpha = c(1, 1, 1, 1), steps = c(0.025, 0.05, 0.5)), + prior_weights = 1/12), prior_PET(distribution = "Cauchy", parameters = list(0, 1), + truncation = list(0, Inf), prior_weights = 1/4), prior_PEESE(distribution = "Cauchy", + parameters = list(0, 5), truncation = list(0, Inf), prior_weights = 1/4)), + priors_effect_null = prior(distribution = "point", parameters = list(location = 0)), + priors_heterogeneity_null = prior(distribution = "point", parameters = list(location = + 0)), + priors_bias_null = prior_none(), + priors_hierarchical = prior("beta", parameters = list(alpha = 1, beta = 1)), + priors_hierarchical_null = NULL, + prior_covariates = prior("normal", parameters = list(mean = 0, sd = 0.25)), + prior_covariates_null = prior("spike", parameters = list(location = 0)), + prior_factors = prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), + contrast = "meandif"), + prior_factors_null = prior("spike", parameters = list(location = 0)), + models = FALSE, + silent = FALSE, + ... +) +} +\arguments{ +\item{formula}{a formula for the meta-regression model} + +\item{data}{a data object created by the \code{combine_data} function. This is +an alternative input entry to specifying the \code{d}, \code{r}, \code{y}, etc... +directly. I.e., you cannot pass the a data.frame and reference to the columns.} + +\item{test_predictors}{vector of predictor names that will be test +(i.e., assigned both the null and alternative prior distributions). +Defaults to \code{TRUE}, all predictors are tested using the default +prior distributions (i.e., \code{prior_covariates}, +\code{prior_covariates_null}, \code{prior_factors}, and +\code{prior_factors_null}). 
To only estimate +and adjust for the effect of predictors use \code{FALSE}. If +\code{priors} is specified, any settings in \code{test_predictors} +is overridden.} + +\item{study_names}{an optional argument with the names of the studies} + +\item{study_ids}{an optional argument specifying dependency between the +studies (for using a multilevel model). Defaults to \code{NULL} for +studies being independent.} + +\item{transformation}{transformation to be applied to the supplied +effect sizes before fitting the individual models. Defaults to +\code{"fishers_z"}. We highly recommend using \code{"fishers_z"} +transformation since it is the only variance stabilizing measure +and does not bias PET and PEESE style models. The other options are +\code{"cohens_d"}, correlation coefficient \code{"r"} and \code{"logOR"}. +Supplying \code{"none"} will treat the effect sizes as unstandardized and +refrain from any transformations.} + +\item{prior_scale}{a scale used to define priors. Defaults to \code{"cohens_d"}. +Other options are \code{"fishers_z"}, correlation coefficient \code{"r"}, +and \code{"logOR"}. The prior scale does not need to match the effect sizes measure - +the samples from prior distributions are internally transformed to match the +\code{transformation} of the data. The \code{prior_scale} corresponds to +the scale of default output, but can be changed within the summary function.} + +\item{standardize_predictors}{whether continuous predictors should be standardized prior to +estimating the model. Defaults to \code{TRUE}.} + +\item{effect_direction}{the expected direction of the effect. The one-sided +selection sets the weights omega to 1 to significant results in the expected +direction. Defaults to \code{"positive"} (another option is \code{"negative"}).} + +\item{priors}{named list of prior distributions for each predictor +(with names corresponding to the predictors). It allows users to +specify both the null and alternative hypothesis prior distributions +for each predictor by assigning the corresponding element of the named +list with another named list (with \code{"null"} and +\code{"alt"}). +If only one prior is specified for a given parameter, it is +assumed to correspond to the alternative hypotheses and the default null +hypothesis is specified (i.e., \code{prior_covariates_null} or +\code{prior_factors_null}). +If a named list with only one named prior distribution is provided (either +\code{"null"} or \code{"alt"}), only this prior distribution is used and no +default distribution is filled in. +Parameters without specified prior distributions are assumed to be only adjusted +for using the default alternative hypothesis prior distributions (i.e., +\code{prior_covariates} or \code{prior_factors}). +If \code{priors} is specified, \code{test_predictors} is ignored.} + +\item{model_type}{string specifying the RoBMA ensemble. Defaults to \code{NULL}. +The other options are \code{"PSMA"}, \code{"PP"}, and \code{"2w"} which override +settings passed to the \code{priors_effect}, \code{priors_heterogeneity}, +\code{priors_effect}, \code{priors_effect_null}, \code{priors_heterogeneity_null}, +\code{priors_bias_null}, and \code{priors_effect}. See details for more information +about the different model types.} + +\item{priors_effect}{list of prior distributions for the effect size (\code{mu}) +parameter that will be treated as belonging to the alternative hypothesis. 
Defaults to +a standard normal distribution +\code{prior(distribution = "normal", parameters = list(mean = 0, sd = 1))}.} + +\item{priors_heterogeneity}{list of prior distributions for the heterogeneity \code{tau} +parameter that will be treated as belonging to the alternative hypothesis. Defaults to +\code{prior(distribution = "invgamma", parameters = list(shape = 1, scale = .15))} that +is based on heterogeneities estimates from psychology \insertCite{erp2017estimates}{RoBMA}.} + +\item{priors_bias}{list of prior distributions for the publication bias adjustment +component that will be treated as belonging to the alternative hypothesis. +Defaults to \code{list( +prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1), + steps = c(0.05)), prior_weights = 1/12), +prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1, 1), + steps = c(0.05, 0.10)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1), + steps = c(0.05)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), + steps = c(0.025, 0.05)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1), + steps = c(0.05, 0.5)), prior_weights = 1/12), +prior_weightfunction(distribution = "one.sided", parameters = list(alpha = c(1, 1, 1, 1), + steps = c(0.025, 0.05, 0.5)), prior_weights = 1/12), +prior_PET(distribution = "Cauchy", parameters = list(0,1), truncation = list(0, Inf), + prior_weights = 1/4), +prior_PEESE(distribution = "Cauchy", parameters = list(0,5), truncation = list(0, Inf), + prior_weights = 1/4) +)}, corresponding to the RoBMA-PSMA model introduce by \insertCite{bartos2021no;textual}{RoBMA}.} + +\item{priors_effect_null}{list of prior distributions for the effect size (\code{mu}) +parameter that will be treated as belonging to the null hypothesis. Defaults to +a point null hypotheses at zero, +\code{prior(distribution = "point", parameters = list(location = 0))}.} + +\item{priors_heterogeneity_null}{list of prior distributions for the heterogeneity \code{tau} +parameter that will be treated as belonging to the null hypothesis. Defaults to +a point null hypotheses at zero (a fixed effect meta-analytic models), +\code{prior(distribution = "point", parameters = list(location = 0))}.} + +\item{priors_bias_null}{list of prior weight functions for the \code{omega} parameter +that will be treated as belonging to the null hypothesis. Defaults no publication +bias adjustment, \code{prior_none()}.} + +\item{priors_hierarchical}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +Note that this is an experimental feature and see News for more details. Defaults to a beta distribution +\code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}.} + +\item{priors_hierarchical_null}{list of prior distributions for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}.} + +\item{prior_covariates}{a prior distributions for the regression parameter +of continuous covariates on the effect size under the alternative hypothesis +(unless set explicitly in \code{priors}). 
Defaults to a relatively wide normal +distribution \code{prior(distribution = "normal", parameters = list(mean = 0, sd = 0.25))}.} + +\item{prior_covariates_null}{a prior distributions for the regression parameter +of continuous covariates on the effect size under the null hypothesis +(unless set explicitly in \code{priors}). Defaults to a no effect +\code{prior("spike", parameters = list(location = 0))}.} + +\item{prior_factors}{a prior distributions for the regression parameter +of categorical covariates on the effect size under the alternative hypothesis +(unless set explicitly in \code{priors}). Defaults to a relatively wide +multivariate normal distribution specifying differences from the mean contrasts +\code{prior_factor("mnormal", parameters = list(mean = 0, sd = 0.25), contrast = "meandif")}.} + +\item{prior_factors_null}{a prior distributions for the regression parameter +of categorical covariates on the effect size under the null hypothesis +(unless set explicitly in \code{priors}). Defaults to a no effect +\code{prior("spike", parameters = list(location = 0))}.} + +\item{models}{should the models' details be printed.} + +\item{silent}{do not print the results.} + +\item{...}{additional arguments.} +} +\value{ +\code{check_setup.reg} invisibly returns list of summary tables. +} +\description{ +\code{check_setup} prints summary of \code{"RoBMA.reg"} ensemble +implied by the specified prior distributions. It is useful for checking +the ensemble configuration prior to fitting all of the models. +} +\seealso{ +\code{\link[=check_setup]{check_setup()}} \code{\link[=RoBMA.reg]{RoBMA.reg()}} +} diff --git a/man/combine_data.Rd b/man/combine_data.Rd index 734dff54..cde052e8 100644 --- a/man/combine_data.Rd +++ b/man/combine_data.Rd @@ -18,6 +18,7 @@ combine_data( uCI = NULL, study_names = NULL, study_ids = NULL, + weight = NULL, data = NULL, transformation = "fishers_z", return_all = FALSE @@ -53,6 +54,9 @@ transformations are unavailable with this type of input)} studies (for using a multilevel model). Defaults to \code{NULL} for studies being independent.} +\item{weight}{specifies likelihood weights of the individual estimates. +Notes that this is an untested experimental feature.} + \item{data}{a data frame with column names corresponding to the variable names used to supply data individually} diff --git a/man/contr.meandif.Rd b/man/contr.meandif.Rd new file mode 100644 index 00000000..12c5cb74 --- /dev/null +++ b/man/contr.meandif.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/priors.R +\name{contr.meandif} +\alias{contr.meandif} +\title{Mean difference contrast matrix} +\usage{ +contr.meandif(n, contrasts = TRUE) +} +\arguments{ +\item{n}{a vector of levels for a factor, or the number of levels} + +\item{contrasts}{logical indicating whether contrasts should be computed} +} +\value{ +A matrix with n rows and k columns, with k = n - 1 if \code{contrasts = TRUE} and k = n +if \code{contrasts = FALSE}. +} +\description{ +Return a matrix of mean difference contrasts. +This is an adjustment to the \code{contr.orthonormal} that ascertains that the prior +distributions on difference between the gran mean and factor level are identical independent +of the number of factor levels (which does not hold for the orthonormal contrast). 
Furthermore, +the contrast is re-scaled so the specified prior distribution exactly corresponds to the prior +distribution on the difference between each factor level and the grand mean -- this is approximately +twice the scale of \code{contr.orthonormal}. +} +\examples{ +contr.meandif(c(1, 2)) +contr.meandif(c(1, 2, 3)) + +} +\references{ +\insertAllCited{} +} diff --git a/man/contr.orthonormal.Rd b/man/contr.orthonormal.Rd new file mode 100644 index 00000000..90281643 --- /dev/null +++ b/man/contr.orthonormal.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/priors.R +\name{contr.orthonormal} +\alias{contr.orthonormal} +\title{Orthonormal contrast matrix} +\usage{ +contr.orthonormal(n, contrasts = TRUE) +} +\arguments{ +\item{n}{a vector of levels for a factor, or the number of levels} + +\item{contrasts}{logical indicating whether contrasts should be computed} +} +\value{ +A matrix with n rows and k columns, with k = n - 1 if \code{contrasts = TRUE} and k = n +if \code{contrasts = FALSE}. +} +\description{ +Return a matrix of orthonormal contrasts. +Code is based on \code{stanova::contr.bayes} and corresponds to the description +by \insertCite{rouder2012default;textual}{BayesTools}. +} +\examples{ +contr.orthonormal(c(1, 2)) +contr.orthonormal(c(1, 2, 3)) + +} +\references{ +\insertAllCited{} +} diff --git a/man/diagnostics.Rd b/man/diagnostics.Rd index f550a636..486ec008 100644 --- a/man/diagnostics.Rd +++ b/man/diagnostics.Rd @@ -2,6 +2,9 @@ % Please edit documentation in R/diagnostics.R \name{diagnostics} \alias{diagnostics} +\alias{diagnostics_autocorrelation} +\alias{diagnostics_trace} +\alias{diagnostics_density} \title{Checks a fitted RoBMA object} \usage{ diagnostics( @@ -14,6 +17,34 @@ diagnostics( title = is.null(show_models) | length(show_models) > 1, ... ) + +diagnostics_autocorrelation( + fit, + parameter = NULL, + plot_type = "base", + show_models = NULL, + lags = 30, + title = is.null(show_models) | length(show_models) > 1, + ... +) + +diagnostics_trace( + fit, + parameter = NULL, + plot_type = "base", + show_models = NULL, + title = is.null(show_models) | length(show_models) > 1, + ... +) + +diagnostics_density( + fit, + parameter = NULL, + plot_type = "base", + show_models = NULL, + title = is.null(show_models) | length(show_models) > 1, + ... +) } \arguments{ \item{fit}{a fitted RoBMA object} diff --git a/man/figures/README-fig_mu_chain-1.png b/man/figures/README-fig_mu_chain-1.png index 0771add1..cd543add 100644 Binary files a/man/figures/README-fig_mu_chain-1.png and b/man/figures/README-fig_mu_chain-1.png differ diff --git a/man/is.RoBMA.Rd b/man/is.RoBMA.Rd index 36974e15..e756fd46 100644 --- a/man/is.RoBMA.Rd +++ b/man/is.RoBMA.Rd @@ -2,15 +2,18 @@ % Please edit documentation in R/summary.R \name{is.RoBMA} \alias{is.RoBMA} +\alias{is.RoBMA.reg} \title{Reports whether x is a RoBMA object} \usage{ is.RoBMA(x) + +is.RoBMA.reg(x) } \arguments{ \item{x}{an object to test} } \value{ -\code{is.RoBMA} returns a boolean. +returns a boolean. 
} \description{ Reports whether x is a RoBMA object } diff --git a/man/marginal_plot.Rd b/man/marginal_plot.Rd new file mode 100644 index 00000000..1094d31a --- /dev/null +++ b/man/marginal_plot.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/marginal.R +\name{marginal_plot} +\alias{marginal_plot} +\title{Plots marginal estimates of a fitted RoBMA regression object} +\usage{ +marginal_plot( + x, + parameter, + conditional = FALSE, + plot_type = "base", + prior = FALSE, + output_scale = NULL, + dots_prior = NULL, + ... +) +} +\arguments{ +\item{x}{a fitted RoBMA regression object} + +\item{parameter}{regression parameter to be plotted} + +\item{conditional}{whether conditional marginal estimates should be +plotted. Defaults to \code{FALSE} which plots the model-averaged +estimates.} + +\item{plot_type}{whether to use a base plot \code{"base"} +or ggplot2 \code{"ggplot"} for plotting. Defaults to +\code{"base"}.} + +\item{prior}{whether the prior distribution should be added to the +figure. Defaults to \code{FALSE}.} + +\item{output_scale}{transform the effect sizes and the meta-analytic +effect size estimate to a different scale. Defaults to \code{NULL} +which returns the same scale as the model was estimated on.} + +\item{dots_prior}{list of additional graphical arguments +to be passed to the plotting function of the prior +distribution. Supported arguments are \code{lwd}, +\code{lty}, \code{col}, and \code{col.fill}, to adjust +the line thickness, line type, line color, and fill color +of the prior distribution respectively.} + +\item{...}{list of additional graphical arguments +to be passed to the plotting function. Supported arguments +are \code{lwd}, \code{lty}, \code{col}, \code{col.fill}, +\code{xlab}, \code{ylab}, \code{main}, \code{xlim}, \code{ylim} +to adjust the line thickness, line type, line color, fill color, +x-label, y-label, title, x-axis range, and y-axis range +respectively.} +} +\value{ +\code{marginal_plot} returns either \code{NULL} if \code{plot_type = "base"} +or an object of class 'ggplot' if \code{plot_type = "ggplot"}. +} +\description{ +\code{marginal_plot} allows visualizing the prior and +posterior distributions of marginal estimates of a RoBMA regression model. +} +\seealso{ +\code{\link[=RoBMA]{RoBMA()}} +} diff --git a/man/marginal_summary.Rd b/man/marginal_summary.Rd new file mode 100644 index 00000000..c556e9d7 --- /dev/null +++ b/man/marginal_summary.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/marginal.R +\name{marginal_summary} +\alias{marginal_summary} +\title{Summarize marginal estimates of a fitted RoBMA regression object} +\usage{ +marginal_summary( + object, + conditional = FALSE, + output_scale = NULL, + probs = c(0.025, 0.975), + logBF = FALSE, + BF01 = FALSE +) +} +\arguments{ +\item{object}{a fitted RoBMA regression object} + +\item{conditional}{show the conditional estimates (assuming that the +alternative is true).} + +\item{output_scale}{transform the meta-analytic estimates to a different +scale. Defaults to \code{NULL} which returns the same scale as the model was estimated on.} + +\item{probs}{quantiles of the posterior samples to be displayed. +Defaults to \code{c(.025, .975)}.} + +\item{logBF}{show log of Bayes factors. Defaults to \code{FALSE}.} + +\item{BF01}{show Bayes factors in support of the null hypotheses. Defaults to +\code{FALSE}.} +} +\value{ +\code{marginal_summary} returns a list of tables of class 'BayesTools_table'. 
+} +\description{ +\code{marginal_summary} creates summary tables for +marginal estimates of a RoBMA regression model. +} +\seealso{ +\code{\link[=RoBMA]{RoBMA()}}, \code{\link[=summary.RoBMA]{summary.RoBMA()}}, \code{\link[=diagnostics]{diagnostics()}}, \code{\link[=check_RoBMA]{check_RoBMA()}} +} diff --git a/man/print.marginal_summary.RoBMA.Rd b/man/print.marginal_summary.RoBMA.Rd new file mode 100644 index 00000000..c8010fd8 --- /dev/null +++ b/man/print.marginal_summary.RoBMA.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/marginal.R +\name{print.marginal_summary.RoBMA} +\alias{print.marginal_summary.RoBMA} +\title{Prints marginal_summary object for RoBMA method} +\usage{ +\method{print}{marginal_summary.RoBMA}(x, ...) +} +\arguments{ +\item{x}{a summary of a RoBMA object} + +\item{...}{additional arguments} +} +\value{ +\code{print.marginal_summary.RoBMA} invisibly returns the print statement. +} +\description{ +Prints marginal_summary object for RoBMA method +} +\seealso{ +\code{\link[=RoBMA]{RoBMA()}} +} diff --git a/man/prior_factor.Rd b/man/prior_factor.Rd new file mode 100644 index 00000000..6d4f9f43 --- /dev/null +++ b/man/prior_factor.Rd @@ -0,0 +1,99 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/priors.R +\name{prior_factor} +\alias{prior_factor} +\title{Creates a prior distribution for factors} +\usage{ +prior_factor( + distribution, + parameters, + truncation = list(lower = -Inf, upper = Inf), + prior_weights = 1, + contrast = "meandif" +) +} +\arguments{ +\item{distribution}{name of the prior distribution. The +possible options are +\describe{ +\item{\code{"point"}}{for a point density characterized by a +\code{location} parameter.} +\item{\code{"normal"}}{for a normal distribution characterized +by a \code{mean} and \code{sd} parameters.} +\item{\code{"lognormal"}}{for a lognormal distribution characterized +by a \code{meanlog} and \code{sdlog} parameters.} +\item{\code{"cauchy"}}{for a Cauchy distribution characterized +by a \code{location} and \code{scale} parameters. Internally +converted into a generalized t-distribution with \code{df = 1}.} +\item{\code{"t"}}{for a generalized t-distribution characterized +by a \code{location}, \code{scale}, and \code{df} parameters.} +\item{\code{"gamma"}}{for a gamma distribution characterized +by either \code{shape} and \code{rate}, or \code{shape} and +\code{scale} parameters. The later is internally converted to +the \code{shape} and \code{rate} parametrization} +\item{\code{"invgamma"}}{for an inverse-gamma distribution +characterized by a \code{shape} and \code{scale} parameters. The +JAGS part uses a 1/gamma distribution with a shape and rate +parameter.} +\item{\code{"beta"}}{for a beta distribution +characterized by an \code{alpha} and \code{beta} parameters.} +\item{\code{"exp"}}{for an exponential distribution +characterized by either \code{rate} or \code{scale} +parameter. The later is internally converted to +\code{rate}.} +\item{\code{"uniform"}}{for a uniform distribution defined on a +range from \code{a} to \code{b}} +}} + +\item{parameters}{list of appropriate parameters for a given +\code{distribution}.} + +\item{truncation}{list with two elements, \code{lower} and +\code{upper}, that define the lower and upper truncation of the +distribution. Defaults to \code{list(lower = -Inf, upper = Inf)}. 
+The truncation is automatically set to the bounds of the support.} + +\item{prior_weights}{prior odds associated with a given distribution. +The value is passed into the model fitting function, which creates models +corresponding to all combinations of prior distributions for each of +the model parameters and sets the model priors odds to the product +of its prior distributions.} + +\item{contrast}{type of contrast for the prior distribution. The possible options are +\describe{ +\item{\code{"meandif"}}{for contrast centered around the grand mean +with equal marginal distributions, making the prior distribution exchangeable +across factor levels. In contrast to \code{"orthonormal"}, the marginal distributions +are identical regardless of the number of factor levels and the specified prior +distribution corresponds to the difference from grand mean for each factor level. +Only supports \code{distribution = "mnormal"} and \code{distribution = "mt"} +which generates the corresponding multivariate normal/t distributions.} +\item{\code{"orthonormal"}}{for contrast centered around the grand mean +with equal marginal distributions, making the prior distribution exchangeable +across factor levels. Only supports \code{distribution = "mnormal"} and +\code{distribution = "mt"} which generates the corresponding multivariate normal/t +distributions.} +\item{\code{"treatment"}}{for contrasts using the first level as a comparison +group and setting equal prior distribution on differences between the individual +factor levels and the comparison level.} +\item{\code{"independent"}}{for contrasts specifying dependent prior distribution +for each factor level (note that this leads to an overparameterized model if the +intercept is included).} +}} +} +\value{ +return an object of class 'prior'. +} +\description{ +\code{prior_factor} creates a prior distribution for fitting +models with factor predictors. (Note that results across different operating +systems might vary due to differences in JAGS numerical precision.) +} +\examples{ +# create an orthonormal prior distribution +p1 <- prior_factor(distribution = "mnormal", contrast = "orthonormal", + parameters = list(mean = 0, sd = 1)) +} +\seealso{ +\code{\link[BayesTools:prior]{prior()}} +} diff --git a/man/summary.RoBMA.Rd b/man/summary.RoBMA.Rd index b1b390ab..6cc91e21 100644 --- a/man/summary.RoBMA.Rd +++ b/man/summary.RoBMA.Rd @@ -27,7 +27,7 @@ of the individual models (\code{"individual"}). Can be abbreviated to first lett \item{conditional}{show the conditional estimates (assuming that the alternative is true). Defaults to \code{FALSE}. Only available for -\code{type == "conditional"}.} +\code{type == "ensemble"}.} \item{output_scale}{transform the meta-analytic estimates to a different scale. 
Defaults to \code{NULL} which returns the same scale as the model was estimated on.} diff --git a/man/update.RoBMA.Rd b/man/update.RoBMA.Rd index d1c44ce8..a2ace8b2 100644 --- a/man/update.RoBMA.Rd +++ b/man/update.RoBMA.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/main.R +% Please edit documentation in R/RoBMA.R \name{update.RoBMA} \alias{update.RoBMA} \title{Updates a fitted RoBMA object} @@ -10,12 +10,12 @@ prior_effect = NULL, prior_heterogeneity = NULL, prior_bias = NULL, - prior_rho = NULL, + prior_hierarchical = NULL, prior_weights = NULL, prior_effect_null = NULL, prior_heterogeneity_null = NULL, prior_bias_null = NULL, - prior_rho_null = NULL, + prior_hierarchical_null = NULL, study_names = NULL, chains = NULL, adapt = NULL, @@ -50,10 +50,10 @@ Defaults to \code{NULL}.} component that will be treated as belonging to the alternative hypothesis. Defaults to \code{NULL}.} -\item{prior_rho}{prior distributions for the variance allocation (\code{rho}) -parameter that will be treated as belonging to the alternative hypothesis. This setting allows -users to fit a three-level meta-analysis when \code{study_ids} are supplied. Note that this is -an experimental feature and see News for more details. Defaults to a beta distribution +\item{prior_hierarchical}{prior distribution for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the alternative hypothesis. This setting allows +users to fit a hierarchical (three-level) meta-analysis when \code{study_ids} are supplied. +Note that this is an experimental feature and see News for more details. Defaults to a beta distribution \code{prior(distribution = "beta", parameters = list(alpha = 1, beta = 1))}.} \item{prior_weights}{either a single value specifying prior model weight @@ -72,8 +72,8 @@ Defaults to \code{NULL}.} component that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}.} -\item{prior_rho_null}{prior distributions for the variance allocation (\code{rho}) -parameter that will be treated as belonging to the null hypothesis. Defaults to \code{NULL}.} +\item{prior_hierarchical_null}{prior distribution for the correlation of random effects +(\code{rho}) parameter that will be treated as belonging to the null hypothesis. 
Defaults to \code{NULL}.} \item{study_names}{an optional argument with the names of the studies} diff --git a/models/HierarchicalBMA/fit.0.RDS b/models/HierarchicalBMA/fit.0.RDS new file mode 100644 index 00000000..00cbeb09 Binary files /dev/null and b/models/HierarchicalBMA/fit.0.RDS differ diff --git a/models/HierarchicalBMA/fit.RDS b/models/HierarchicalBMA/fit.RDS new file mode 100644 index 00000000..cd1f1e5a Binary files /dev/null and b/models/HierarchicalBMA/fit.RDS differ diff --git a/models/HierarchicalBMA/fit_BMA.RDS b/models/HierarchicalBMA/fit_BMA.RDS new file mode 100644 index 00000000..a58fdd1e Binary files /dev/null and b/models/HierarchicalBMA/fit_BMA.RDS differ diff --git a/models/HierarchicalBMA/hierarchical_test.RDS b/models/HierarchicalBMA/hierarchical_test.RDS new file mode 100644 index 00000000..95a92bb7 Binary files /dev/null and b/models/HierarchicalBMA/hierarchical_test.RDS differ diff --git a/models/MetaRegression/fit_3PP_reg10.RDS b/models/MetaRegression/fit_3PP_reg10.RDS new file mode 100644 index 00000000..68a4660b Binary files /dev/null and b/models/MetaRegression/fit_3PP_reg10.RDS differ diff --git a/models/MetaRegression/fit_BFE.RDS b/models/MetaRegression/fit_BFE.RDS new file mode 100644 index 00000000..38527ae5 Binary files /dev/null and b/models/MetaRegression/fit_BFE.RDS differ diff --git a/models/MetaRegression/fit_BFE10.RDS b/models/MetaRegression/fit_BFE10.RDS new file mode 100644 index 00000000..4f5e3766 Binary files /dev/null and b/models/MetaRegression/fit_BFE10.RDS differ diff --git a/models/MetaRegression/fit_PSMA_reg10.RDS b/models/MetaRegression/fit_PSMA_reg10.RDS new file mode 100644 index 00000000..1e669417 Binary files /dev/null and b/models/MetaRegression/fit_PSMA_reg10.RDS differ diff --git a/models/MetaRegression/fit_wBFE.RDS b/models/MetaRegression/fit_wBFE.RDS new file mode 100644 index 00000000..b321cbf7 Binary files /dev/null and b/models/MetaRegression/fit_wBFE.RDS differ diff --git a/models/MetaRegression/fit_wBFE10.RDS b/models/MetaRegression/fit_wBFE10.RDS new file mode 100644 index 00000000..a21c09c9 Binary files /dev/null and b/models/MetaRegression/fit_wBFE10.RDS differ diff --git a/models/MetaRegression/fit_wBFE_reg.RDS b/models/MetaRegression/fit_wBFE_reg.RDS new file mode 100644 index 00000000..549f70f3 Binary files /dev/null and b/models/MetaRegression/fit_wBFE_reg.RDS differ diff --git a/models/MetaRegression/fit_wBFE_reg10.RDS b/models/MetaRegression/fit_wBFE_reg10.RDS new file mode 100644 index 00000000..4a603fbe Binary files /dev/null and b/models/MetaRegression/fit_wBFE_reg10.RDS differ diff --git a/models/MetaRegression/fit_wPSMA.RDS b/models/MetaRegression/fit_wPSMA.RDS new file mode 100644 index 00000000..31c03033 Binary files /dev/null and b/models/MetaRegression/fit_wPSMA.RDS differ diff --git a/models/MetaRegression/fit_wPSMA_reg.RDS b/models/MetaRegression/fit_wPSMA_reg.RDS new file mode 100644 index 00000000..74f59209 Binary files /dev/null and b/models/MetaRegression/fit_wPSMA_reg.RDS differ diff --git a/models/MetaRegression/fit_wPSMA_reg10.RDS b/models/MetaRegression/fit_wPSMA_reg10.RDS new file mode 100644 index 00000000..f043fc26 Binary files /dev/null and b/models/MetaRegression/fit_wPSMA_reg10.RDS differ diff --git a/models/MetaRegression/fitting_help.R b/models/MetaRegression/fitting_help.R new file mode 100644 index 00000000..1670d54b --- /dev/null +++ b/models/MetaRegression/fitting_help.R @@ -0,0 +1,460 @@ +library(RoBMA) +data("Kroupova2021", package = "RoBMA") +Kroupova2021 <- 
Kroupova2021[!is.na(Kroupova2021$se),] + + +fit_BFE <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1, + data = Kroupova2021, + + # specify slightly informative prior for the effect size parameter (on Fisher's z scale) + priors_effect = prior("normal", parameters = list(mean = 0, sd = 1)), + prior_scale = "fishers_z", + + # remove the remaining model components + priors_bias = NULL, + priors_heterogeneity = NULL, + priors_effect_null = NULL, + + # some additional settings + parallel = TRUE, seed = 1 +) +fit_wBFE <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1, + data = Kroupova2021, + study_ids = Kroupova2021$study, + weighted = TRUE, + + # specify slightly informative prior for the effect size parameter (on Fisher's z scale) + priors_effect = prior("normal", parameters = list(mean = 0, sd = 1)), + prior_scale = "fishers_z", + + # remove the remaining model components + priors_bias = NULL, + priors_heterogeneity = NULL, + priors_effect_null = NULL, + + # some additional settings + parallel = TRUE, seed = 1 +) +fit_BFE10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1, + data = Kroupova2021, + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # remove the remaining model components + priors_bias = NULL, + priors_heterogeneity = NULL, + + # some additional settings + parallel = TRUE, seed = 1 +) +fit_wBFE10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1, + data = Kroupova2021, + study_ids = Kroupova2021$study, + weighted = TRUE, + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # remove the remaining model components + priors_bias = NULL, + priors_heterogeneity = NULL, + + # some additional settings + parallel = TRUE, seed = 1 +) +fit_wPSMA <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1, + data = Kroupova2021, + effect_direction = "negative", + study_ids = Kroupova2021$study, + weighted = TRUE, + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # some additional settings + parallel = TRUE, seed = 1 +) +fit_wBFE_reg <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + location, + data = Kroupova2021, + study_ids = Kroupova2021$study, + test_predictors = "", + weighted = TRUE, + + # specify slightly informative prior for the effect size parameter (on Fisher's z scale) + priors = list( + location = prior_factor("mnormal", list(mean = 0, sd = 0.50), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 1)), + prior_scale = "fishers_z", + + # remove the remaining model components + priors_bias = NULL, + priors_heterogeneity = NULL, + priors_effect_null = NULL, + + # some additional settings + 
parallel = TRUE, seed = 1 +) +fit_wBFE_reg10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + location, + data = Kroupova2021, + study_ids = Kroupova2021$study, + test_predictors = "location", + weighted = TRUE, + + # specify slightly informative prior for the effect size parameter (on Fisher's z scale) + priors = list( + location = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + prior_scale = "fishers_z", + + # remove the remaining model components + priors_bias = NULL, + priors_heterogeneity = NULL, + priors_effect_null = NULL, + + # some additional settings + parallel = TRUE, seed = 1 +) +fit_wPSMA_reg <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control, + data = Kroupova2021, + effect_direction = "negative", + study_ids = Kroupova2021$study, + weighted = TRUE, + test_predictors = "", + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors = list( + education_outcome = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + students_gender = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + location = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + design = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # some additional settings + parallel = TRUE, seed = 1 +) + +fit_wPSMA_reg10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control, + data = Kroupova2021, + effect_direction = "negative", + study_ids = Kroupova2021$study, + weighted = TRUE, + test_predictors = c("education_outcome", "students_gender", "location", "design", "endogenity_control"), + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors = list( + education_outcome = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + students_gender = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + location = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + design = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # some additional settings + parallel = FALSE, seed = 1, do_not_fit = TRUE +) + +missing <- list.files("../models/MetaRegression/fit_wPSMA_reg10/") +missing <- gsub("m_", "", missing) +missing <- gsub(".RDS", "", missing) +missing <- seq_along(fit_wPSMA_reg10$models)[!seq_along(fit_wPSMA_reg10$models) %in% as.numeric(missing)] + +cl <- parallel::makeCluster(23) +parallel::clusterExport(cl, c("fit_wPSMA_reg10")) 
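+# Note: the ensemble above was built with `do_not_fit = TRUE`, so each individual
+# model is fitted separately via the internal RoBMA:::.fit_RoBMA_model() and cached
+# as its own "m_i.RDS" file. The `missing` vector computed above makes the run
+# resumable: only models without a cached file are (re)fitted by the cluster below.
+# A serial sketch of the same step (assuming the same relative
+# "../models/MetaRegression/" paths and no cluster) would be:
+# for(i in missing){
+#   temp_model <- RoBMA:::.fit_RoBMA_model(fit_wPSMA_reg10, i)
+#   saveRDS(temp_model, file = paste0("../models/MetaRegression/fit_wPSMA_reg10/", "m_", i, ".RDS"), compress = "xz")
+# }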
+parallel::parSapplyLB(cl, missing, function(i){
+
+  library(RoBMA)
+  temp_model <- RoBMA:::.fit_RoBMA_model(fit_wPSMA_reg10, i)
+  saveRDS(temp_model, file = paste0("../models/MetaRegression/fit_wPSMA_reg10/", "m_", i, ".RDS"), compress = "xz")
+
+})
+
+parallel::stopCluster(cl)
+
+fit_wPSMA_reg10 <- RoBMA.reg(
+  # specify the model formula and data input
+  formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control,
+  data = Kroupova2021,
+  effect_direction = "negative",
+  study_ids = Kroupova2021$study,
+  weighted = TRUE,
+  test_predictors = c("education_outcome", "students_gender", "location", "design", "endogenity_control"),
+
+  # specify an informative prior for the effect size parameter under the alternative hypothesis
+  # and specify a null hypothesis of no effect
+  priors = list(
+    education_outcome  = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"),
+    students_gender    = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"),
+    location           = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"),
+    design             = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"),
+    endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal")
+  ),
+  priors_effect      = prior("normal", parameters = list(mean = 0, sd = 0.25)),
+  priors_effect_null = prior("spike",  parameters = list(location = 0)),
+  prior_scale = "fishers_z",
+
+  # some additional settings
+  parallel = FALSE, seed = 1, do_not_fit = TRUE
+)
+
+# reassemble the weighted meta-regression ensemble from the individually cached models
+for(i in seq_along(fit_wPSMA_reg10$models)){
+  fit_wPSMA_reg10$models[[i]] <- readRDS(paste0("../models/MetaRegression/fit_wPSMA_reg10/", "m_", i, ".RDS"))
+}
+
+fit_wPSMA_reg10$models       <- BayesTools::models_inference(fit_wPSMA_reg10[["models"]])
+fit_wPSMA_reg10$RoBMA        <- RoBMA:::.ensemble_inference(fit_wPSMA_reg10)
+fit_wPSMA_reg10$coefficients <- RoBMA:::.compute_coeficients(fit_wPSMA_reg10[["RoBMA"]])
+
+fit_wPSMA_reg10$add_info[["errors"]]   <- c(fit_wPSMA_reg10$add_info[["errors"]],   RoBMA:::.get_model_errors(fit_wPSMA_reg10))
+fit_wPSMA_reg10$add_info[["warnings"]] <- c(fit_wPSMA_reg10$add_info[["warnings"]], RoBMA:::.get_model_warnings(fit_wPSMA_reg10))
+
+fit_wPSMA_reg10 <- RoBMA:::.remove_model_posteriors(fit_wPSMA_reg10)
+fit_wPSMA_reg10 <- RoBMA:::.remove_model_margliks(fit_wPSMA_reg10)
+
+class(fit_wPSMA_reg10) <- c("RoBMA", "RoBMA.reg")
+
+fit_wPSMA <- RoBMA:::.remove_model_posteriors(fit_wPSMA)
+fit_wPSMA <- RoBMA:::.remove_model_margliks(fit_wPSMA)
+
+fit_wPSMA_reg <- RoBMA:::.remove_model_posteriors(fit_wPSMA_reg)
+fit_wPSMA_reg <- RoBMA:::.remove_model_margliks(fit_wPSMA_reg)
+
+saveRDS(fit_BFE,         file = "../models/MetaRegression/fit_BFE.RDS",         compress = "xz")
+saveRDS(fit_wBFE,        file = "../models/MetaRegression/fit_wBFE.RDS",        compress = "xz")
+saveRDS(fit_BFE10,       file = "../models/MetaRegression/fit_BFE10.RDS",       compress = "xz")
+saveRDS(fit_wBFE10,      file = "../models/MetaRegression/fit_wBFE10.RDS",      compress = "xz")
+saveRDS(fit_wPSMA,       file = "../models/MetaRegression/fit_wPSMA.RDS",       compress = "xz")
+saveRDS(fit_wBFE_reg,    file = "../models/MetaRegression/fit_wBFE_reg.RDS",    compress = "xz")
+saveRDS(fit_wBFE_reg10,  file = "../models/MetaRegression/fit_wBFE_reg10.RDS",  compress = "xz")
+saveRDS(fit_wPSMA_reg,   file = "../models/MetaRegression/fit_wPSMA_reg.RDS",   compress = "xz")
+saveRDS(fit_wPSMA_reg10, file = "../models/MetaRegression/fit_wPSMA_reg10.RDS", compress = "xz")
+
+
+
+
+fit_PSMA_reg10 <- RoBMA.reg(
+  # specify the model formula and data input
formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control, + data = Kroupova2021, + effect_direction = "negative", + test_predictors = c("education_outcome", "students_gender", "location", "design", "endogenity_control"), + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors = list( + education_outcome = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + students_gender = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + location = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + design = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # some additional settings + parallel = FALSE, seed = 1, do_not_fit = TRUE +) + +missing <- list.files("../models/MetaRegression/fit_PSMA_reg10/") +missing <- gsub("m_", "", missing) +missing <- gsub(".RDS", "", missing) +missing <- seq_along(fit_PSMA_reg10$models)[!seq_along(fit_PSMA_reg10$models) %in% as.numeric(missing)] + +cl <- parallel::makeCluster(23) +parallel::clusterExport(cl, c("fit_PSMA_reg10")) +parallel::parSapplyLB(cl, missing, function(i){ + + library(RoBMA) + temp_model <- RoBMA:::.fit_RoBMA_model(fit_PSMA_reg10, i) + saveRDS(temp_model, file = paste0("../models/MetaRegression/fit_PSMA_reg10/", "m_", i, ".RDS"), compress = "xz") + +}) + +parallel::stopCluster(cl) + +fit_PSMA_reg10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control, + data = Kroupova2021, + effect_direction = "negative", + test_predictors = c("education_outcome", "students_gender", "location", "design", "endogenity_control"), + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors = list( + education_outcome = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + students_gender = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + location = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + design = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + prior_scale = "fishers_z", + + # some additional settings + parallel = FALSE, seed = 1, do_not_fit = TRUE +) + +for(i in seq_along(fit_PSMA_reg10$models)){ + fit_PSMA_reg10$models[[i]] <- readRDS(paste0("../models/MetaRegression/fit_PSMA_reg10/", "m_", i, ".RDS")) +} + +fit_PSMA_reg10$models <- BayesTools::models_inference(fit_PSMA_reg10[["models"]]) +fit_PSMA_reg10$RoBMA <- RoBMA:::.ensemble_inference(fit_PSMA_reg10) +fit_PSMA_reg10$coefficients <- RoBMA:::.compute_coeficients(fit_PSMA_reg10[["RoBMA"]]) + +fit_PSMA_reg10$add_info[["errors"]] <- c(fit_PSMA_reg10$add_info[["errors"]], RoBMA:::.get_model_errors(fit_PSMA_reg10)) 
+fit_PSMA_reg10$add_info[["warnings"]] <- c(fit_PSMA_reg10$add_info[["warnings"]], RoBMA:::.get_model_warnings(fit_PSMA_reg10)) + +fit_PSMA_reg10 <- RoBMA:::.remove_model_posteriors(fit_PSMA_reg10) +fit_PSMA_reg10 <- RoBMA:::.remove_model_margliks(fit_PSMA_reg10) + +class(fit_PSMA_reg10) <- c("RoBMA", "RoBMA.reg") + +saveRDS(fit_PSMA_reg10, file = "../models/MetaRegression/fit_PSMA_reg10.RDS", compress = "xz") +summary(fit_PSMA_reg10) + + + + + + + +fit_3PP_reg10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control, + data = Kroupova2021, + study_ids = Kroupova2021$study, + effect_direction = "negative", + test_predictors = c("education_outcome", "students_gender", "location", "design", "endogenity_control"), + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors = list( + education_outcome = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + students_gender = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + location = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + design = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + priors_bias = list( + prior_PET(distribution = "Cauchy", parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2), + prior_PEESE(distribution = "Cauchy", parameters = list(0, 5), truncation = list(0, Inf), prior_weights = 1/2) + ), + prior_scale = "fishers_z", + + # some additional settings + parallel = FALSE, silent = FALSE, seed = 1, do_not_fit = TRUE +) + +missing <- list.files("../models/MetaRegression/fit_3PP_reg10/") +missing <- gsub("m_", "", missing) +missing <- gsub(".RDS", "", missing) +missing <- seq_along(fit_3PP_reg10$models)[!seq_along(fit_3PP_reg10$models) %in% as.numeric(missing)] + +for(i in missing){ + temp_model <- RoBMA:::.fit_RoBMA_model(fit_3PP_reg10, i) + saveRDS(temp_model, file = paste0("../models/MetaRegression/fit_3PP_reg10/", "m_", i, ".RDS"), compress = "xz") +} + +cl <- parallel::makeCluster(23) +parallel::clusterExport(cl, c("fit_3PP_reg10")) +parallel::parSapplyLB(cl, missing, function(i){ + + library(RoBMA) + temp_model <- RoBMA:::.fit_RoBMA_model(fit_3PP_reg10, i) + saveRDS(temp_model, file = paste0("../models/MetaRegression/fit_3PP_reg10/", "m_", i, ".RDS"), compress = "xz") + +}) + +parallel::stopCluster(cl) + +fit_3PP_reg10 <- RoBMA.reg( + # specify the model formula and data input + formula = ~ 1 + education_outcome + students_gender + location + design + endogenity_control, + data = Kroupova2021, + study_ids = Kroupova2021$study, + effect_direction = "negative", + test_predictors = c("education_outcome", "students_gender", "location", "design", "endogenity_control"), + + # specify informative prior for the effect size parameter under the alternative hypothesis + # and a specify a null hypothesis of no effect + priors = list( + education_outcome = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + students_gender = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + location = 
prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + design = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal"), + endogenity_control = prior_factor("mnormal", list(mean = 0, sd = 0.10), contrast = "orthonormal") + ), + priors_effect = prior("normal", parameters = list(mean = 0, sd = 0.25)), + priors_effect_null = prior("spike", parameters = list(location = 0)), + priors_bias = list( + prior_PET(distribution = "Cauchy", parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2), + prior_PEESE(distribution = "Cauchy", parameters = list(0, 5), truncation = list(0, Inf), prior_weights = 1/2) + ), + prior_scale = "fishers_z", + + # some additional settings + parallel = FALSE, seed = 1, do_not_fit = TRUE +) + +for(i in seq_along(fit_3PP_reg10$models)){ + fit_3PP_reg10$models[[i]] <- readRDS(paste0("../models/MetaRegression/fit_3PP_reg10/", "m_", i, ".RDS")) +} + +fit_3PP_reg10$models <- BayesTools::models_inference(fit_3PP_reg10[["models"]]) +fit_3PP_reg10$RoBMA <- RoBMA:::.ensemble_inference(fit_3PP_reg10) +fit_3PP_reg10$coefficients <- RoBMA:::.compute_coeficients(fit_3PP_reg10[["RoBMA"]]) + +fit_3PP_reg10$add_info[["errors"]] <- c(fit_3PP_reg10$add_info[["errors"]], RoBMA:::.get_model_errors(fit_3PP_reg10)) +fit_3PP_reg10$add_info[["warnings"]] <- c(fit_3PP_reg10$add_info[["warnings"]], RoBMA:::.get_model_warnings(fit_3PP_reg10)) + +fit_3PP_reg10 <- RoBMA:::.remove_model_posteriors(fit_3PP_reg10) +fit_3PP_reg10 <- RoBMA:::.remove_model_margliks(fit_3PP_reg10) + +class(fit_3PP_reg10) <- c("RoBMA", "RoBMA.reg") + +saveRDS(fit_3PP_reg10, file = "../models/MetaRegression/fit_3PP_reg10.RDS", compress = "xz") +summary(fit_3PP_reg10) diff --git a/models/Tutorial/Lui2015.csv b/models/Tutorial/Lui2015.csv new file mode 100644 index 00000000..acd28b8c --- /dev/null +++ b/models/Tutorial/Lui2015.csv @@ -0,0 +1,19 @@ +r,n,study +0.21,115,"Ahn, Kim, & Park (2008)" +0.29,283,Basanez et al. (2013) +0.22,80,Bounkeua (2007) +0.26,109,Hajizadeh (2009) +0.23,61,Hamid (2007) +0.54,107,Hwang & Wood (2009a) +0.56,79,Hwang & Wood (2009b) +0.29,121,"Hwang, Wood, & Fujimoto (2010)" +0.26,166,"Juang, Syed, & Takaki (2007)" +0.02,208,Kim (2001) +-0.06,265,Lau et al. 
(2005) +0.38,51,Leong (2004) +0.25,60,Luna (2012) +0.08,73,Martinez (2006) +0.17,350,Nguyen (2010) +0.33,93,Tsai-Chae & Nagata (2008) +0.36,188,Ying & Han (2007a) +0.13,490,Ying & Han (2007b) diff --git a/models/Tutorial/fit_RoBMA_Lui2015.RDS b/models/Tutorial/fit_RoBMA_Lui2015.RDS new file mode 100644 index 00000000..0fe19e7c Binary files /dev/null and b/models/Tutorial/fit_RoBMA_Lui2015.RDS differ diff --git a/models/Tutorial/fit_RoBMA_perinull_Lui2015.RDS b/models/Tutorial/fit_RoBMA_perinull_Lui2015.RDS new file mode 100644 index 00000000..d3836594 Binary files /dev/null and b/models/Tutorial/fit_RoBMA_perinull_Lui2015.RDS differ diff --git a/src/Makevars.in b/src/Makevars.in index 8f0e4d40..aba3e872 100644 --- a/src/Makevars.in +++ b/src/Makevars.in @@ -32,7 +32,7 @@ OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distri distributions/DWN1.o distributions/DWN2.o distributions/DWT1.o distributions/DWT2.o \ distributions/DWN.o distributions/DWWN1.o distributions/DWWN2.o \ transformations/z.o transformations/r.o transformations/d.o transformations/logOR.o \ - functions/wmnorm.o functions/mnorm.o \ + functions/wmnorm.o functions/mnorm.o matrix/matrix.o \ source/tools.o source/mnorm.o source/wmnorm.o source/transformations.o \ init.o RoBMA.o testRoBMA.o \ diff --git a/src/Makevars.ucrt b/src/Makevars.ucrt index c0f470b4..49bd82b3 100644 --- a/src/Makevars.ucrt +++ b/src/Makevars.ucrt @@ -65,8 +65,7 @@ endif JAGS_MAJOR_ASSUMED := 4 PKG_CPPFLAGS=-I"$(JAGS_ROOT)/include" -D JAGS_MAJOR_ASSUMED=$(JAGS_MAJOR_ASSUMED) -D JAGS_MAJOR_FORCED=0$(JAGS_MAJOR_VERSION) -PKG_LIBS=-L"$(JAGS_ROOT)/${R_ARCH}/bin" -ljags-$(JAGS_MAJOR) -ljrmath-0 - +PKG_LIBS=-L"$(JAGS_ROOT)/${R_ARCH}/bin" -ljags-$(JAGS_MAJOR) -ljrmath-0 -llapack ############### ### Objects to be compiled @@ -77,7 +76,7 @@ OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distri distributions/DWN1.o distributions/DWN2.o distributions/DWT1.o distributions/DWT2.o \ distributions/DWN.o distributions/DWWN1.o distributions/DWWN2.o \ transformations/z.o transformations/r.o transformations/d.o transformations/logOR.o \ - functions/wmnorm.o functions/mnorm.o \ + functions/wmnorm.o functions/mnorm.o matrix/matrix.o \ source/tools.o source/mnorm.o source/wmnorm.o source/transformations.o \ init.o RoBMA.o testRoBMA.o @@ -90,12 +89,12 @@ OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distri ############### # PKG_CPPFLAGS=-I"$(JAGS_ROOT)/include" -# PKG_LIBS=-L"$(JAGS_ROOT)/${R_ARCH}/bin" -ljags-4 +# PKG_LIBS=-L"$(JAGS_ROOT)/${R_ARCH}/bin" -ljags-4 -llapack # OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distributions/DMNv.o distributions/DWMN1v.o distributions/DWMN2v.o \ # distributions/DWN1.o distributions/DWN2.o distributions/DWT1.o distributions/DWT2.o \ # distributions/DWN.o distributions/DWWN1.o distributions/DWWN2.o \ # transformations/z.o transformations/r.o transformations/d.o transformations/logOR.o \ -# functions/wmnorm.o functions/mnorm.o \ +# functions/wmnorm.o functions/mnorm.o matrix/matrix.o \ # source/tools.o source/mnorm.o source/wmnorm.o source/transformations.o \ # init.o RoBMA.o testRoBMA.o diff --git a/src/Makevars.win b/src/Makevars.win index 32e12ea7..757533d9 100644 --- a/src/Makevars.win +++ b/src/Makevars.win @@ -75,7 +75,7 @@ OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distri distributions/DWN1.o distributions/DWN2.o distributions/DWT1.o distributions/DWT2.o \ distributions/DWN.o 
distributions/DWWN1.o distributions/DWWN2.o \ transformations/z.o transformations/r.o transformations/d.o transformations/logOR.o \ - functions/wmnorm.o functions/mnorm.o \ + functions/wmnorm.o functions/mnorm.o matrix/matrix.o \ source/tools.o source/mnorm.o source/wmnorm.o source/transformations.o \ init.o RoBMA.o testRoBMA.o @@ -88,12 +88,12 @@ OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distri ############### # PKG_CPPFLAGS=-I"$(JAGS_ROOT)/include" -# PKG_LIBS=-L"$(JAGS_ROOT)/${R_ARCH}/bin" -ljags-4 -ljrmath-0 +# PKG_LIBS=-L"$(JAGS_ROOT)/${R_ARCH}/bin" -ljags-4 -ljrmath-0 -llapack # OBJECTS = distributions/DMN.o distributions/DWMN1.o distributions/DWMN2.o distributions/DMT2v.o distributions/DWMT1v.o distributions/DWMT2v.o \ # distributions/DWN1.o distributions/DWN2.o distributions/DWT1.o distributions/DWT2.o \ # distributions/DWN.o distributions/DWWN1.o distributions/DWWN2.o \ # transformations/z.o transformations/r.o transformations/d.o transformations/logOR.o \ -# functions/wmnorm.o functions/mnorm.o \ +# functions/wmnorm.o functions/mnorm.o matrix/matrix.o \ # source/tools.o source/mnorm.o source/wmnorm.o source/transformations.o \ # init.o RoBMA.o testRoBMA.o diff --git a/src/RoBMA.cc b/src/RoBMA.cc index 81fe589b..dc8b4542 100644 --- a/src/RoBMA.cc +++ b/src/RoBMA.cc @@ -6,7 +6,7 @@ #include "distributions/DWN.h" #include "distributions/DWWN1.h" #include "distributions/DWWN2.h" -#include "distributions/DMN.h" +//#include "distributions/DMN.h" #include "distributions/DWMN1.h" #include "distributions/DWMN2.h" #include "distributions/DMNv.h" @@ -42,7 +42,7 @@ namespace jags { insert(new DWN); insert(new DWWN1); insert(new DWWN2); - insert(new DMN); + //insert(new DMN); insert(new DWMN1); insert(new DWMN2); insert(new DMNv); diff --git a/src/matrix/matrix.cc b/src/matrix/matrix.cc new file mode 100644 index 00000000..4c3ea53e --- /dev/null +++ b/src/matrix/matrix.cc @@ -0,0 +1,184 @@ +#include +#include +#include +#include +#include "matrix.h" + +#include + +/* lapack prototypes */ +extern "C" +{ + void dsyev_(const char* jobz, const char* uplo, int* n, double* a, int* lda, + double* w, double* work, int* lwork, int* info ); + + void dgesv_(const int* n, const int* nrhs, double* a, + const int* lda, int* ipiv, double* b, const int* ldb, + int* info); + + void dpotrf_(const char *uplo, const int *n, double *a, + const int *lda, const int *info); + + void dpotri_(const char *uplo, const int *n, double *a, + const int *lda, const int *info); +} + +/* adapted from the BUGS module from JAGS*/ + +namespace jags { +namespace RoBMA { + +double logdet(double const *a, int n) +{ + // Log determinant of n x n symmetric positive matrix a */ + + int N = n*n; + double *acopy = new double[N]; + for (int i = 0; i < N; i++) { + acopy[i] = a[i]; + } + + double *w = new double[n]; + int lwork = -1; + double worktest = 0; + int info = 0; + dsyev_("N","U", &n, acopy, &n, w, &worktest, &lwork, &info); + if (info != 0) { + delete [] acopy; + delete [] w; + throwRuntimeError("unable to calculate workspace size for dsyev"); + } + lwork = static_cast(worktest); + double *work = new double[lwork]; + dsyev_("N","U", &n, acopy, &n, w, work, &lwork, &info); + delete [] acopy; + delete [] work; + if (info != 0) { + delete [] w; + throwRuntimeError("unable to calculate eigenvalues in dsyev"); + } + + if (w[0] <= 0) { + throwRuntimeError("Non positive definite matrix in call to logdet"); + } + + double logdet = 0; + for (int i = 0; i < n; i++) { + logdet += std::log(w[i]); + } + delete 
[] w;
+
+  return logdet;
+}
+
+bool check_symmetric_ispd(double const *a, int n)
+{
+  /* Checks that an n x n symmetric matrix is positive definite.
+     The code is essentially the same as logdet, but we return
+     false if the smallest eigenvalue is not positive.
+  */
+
+  int N = n*n;
+  std::vector<double> acopy(N);
+  std::copy(a, a+N, acopy.begin());
+
+  //Workspace query to get optimal workspace
+  std::vector<double> w(n);
+  int lwork = -1;
+  double worktest = 0;
+  int info = 0;
+  dsyev_("N","U", &n, &acopy[0], &n, &w[0], &worktest, &lwork, &info);
+  if (info != 0) {
+    throwRuntimeError("unable to calculate workspace size for dsyev");
+  }
+  lwork = static_cast<int>(worktest);
+  std::vector<double> work(lwork);
+
+  //Calculate eigenvalues
+  dsyev_("N","U", &n, &acopy[0], &n, &w[0], &work[0], &lwork, &info);
+  if (info != 0) {
+    throwRuntimeError("unable to calculate eigenvalues in dsyev");
+  }
+
+  return w[0] > 0;
+}
+
+
+bool inverse_spd (double *X, double const *A, int n)
+{
+  /* invert n x n symmetric positive definite matrix A. Put result in X */
+
+  int N = n*n;
+  double *Acopy = new double[N];
+  for (int i = 0; i < N; i++) {
+    Acopy[i] = A[i];
+  }
+
+  int info = 0;
+  dpotrf_("L", &n, Acopy, &n, &info);
+  if (info < 0) {
+    throwLogicError("Illegal argument in inverse_spd");
+  }
+  else if (info > 0) {
+    delete [] Acopy;
+    throwRuntimeError("Cannot invert matrix: not positive definite");
+  }
+  dpotri_("L", &n, Acopy, &n, &info);
+
+  for (int i = 0; i < n; ++i) {
+    X[i*n + i] = Acopy[i*n + i];
+    for (int j = 0; j < i; ++j) {
+      X[i*n + j] = X[j*n + i] = Acopy[j*n + i];
+    }
+  }
+  delete [] Acopy;
+
+  if (info != 0) {
+    throwRuntimeError("Unable to invert symmetric positive definite matrix");
+  }
+  return true;
+}
+
+
+bool inverse (double *X, double const *A, int n)
+{
+  /* invert n x n matrix A. Put result in X */
+
+  int N = n*n;
+  double *Acopy = new double[N];
+  for (int i = 0; i < N; i++) {
+    Acopy[i] = A[i];
+    X[i] = 0;
+  }
+  for (int i = 0; i < n; i++) {
+    X[i*n + i] = 1;
+  }
+
+  int info = 0;
+  int *ipiv = new int[n];
+  dgesv_(&n, &n, Acopy, &n, ipiv, X, &n, &info);
+
+  delete [] ipiv;
+  delete [] Acopy;
+
+  if (info != 0) {
+    return false;
+  }
+  return true;
+}
+
+bool check_symmetry(double const *x, unsigned int n, double tol)
+{
+  for (unsigned int i = 1; i < n; ++i) {
+    double const *xp = x + i;
+    double const *yp = x + n*i;
+    for (unsigned int j = 0; j < i; ++j) {
+      if (std::fabs(*xp - *yp) > tol) return false;
+      xp += n;
+      yp++;
+    }
+  }
+  return true;
+}
+
+}}
diff --git a/src/matrix/matrix.h b/src/matrix/matrix.h
new file mode 100644
index 00000000..82770cc1
--- /dev/null
+++ b/src/matrix/matrix.h
@@ -0,0 +1,66 @@
+#ifndef MATRIX_H_
+#define MATRIX_H_
+
+namespace jags {
+namespace RoBMA {
+
+/**
+ * Inverts a general square matrix using the LAPACK routine DGESV
+ *
+ * @param X Pointer to an array of length n squared, which will contain
+ * the inverse on exit.
+ *
+ * @param A pointer to array containing the values of the matrix
+ *
+ * @param n number of rows or columns in the matrix
+ */
+bool inverse (double *X, double const *A, int n);
+
+/**
+ * Inverts a symmetric positive definite matrix by Cholesky
+ * decomposition using the LAPACK routines DPOTRF and DPOTRI.
+ *
+ * @param X Pointer to an array of length n squared, which will contain
+ * the inverse on exit.
+ *
+ * @param A pointer to array containing the values of the matrix. Only
+ * the lower triangle of the matrix (in column-major order) is used.
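+ * (As implemented in matrix.cc, the inverse written to X is filled in
+ * completely, i.e., both triangles of the result are populated.)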
+ *
+ * @param n number of rows or columns in the matrix
+ */
+bool inverse_spd (double *X, double const *A, int n);
+
+/**
+ * Checks whether a symmetric matrix is positive definite
+ *
+ * @param A pointer to array containing the values of the matrix. Only
+ * the lower triangle (in column-major order) is used.
+ *
+ * @param n number of rows or columns in the matrix
+ */
+bool check_symmetric_ispd(double const *a, int n);
+
+/**
+ * Log determinant of a symmetric positive definite matrix
+ *
+ * @param A pointer to array containing the values of the matrix. Only
+ * the lower triangle (in column-major order) is used.
+ *
+ * @param n number of rows or columns in the matrix
+ */
+double logdet(double const *A, int n);
+
+/**
+ * Checks the symmetry of a square matrix
+ *
+ * @param X pointer to array containing the values of the matrix
+ *
+ * @param n number of rows or columns in the matrix
+ *
+ * @param tol tolerance for symmetry test
+ */
+bool check_symmetry(double const *X, unsigned int n, double tol=1e-7);
+
+}}
+
+#endif /* MATRIX_H_ */
diff --git a/src/source/mnorm.cc b/src/source/mnorm.cc
index 2b40a65e..bc00ecfe 100644
--- a/src/source/mnorm.cc
+++ b/src/source/mnorm.cc
@@ -8,6 +8,10 @@
 #include 
 #include 
 #include 
+#include <vector>
+#include "../matrix/matrix.h"
+
+using namespace std;
 
 // wrapper around the mvtnorm package
 double cpp_mnorm_cdf(double *lower, double *upper, int *infin, double *mu, double *sigma_stdev, double *sigma_corr, int K)
@@ -58,203 +62,24 @@ double cpp_mnorm_cdf(double *lower, double *upper, int *infin, double *mu, doubl
   return value;
 }
 
+// adapted from the BUGS module from JAGS
 double cpp_mnorm_lpdf(double const *x, double const *mu, double const *sigma, const int K)
 {
-  double * sigma_chol;
-  double * chol_inv;
-  sigma_chol = new double [K*K];
-  chol_inv = new double [K*K];
-
-  for(int i = 0; i < K*K; i++){
-    *(sigma_chol + i) = 0;
-    *(chol_inv + i) = 0;
-  }
-
-  // lower triangle cholesky decomposition
-  chol(&sigma[0], K, sigma_chol);
+  vector<double> T(K * K);
+  jags::RoBMA::inverse_spd (&T[0], sigma, K);
 
-  // inverse
-  inverse(&sigma_chol[0], K, chol_inv);
-
-  // product of the diagonal ellements
-  double diag_prod = 0;
-  for(int i = 0; i < K; i++){
-    diag_prod += std::log(*(chol_inv+K*i+i));
-  }
-
-  // standardized means?
(based on arma::inplace_tri_mat_mult) - double * z; - z = new double [K]; - for(int i = 0; i < K; i++){ - *(z+i) = 0; - } - for(int i = 0; i < K; i++){ - *(z+i) += *(x+i) - *(mu+i); - } - for(int i = K - 1; i >= 0; i--){ - double temp = 0; - for(int j = 0; j <= i; j++){ - temp += *(chol_inv+K*i+j) * *(z+j); + double loglik = 0; + vector delta(K); + for (int i = 0; i < K; ++i) { + delta[i] = x[i] - mu[i]; + loglik -= (delta[i] * T[i + i * K] * delta[i])/2; + for (int j = 0; j < i; ++j) { + loglik -= delta[i] * T[i + j * K] * delta[j]; } - *(z+i) = temp; - } - - // log lik - double log_lik = 0; - for(int i = 0; i < K; i++){ - log_lik += std::pow(*(z+i), 2); } - log_lik = - 0.5 * log_lik + diag_prod - K * 0.9189385; - - // clean the memory - delete[] sigma_chol; - delete[] chol_inv; - delete[] z; - - return log_lik; -} + loglik -= jags::RoBMA::logdet(sigma, K)/2 + K * M_LN_SQRT_2PI; -// based on: https://www.geeksforgeeks.org/cholesky-decomposition-matrix-decomposition/ -void chol(double const *matrix, const int n, double *lower) -{ - // Decomposing a matrix into Lower Triangular - for (int i = 0; i < n; i++) { - for (int j = 0; j <= i; j++) { - double sum = 0; - - if (j == i) { - // summation for diagnols - for (int k = 0; k < j; k++) - sum += std::pow(lower[n*j+k], 2); - lower[n*j+j] = std::sqrt(*(matrix+n*j+j) - sum); - } else { - // Evaluating L(i, j) using L(j, j) - for (int k = 0; k < j; k++) - sum += (lower[n*i+k] * lower[n*j+k]); - lower[n*i+j] = (*(matrix+n*i+j) - sum) / lower[n*j+j]; - } - } - } + return loglik; } - -void cofactor(double const *matrix, double *temp, int p, int q, int n, int const K) -{ - int i = 0, j = 0; - - // Looping for each element of the matrix - for (int row = 0; row < n; row++){ - for (int col = 0; col < n; col++){ - // Copying into temporary matrix only those element - // which are not in given row and column - if (row != p && col != q){ - temp[K*i+j++] = *(matrix+K*row+col); - - // Row is filled, so increase row index and - // reset col index - if (j == n - 1){ - j = 0; - i++; - } - } - } - } -} - -// Recursive function for finding determinant of matrix. -// n is current dimension of A[][]. -double determinant(double const *matrix, int n, int const K) -{ - double D = 0; // Initialize result - - // Base case : if matrix contains single element - if (n == 1) - return *matrix; - - // To store cofactors - double * temp; - temp = new double [K*K]; - - int sign = 1; // To store sign multiplier - - // Iterate for each element of first row - for (int f = 0; f < n; f++){ - // Getting Cofactor of A[0][f] - cofactor(&matrix[0], temp, 0, f, n, K); - D += sign * *(matrix+f) * determinant(temp, n - 1, K); - - // terms are to be added with alternate sign - sign = -sign; - } - - // clean the memory - delete[] temp; - - return D; -} - -// Function to get adjoint of A[N][N] in adj[N][N]. -void adjoint(double const *matrix, double *adj, int const K) -{ - if (K == 1) - { - *adj = 1; - return; - } - - // temp is used to store cofactors of A[][] - int sign = 1; - - double * temp; - temp = new double [K*K]; - - for (int i=0; i (or 0-length row.names)" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 2 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) -9.89 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 
0.000 (mu) mod_con ~ Spike(0) " +" Inclusion BF 0.000 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"tau 0.800 0.155 0.565 0.776 1.164 0.00162 0.010 9090 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 3 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -489.55 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 0.000 (mu) mod_con ~ Spike(0) " +" Inclusion BF 0.000 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.025 -0.050 0.000 0.048 0.00025 0.010 9932 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 4 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -11.49 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 0.000 (mu) mod_con ~ Spike(0) " +" Inclusion BF 0.000 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.210 -0.420 0.001 0.417 0.00218 0.010 9267 1.001" +"tau 0.826 0.165 0.574 0.802 1.219 0.00180 0.011 8365 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 5 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) 18.06 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 0.476 (mu) mod_con ~ Spike(0) " +" Inclusion BF 13.608 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"mod_cat [dif: A] -0.945 0.034 -1.010 -0.945 -0.877 0.00028 0.008 1880 1.002" +"mod_cat [dif: B] 0.000 0.035 -0.070 0.000 0.068 0.00029 0.008 1773 1.002" +"mod_cat [dif: C] 0.945 0.035 0.874 0.945 1.013 0.00029 0.008 1703 1.002" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 6 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) 14.62 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 0.015 (mu) mod_con ~ Spike(0) " +" Inclusion BF 0.233 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"mod_cat [dif: A] -0.940 0.040 -1.020 -0.941 -0.865 0.00032 0.008 1906 1.004" +"mod_cat [dif: B] 0.001 0.040 -0.078 0.000 0.079 0.00033 0.008 1837 1.001" +"mod_cat [dif: C] 0.939 0.040 0.861 0.940 1.017 0.00032 0.008 1950 1.002" +"tau 0.049 0.019 0.022 0.045 0.093 0.00022 0.012 7219 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 7 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 14.36 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 
0.012 (mu) mod_con ~ Spike(0) " +" Inclusion BF 0.180 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.025 -0.048 0.000 0.049 0.00025 0.010 9865 1.000" +"mod_cat [dif: A] -0.946 0.035 -1.013 -0.946 -0.877 0.00028 0.008 1817 1.003" +"mod_cat [dif: B] 0.000 0.036 -0.071 0.002 0.069 0.00029 0.008 1836 1.003" +"mod_cat [dif: C] 0.946 0.035 0.875 0.946 1.015 0.00029 0.008 1825 1.003" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 8 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 11.05 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 0.000 (mu) mod_con ~ Spike(0) " +" Inclusion BF 0.006 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.028 -0.055 0.000 0.057 0.00028 0.010 9929 1.000" +"mod_cat [dif: A] -0.939 0.040 -1.013 -0.940 -0.860 0.00032 0.008 1777 1.002" +"mod_cat [dif: B] 0.001 0.040 -0.078 0.003 0.079 0.00033 0.008 1693 1.000" +"mod_cat [dif: C] 0.937 0.040 0.859 0.938 1.013 0.00032 0.008 1844 1.003" +"tau 0.050 0.020 0.022 0.047 0.098 0.00026 0.013 5937 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 9 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) -28.92 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 0.000 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.000 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"mod_con 0.764 0.025 0.714 0.764 0.812 0.00026 0.010 9167 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 10 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) 2.10 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 0.000 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.000 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"mod_con 0.704 0.077 0.540 0.707 0.844 0.00091 0.012 7137 1.000" +"tau 0.263 0.063 0.167 0.254 0.410 0.00073 0.012 7513 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 11 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -32.61 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 0.000 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.000 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.025 -0.049 0.000 0.050 0.00026 0.011 8926 1.000" +"mod_con 0.764 0.025 0.714 0.764 0.814 0.00026 0.010 9602 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 12 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -0.53 (mu) mod_cat ~ mean difference contrast: mSpike(0)" +" Post. prob. 
0.000 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.000 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.076 -0.151 0.000 0.150 0.00082 0.011 8631 1.000" +"mod_con 0.699 0.082 0.519 0.703 0.847 0.00098 0.012 7022 1.000" +"tau 0.277 0.069 0.175 0.266 0.443 0.00092 0.013 5633 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 13 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) 18.03 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 0.463 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 12.936 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"mod_cat [dif: A] -0.822 0.083 -0.983 -0.821 -0.660 0.00046 0.006 501 1.002" +"mod_cat [dif: B] 0.001 0.035 -0.068 0.001 0.071 0.00020 0.006 3855 1.000" +"mod_cat [dif: C] 0.821 0.084 0.661 0.820 0.984 0.00046 0.006 520 1.001" +"mod_con 0.112 0.069 -0.025 0.112 0.248 0.00300 0.043 536 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 14 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Spike(0) " +" log(marglik) 14.96 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 0.022 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.330 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"mod_cat [dif: A] -0.790 0.095 -0.968 -0.792 -0.586 0.00055 0.006 405 1.001" +"mod_cat [dif: B] 0.001 0.041 -0.080 0.001 0.084 0.00024 0.006 3502 1.001" +"mod_cat [dif: C] 0.789 0.095 0.593 0.791 0.964 0.00055 0.006 457 1.002" +"mod_con 0.137 0.078 -0.010 0.135 0.300 0.00349 0.044 505 1.001" +"tau 0.054 0.023 0.022 0.049 0.109 0.00032 0.014 5028 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 15 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 14.34 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 0.012 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.175 tau ~ Spike(0) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.025 -0.049 0.000 0.050 0.00018 0.007 19835 1.000" +"mod_cat [dif: A] -0.819 0.083 -0.981 -0.816 -0.661 0.00046 0.006 492 1.007" +"mod_cat [dif: B] 0.000 0.035 -0.070 0.000 0.069 0.00019 0.006 4166 1.001" +"mod_cat [dif: C] 0.819 0.083 0.662 0.817 0.981 0.00046 0.006 529 1.008" +"mod_con 0.114 0.069 -0.022 0.115 0.246 0.00297 0.043 544 1.004" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 16 Parameter prior distributions" +" Prior prob. 0.062 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 11.42 (mu) mod_cat ~ mean difference contrast: mNormal(0, 0.25)" +" Post. prob. 
0.001 (mu) mod_con ~ Normal(0, 0.25) " +" Inclusion BF 0.009 tau ~ InvGamma(1, 0.15) " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.030 -0.058 0.000 0.060 0.00019 0.006 23705 1.000" +"mod_cat [dif: A] -0.781 0.104 -0.973 -0.787 -0.559 0.00053 0.005 482 1.008" +"mod_cat [dif: B] 0.001 0.042 -0.083 0.001 0.084 0.00021 0.005 4182 1.001" +"mod_cat [dif: C] 0.780 0.103 0.566 0.786 0.973 0.00052 0.005 463 1.010" +"mod_con 0.143 0.085 -0.016 0.140 0.320 0.00371 0.044 528 1.006" +"tau 0.057 0.026 0.024 0.052 0.122 0.00046 0.018 3097 1.002" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." diff --git a/tests/results/summary.individual/15.txt b/tests/results/summary.individual/15.txt new file mode 100644 index 00000000..4015e01c --- /dev/null +++ b/tests/results/summary.individual/15.txt @@ -0,0 +1,92 @@ +"Call:" +"RoBMA.reg(formula = ~mod_con, data = df_reg, priors = list(mod_con = list(null = prior(\"normal\", " +" list(0, 0.05)), alt = prior(\"normal\", list(0.3, 0.15)))), " +" priors_heterogeneity = NULL, priors_bias = list(prior_weightfunction(distribution = \"two.sided\", " +" parameters = list(alpha = c(1, 1), steps = c(0.05)), " +" prior_weights = 1/2), prior_PET(distribution = \"Cauchy\", " +" parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2)), " +" priors_effect_null = NULL, parallel = TRUE, seed = 1)" +"" +"Robust Bayesian meta-analysis " +" Model 1 Parameter prior distributions" +" Prior prob. 0.250 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -115.99 (mu) mod_con ~ Normal(0, 0.05)" +" Post. prob. 0.000 tau ~ Spike(0) " +" Inclusion BF 0.000 " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.024 -0.046 0.000 0.047 0.00025 0.010 9623 1.000" +"mod_con 0.760 0.022 0.716 0.760 0.803 0.00023 0.011 8797 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 2 Parameter prior distributions" +" Prior prob. 0.125 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -115.22 (mu) mod_con ~ Normal(0, 0.05) " +" Post. prob. 0.000 tau ~ Spike(0) " +" Inclusion BF 0.000 omega[two-sided: .05] ~ CumDirichlet(1, 1)" +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.024 -0.048 0.000 0.048 0.00025 0.010 9849 1.000" +"mod_con 0.759 0.022 0.715 0.759 0.801 0.00023 0.011 8983 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" +"omega[0.05,1] 0.394 0.262 0.036 0.342 0.942 0.00388 0.015 4536 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 3 Parameter prior distributions" +" Prior prob. 0.125 (mu) intercept ~ Normal(0, 1) " +" log(marglik) -116.13 (mu) mod_con ~ Normal(0, 0.05) " +" Post. prob. 0.000 tau ~ Spike(0) " +" Inclusion BF 0.000 PET ~ Cauchy(0, 1)[0, Inf]" +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept -0.121 0.149 -0.535 -0.078 0.019 0.00568 0.038 686 1.025" +"mod_con 0.760 0.022 0.717 0.760 0.803 0.00009 0.004 53589 1.000" +"PET 1.272 1.548 0.032 0.790 5.632 0.06108 0.039 642 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 4 Parameter prior distributions" +" Prior prob. 
0.250 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 17.07 (mu) mod_con ~ Normal(0.3, 0.15)" +" Post. prob. 0.453 tau ~ Spike(0) " +" Inclusion BF 2.489 " +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.025 -0.049 0.000 0.048 0.00027 0.011 8710 1.000" +"mod_con 0.924 0.024 0.878 0.924 0.973 0.00025 0.010 9345 1.000" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 5 Parameter prior distributions" +" Prior prob. 0.125 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 17.50 (mu) mod_con ~ Normal(0.3, 0.15) " +" Post. prob. 0.350 tau ~ Spike(0) " +" Inclusion BF 3.774 omega[two-sided: .05] ~ CumDirichlet(1, 1)" +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept 0.000 0.025 -0.048 0.000 0.048 0.00025 0.010 9801 1.000" +"mod_con 0.923 0.024 0.874 0.923 0.970 0.00026 0.011 8856 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" +"omega[0.05,1] 0.446 0.270 0.044 0.415 0.964 0.00404 0.015 4479 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +" " +" Model 6 Parameter prior distributions" +" Prior prob. 0.125 (mu) intercept ~ Normal(0, 1) " +" log(marglik) 16.93 (mu) mod_con ~ Normal(0.3, 0.15) " +" Post. prob. 0.196 tau ~ Spike(0) " +" Inclusion BF 1.709 PET ~ Cauchy(0, 1)[0, Inf]" +"" +"Parameter estimates:" +" Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" +"intercept -0.123 0.140 -0.548 -0.080 0.017 0.00530 0.038 702 1.012" +"mod_con 0.924 0.024 0.877 0.924 0.971 0.00010 0.004 54313 1.000" +"PET 1.297 1.458 0.036 0.813 5.761 0.05603 0.038 677 1.001" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." diff --git a/tests/results/summary.individual/2.txt b/tests/results/summary.individual/2.txt index 712af721..01fac6d6 100644 --- a/tests/results/summary.individual/2.txt +++ b/tests/results/summary.individual/2.txt @@ -22,7 +22,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.428 0.271 0.045 0.381 0.958 0.00414 0.015 4300 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -35,7 +35,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.631 0.237 0.158 0.656 0.983 0.00371 0.016 4094 1.000" "omega[0.1,1] 0.323 0.204 0.045 0.281 0.793 0.00297 0.015 4721 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -62,7 +62,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.227 0.195 0.039 0.173 0.731 0.00234 0.012 6897 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.478 0.271 0.061 0.453 0.969 0.00403 0.015 4515 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
"" @@ -76,7 +76,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.216 0.180 0.041 0.165 0.684 0.00205 0.011 7706 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.653 0.233 0.174 0.685 0.985 0.00369 0.016 4000 1.000" "omega[0.1,1] 0.368 0.217 0.054 0.336 0.845 0.00320 0.015 4615 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -103,7 +103,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.405 0.224 -0.018 0.399 0.860 0.00250 0.011 8062 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.547 0.261 0.086 0.554 0.974 0.00369 0.014 5032 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -117,7 +117,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.377 0.218 -0.026 0.371 0.820 0.00242 0.011 8100 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.687 0.221 0.206 0.725 0.989 0.00349 0.016 4028 1.000" "omega[0.1,1] 0.420 0.219 0.071 0.399 0.862 0.00308 0.014 5071 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -146,7 +146,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.381 0.257 -0.108 0.379 0.880 0.00281 0.011 8369 1.000" "tau 0.190 0.179 0.036 0.139 0.627 0.00206 0.011 7577 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.565 0.259 0.099 0.574 0.978 0.00379 0.015 4684 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -161,7 +161,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.358 0.248 -0.106 0.349 0.861 0.00274 0.011 8164 1.001" "tau 0.184 0.164 0.036 0.136 0.609 0.00190 0.012 7394 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.703 0.213 0.235 0.740 0.989 0.00331 0.016 4150 1.000" "omega[0.1,1] 0.433 0.219 0.081 0.415 0.868 0.00317 0.014 4780 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." diff --git a/tests/results/summary.individual/4.txt b/tests/results/summary.individual/4.txt index 89d61ea5..0c535481 100644 --- a/tests/results/summary.individual/4.txt +++ b/tests/results/summary.individual/4.txt @@ -22,7 +22,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.403 0.270 0.038 0.351 0.951 0.00442 0.016 3729 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
"" @@ -35,7 +35,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.624 0.243 0.144 0.649 0.982 0.00384 0.016 3983 1.000" "omega[0.1,1] 0.314 0.209 0.039 0.270 0.800 0.00331 0.016 3975 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -49,7 +49,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.412 0.274 0.039 0.356 0.959 0.00457 0.017 3593 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -62,7 +62,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.605 0.251 0.121 0.627 0.982 0.00421 0.017 3555 1.000" "omega[0.05,1] 0.261 0.202 0.023 0.205 0.758 0.00361 0.018 3126 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -76,7 +76,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.609 0.247 0.135 0.628 0.983 0.00404 0.016 3718 1.000" "omega[0.5,1] 0.206 0.192 0.005 0.145 0.711 0.00362 0.019 2808 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -90,7 +90,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.711 0.211 0.240 0.753 0.990 0.00348 0.016 3679 1.000" "omega[0.05,0.5] 0.440 0.220 0.086 0.421 0.881 0.00362 0.016 3680 1.000" "omega[0.5,1] 0.150 0.149 0.003 0.100 0.557 0.00287 0.019 2698 1.000" @@ -142,7 +142,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.226 0.194 0.039 0.172 0.717 0.00242 0.012 6403 1.002" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.460 0.270 0.056 0.424 0.963 0.00397 0.015 4630 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -156,7 +156,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.213 0.183 0.038 0.162 0.676 0.00216 0.012 7167 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.652 0.234 0.169 0.683 0.985 0.00380 0.016 3780 1.000" "omega[0.1,1] 0.351 0.212 0.050 0.314 0.828 0.00313 0.015 4612 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -171,7 +171,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.243 0.215 0.041 0.184 0.796 0.00249 0.012 7504 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.454 0.271 0.051 0.418 0.963 0.00394 0.015 4749 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -185,7 +185,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.220 0.190 0.040 0.167 0.701 0.00220 0.012 7454 1.001" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.617 0.246 0.135 0.641 0.984 0.00385 0.016 4067 1.000" "omega[0.05,1] 0.290 0.206 0.031 0.240 0.788 0.00322 0.016 4103 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -200,7 +200,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.256 0.238 0.040 0.189 0.824 0.00270 0.011 7777 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.634 0.238 0.156 0.662 0.984 0.00380 0.016 3927 1.002" "omega[0.5,1] 0.216 0.196 0.005 0.155 0.728 0.00365 0.019 2878 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -215,7 +215,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.238 0.204 0.039 0.182 0.759 0.00249 0.012 6710 1.001" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.724 0.203 0.268 0.765 0.990 0.00353 0.017 3310 1.001" "omega[0.05,0.5] 0.470 0.220 0.096 0.459 0.898 0.00371 0.017 3509 1.000" "omega[0.5,1] 0.165 0.160 0.004 0.114 0.590 0.00319 0.020 2509 1.000" @@ -269,7 +269,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.405 0.222 -0.015 0.399 0.853 0.00252 0.011 7733 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.533 0.267 0.077 0.529 0.976 0.00398 0.015 4508 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -283,7 +283,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.377 0.217 -0.027 0.371 0.819 0.00256 0.012 7164 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.680 0.224 0.201 0.715 0.987 0.00366 0.016 3745 1.000" "omega[0.1,1] 0.407 0.222 0.063 0.383 0.864 0.00339 0.015 4318 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -298,7 +298,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.394 0.236 -0.061 0.393 0.863 0.00274 0.012 7422 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.568 0.259 0.088 0.584 0.977 0.00375 0.014 4771 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
"" @@ -312,7 +312,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.353 0.233 -0.087 0.349 0.817 0.00289 0.012 6475 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.670 0.228 0.183 0.708 0.986 0.00366 0.016 3891 1.000" "omega[0.05,1] 0.389 0.225 0.048 0.361 0.856 0.00339 0.015 4388 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -327,7 +327,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.365 0.253 -0.145 0.370 0.853 0.00298 0.012 7216 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.694 0.220 0.210 0.736 0.989 0.00351 0.016 3945 1.000" "omega[0.5,1] 0.302 0.232 0.008 0.251 0.825 0.00413 0.018 3147 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -342,7 +342,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.325 0.248 -0.156 0.326 0.808 0.00291 0.012 7242 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.749 0.194 0.297 0.792 0.991 0.00320 0.016 3683 1.000" "omega[0.05,0.5] 0.518 0.214 0.123 0.519 0.905 0.00352 0.016 3702 1.000" "omega[0.5,1] 0.226 0.190 0.005 0.176 0.687 0.00339 0.018 3150 1.000" @@ -398,7 +398,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.382 0.257 -0.107 0.381 0.887 0.00300 0.012 7340 1.000" "tau 0.189 0.176 0.036 0.136 0.656 0.00219 0.012 6456 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.550 0.262 0.088 0.556 0.975 0.00399 0.015 4310 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -413,7 +413,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.355 0.247 -0.103 0.348 0.850 0.00286 0.012 7409 1.000" "tau 0.182 0.158 0.037 0.136 0.591 0.00197 0.012 6460 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.691 0.220 0.212 0.728 0.989 0.00352 0.016 3884 1.000" "omega[0.1,1] 0.418 0.220 0.070 0.397 0.869 0.00335 0.015 4300 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -429,7 +429,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.355 0.285 -0.215 0.364 0.891 0.00354 0.012 6484 1.000" "tau 0.206 0.199 0.038 0.147 0.728 0.00251 0.013 6306 1.002" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.570 0.262 0.089 0.584 0.981 0.00386 0.015 4605 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
"" @@ -444,7 +444,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.296 0.294 -0.299 0.301 0.842 0.00420 0.014 4922 1.001" "tau 0.206 0.206 0.036 0.146 0.725 0.00286 0.014 5202 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.669 0.228 0.179 0.704 0.987 0.00379 0.017 3632 1.002" "omega[0.05,1] 0.383 0.223 0.047 0.354 0.850 0.00351 0.016 4022 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -460,7 +460,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.247 0.364 -0.601 0.284 0.842 0.00546 0.015 4426 1.002" "tau 0.249 0.266 0.038 0.166 0.954 0.00378 0.014 4936 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.680 0.226 0.192 0.717 0.988 0.00361 0.016 3903 1.000" "omega[0.5,1] 0.277 0.227 0.007 0.220 0.808 0.00429 0.019 2792 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -476,7 +476,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.204 0.365 -0.619 0.236 0.823 0.00573 0.016 4062 1.001" "tau 0.242 0.244 0.038 0.167 0.851 0.00358 0.015 4649 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.746 0.196 0.286 0.790 0.991 0.00332 0.017 3471 1.000" "omega[0.05,0.5] 0.511 0.216 0.119 0.511 0.904 0.00374 0.017 3337 1.000" "omega[0.5,1] 0.197 0.178 0.004 0.144 0.650 0.00341 0.019 2735 1.000" diff --git a/tests/results/summary.individual/5.txt b/tests/results/summary.individual/5.txt index d5f8da6a..d03f4f4a 100644 --- a/tests/results/summary.individual/5.txt +++ b/tests/results/summary.individual/5.txt @@ -23,7 +23,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.387 0.272 0.033 0.328 0.953 0.00440 0.016 3806 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -36,7 +36,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.618 0.247 0.131 0.643 0.983 0.00404 0.016 3723 1.002" "omega[0.1,1] 0.303 0.208 0.035 0.255 0.796 0.00344 0.017 3664 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -50,7 +50,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.391 0.270 0.036 0.333 0.950 0.00431 0.016 3941 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
"" @@ -63,7 +63,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.597 0.259 0.109 0.622 0.982 0.00430 0.017 3625 1.000" "omega[0.05,1] 0.252 0.201 0.020 0.194 0.756 0.00346 0.017 3383 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -77,7 +77,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.602 0.251 0.125 0.621 0.982 0.00400 0.016 3942 1.000" "omega[0.5,1] 0.203 0.188 0.005 0.144 0.704 0.00382 0.020 2428 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -91,7 +91,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.703 0.214 0.234 0.743 0.989 0.00356 0.017 3616 1.000" "omega[0.05,0.5] 0.432 0.222 0.078 0.412 0.879 0.00377 0.017 3473 1.000" "omega[0.5,1] 0.150 0.150 0.003 0.101 0.568 0.00303 0.020 2467 1.001" @@ -143,7 +143,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.227 0.190 0.040 0.173 0.707 0.00220 0.012 7425 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.442 0.274 0.044 0.405 0.960 0.00418 0.015 4293 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -157,7 +157,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.211 0.185 0.037 0.162 0.675 0.00234 0.013 6217 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.640 0.238 0.161 0.668 0.985 0.00385 0.016 3822 1.001" "omega[0.1,1] 0.338 0.214 0.045 0.298 0.828 0.00326 0.015 4314 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -172,7 +172,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.245 0.219 0.041 0.186 0.783 0.00249 0.011 7694 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.447 0.271 0.047 0.412 0.959 0.00390 0.014 4831 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -186,7 +186,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.223 0.193 0.039 0.170 0.724 0.00233 0.012 6862 1.001" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.617 0.248 0.129 0.643 0.985 0.00413 0.017 3605 1.001" "omega[0.05,1] 0.283 0.205 0.029 0.234 0.779 0.00325 0.016 3974 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -201,7 +201,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.259 0.236 0.042 0.196 0.819 0.00276 0.012 7312 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.637 0.239 0.159 0.667 0.984 0.00395 0.017 3661 1.000" "omega[0.5,1] 0.214 0.194 0.005 0.155 0.717 0.00337 0.017 3298 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -216,7 +216,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.239 0.208 0.040 0.183 0.757 0.00241 0.012 7433 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.718 0.207 0.252 0.756 0.990 0.00346 0.017 3583 1.000" "omega[0.05,0.5] 0.461 0.219 0.092 0.448 0.890 0.00362 0.016 3676 1.000" "omega[0.5,1] 0.158 0.152 0.004 0.109 0.558 0.00280 0.018 2933 1.000" @@ -270,7 +270,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.395 0.211 0.000 0.388 0.824 0.00245 0.012 7451 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.518 0.270 0.068 0.515 0.973 0.00404 0.015 4479 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -284,7 +284,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.372 0.209 -0.013 0.364 0.806 0.00238 0.011 7688 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.672 0.228 0.187 0.709 0.988 0.00370 0.016 3799 1.001" "omega[0.1,1] 0.398 0.222 0.059 0.370 0.858 0.00332 0.015 4493 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -299,7 +299,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.394 0.220 -0.031 0.392 0.825 0.00262 0.012 7045 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.564 0.262 0.086 0.579 0.977 0.00369 0.014 5027 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -313,7 +313,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.348 0.223 -0.078 0.345 0.798 0.00279 0.013 6384 1.001" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.671 0.230 0.178 0.709 0.988 0.00375 0.016 3773 1.000" "omega[0.05,1] 0.383 0.226 0.046 0.351 0.857 0.00359 0.016 3975 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -328,7 +328,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.363 0.242 -0.118 0.366 0.828 0.00277 0.011 7654 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.685 0.221 0.208 0.724 0.988 0.00346 0.016 4076 1.001" "omega[0.5,1] 0.301 0.228 0.009 0.254 0.812 0.00376 0.016 3678 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -343,7 +343,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.322 0.237 -0.150 0.326 0.779 0.00281 0.012 7110 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.746 0.195 0.291 0.791 0.992 0.00322 0.017 3668 1.001" "omega[0.05,0.5] 0.505 0.217 0.114 0.504 0.907 0.00352 0.016 3778 1.001" "omega[0.5,1] 0.219 0.185 0.006 0.170 0.669 0.00328 0.018 3177 1.004" @@ -399,7 +399,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.373 0.249 -0.101 0.369 0.861 0.00283 0.011 7764 1.000" "tau 0.189 0.168 0.036 0.139 0.636 0.00198 0.012 7242 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.533 0.265 0.078 0.531 0.974 0.00401 0.015 4380 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -414,7 +414,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.350 0.241 -0.098 0.343 0.833 0.00271 0.011 7911 1.001" "tau 0.181 0.161 0.037 0.133 0.592 0.00192 0.012 7061 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.679 0.226 0.199 0.715 0.989 0.00367 0.016 3791 1.001" "omega[0.1,1] 0.406 0.218 0.068 0.381 0.856 0.00318 0.015 4715 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -430,7 +430,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.352 0.276 -0.216 0.360 0.866 0.00340 0.012 6567 1.000" "tau 0.207 0.208 0.038 0.147 0.727 0.00242 0.012 7362 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.565 0.261 0.087 0.581 0.977 0.00379 0.015 4734 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -445,7 +445,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.305 0.282 -0.262 0.312 0.822 0.00373 0.013 5714 1.003" "tau 0.199 0.193 0.036 0.143 0.693 0.00255 0.013 5692 1.001" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.674 0.227 0.188 0.710 0.987 0.00347 0.015 4270 1.001" "omega[0.05,1] 0.390 0.225 0.045 0.364 0.857 0.00333 0.015 4565 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -461,7 +461,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.248 0.356 -0.584 0.283 0.843 0.00530 0.015 4515 1.001" "tau 0.248 0.254 0.039 0.169 0.901 0.00364 0.014 4892 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.680 0.224 0.197 0.717 0.987 0.00345 0.015 4232 1.001" "omega[0.5,1] 0.270 0.227 0.006 0.208 0.804 0.00403 0.018 3174 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -477,7 +477,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.215 0.354 -0.620 0.248 0.814 0.00563 0.016 3956 1.004" "tau 0.239 0.244 0.037 0.163 0.874 0.00357 0.015 4691 1.002" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.743 0.198 0.282 0.787 0.991 0.00340 0.017 3388 1.000" "omega[0.05,0.5] 0.509 0.217 0.117 0.507 0.904 0.00368 0.017 3488 1.001" "omega[0.5,1] 0.205 0.184 0.004 0.151 0.668 0.00348 0.019 2807 1.002" diff --git a/tests/results/summary.individual/6.txt b/tests/results/summary.individual/6.txt index 339c3205..d5a03124 100644 --- a/tests/results/summary.individual/6.txt +++ b/tests/results/summary.individual/6.txt @@ -23,7 +23,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.428 0.271 0.045 0.381 0.958 0.00414 0.015 4300 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -36,7 +36,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.631 0.237 0.158 0.656 0.983 0.00371 0.016 4094 1.000" "omega[0.1,1] 0.323 0.204 0.045 0.281 0.793 0.00297 0.015 4721 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -50,7 +50,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.421 0.271 0.044 0.372 0.957 0.00415 0.015 4248 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -63,7 +63,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.606 0.249 0.126 0.627 0.982 0.00410 0.016 3679 1.000" "omega[0.05,1] 0.271 0.203 0.028 0.216 0.776 0.00339 0.017 3576 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -77,7 +77,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.616 0.243 0.145 0.639 0.984 0.00377 0.016 4138 1.000" "omega[0.5,1] 0.210 0.192 0.005 0.150 0.710 0.00347 0.018 3065 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -91,7 +91,7 @@ "" "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.707 0.211 0.243 0.745 0.988 0.00343 0.016 3774 1.000" "omega[0.05,0.5] 0.446 0.216 0.090 0.429 0.877 0.00341 0.016 4026 1.002" "omega[0.5,1] 0.153 0.152 0.003 0.103 0.571 0.00326 0.021 2187 1.001" @@ -143,7 +143,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.227 0.195 0.039 0.173 0.731 0.00234 0.012 6897 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.478 0.271 0.061 0.453 0.969 0.00403 0.015 4515 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -157,7 +157,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.216 0.180 0.041 0.165 0.684 0.00205 0.011 7706 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.653 0.233 0.174 0.685 0.985 0.00369 0.016 4000 1.000" "omega[0.1,1] 0.368 0.217 0.054 0.336 0.845 0.00320 0.015 4615 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -172,7 +172,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.245 0.213 0.039 0.185 0.788 0.00246 0.012 7507 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.462 0.270 0.055 0.428 0.962 0.00404 0.015 4475 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -186,7 +186,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.228 0.195 0.040 0.171 0.730 0.00230 0.012 7133 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.625 0.243 0.144 0.655 0.984 0.00397 0.016 3744 1.000" "omega[0.05,1] 0.306 0.208 0.035 0.259 0.793 0.00319 0.015 4251 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -201,7 +201,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.259 0.235 0.042 0.194 0.851 0.00270 0.011 7566 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.644 0.237 0.165 0.673 0.987 0.00379 0.016 3907 1.001" "omega[0.5,1] 0.222 0.200 0.006 0.160 0.742 0.00381 0.019 2749 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -216,7 +216,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "tau 0.243 0.215 0.040 0.184 0.773 0.00240 0.011 8021 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.731 0.200 0.275 0.772 0.991 0.00324 0.016 3823 1.003" "omega[0.05,0.5] 0.478 0.217 0.109 0.467 0.894 0.00355 0.016 3721 1.001" "omega[0.5,1] 0.167 0.158 0.004 0.118 0.588 0.00295 0.019 2884 1.003" @@ -270,7 +270,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.405 0.221 -0.849 -0.401 0.010 0.00257 0.012 7436 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.540 0.264 0.084 0.542 0.975 0.00378 0.014 4852 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -284,7 +284,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.375 0.217 -0.817 -0.370 0.026 0.00255 0.012 7228 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.685 0.222 0.209 0.724 0.988 0.00352 0.016 3990 1.003" "omega[0.1,1] 0.418 0.220 0.072 0.395 0.870 0.00316 0.014 4838 1.002" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -299,7 +299,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.405 0.235 -0.869 -0.405 0.050 0.00278 0.012 7137 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.582 0.259 0.099 0.599 0.981 0.00401 0.015 4169 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -313,7 +313,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.352 0.235 -0.819 -0.346 0.094 0.00287 0.012 6687 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.676 0.226 0.191 0.712 0.987 0.00366 0.016 3808 1.000" "omega[0.05,1] 0.396 0.223 0.053 0.370 0.856 0.00346 0.016 4159 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -328,7 +328,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.364 0.254 -0.845 -0.369 0.148 0.00299 0.012 7216 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.692 0.220 0.205 0.728 0.988 0.00349 0.016 3965 1.000" "omega[0.5,1] 0.304 0.232 0.009 0.255 0.823 0.00381 0.016 3699 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -343,7 +343,7 @@ "Parameter estimates:" " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.327 0.252 -0.807 -0.331 0.180 0.00316 0.013 6357 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.752 0.192 0.300 0.796 0.992 0.00332 0.017 3359 1.001" "omega[0.05,0.5] 0.520 0.215 0.126 0.523 0.913 0.00360 0.017 3567 1.001" "omega[0.5,1] 0.231 0.190 0.006 0.183 0.685 0.00325 0.017 3425 1.001" @@ -399,7 +399,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.383 0.262 -0.905 -0.382 0.120 0.00302 0.012 7509 1.000" "tau 0.190 0.181 0.036 0.138 0.656 0.00212 0.012 7276 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.560 0.260 0.097 0.569 0.979 0.00380 0.015 4697 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -414,7 +414,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.357 0.256 -0.872 -0.354 0.116 0.00306 0.012 7033 1.000" "tau 0.188 0.171 0.037 0.139 0.621 0.00214 0.013 6374 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.1] 0.699 0.214 0.231 0.734 0.988 0.00356 0.017 3606 1.000" "omega[0.1,1] 0.428 0.219 0.080 0.408 0.873 0.00339 0.015 4168 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -430,7 +430,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.354 0.286 -0.895 -0.358 0.215 0.00350 0.012 6657 1.000" "tau 0.211 0.214 0.039 0.148 0.743 0.00251 0.012 7256 1.001" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,1] 0.583 0.258 0.100 0.604 0.980 0.00377 0.015 4675 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" @@ -445,7 +445,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.304 0.284 -0.841 -0.309 0.272 0.00376 0.013 5705 1.000" "tau 0.205 0.211 0.038 0.146 0.705 0.00251 0.012 7040 1.000" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.682 0.224 0.202 0.719 0.988 0.00371 0.017 3654 1.001" "omega[0.05,1] 0.401 0.225 0.056 0.376 0.861 0.00343 0.015 4312 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." @@ -461,7 +461,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.251 0.370 -0.868 -0.285 0.589 0.00579 0.016 4075 1.000" "tau 0.247 0.257 0.036 0.166 0.907 0.00374 0.015 4707 1.000" -"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.000" +"omega[0,0.05] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.05,0.5] 0.688 0.221 0.204 0.726 0.988 0.00342 0.016 4147 1.001" "omega[0.5,1] 0.281 0.228 0.006 0.222 0.799 0.00436 0.019 2730 1.000" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." 
@@ -477,7 +477,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu -0.217 0.367 -0.834 -0.246 0.607 0.00550 0.015 4442 1.000" "tau 0.242 0.257 0.039 0.164 0.881 0.00366 0.014 4944 1.001" -"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.025] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.025,0.05] 0.751 0.191 0.308 0.791 0.992 0.00310 0.016 3793 1.001" "omega[0.05,0.5] 0.524 0.213 0.133 0.528 0.908 0.00340 0.016 3908 1.001" "omega[0.5,1] 0.211 0.186 0.004 0.156 0.669 0.00338 0.018 3035 1.000" diff --git a/tests/results/summary.individual/7.txt b/tests/results/summary.individual/7.txt index 72f7f97a..e8719e9f 100644 --- a/tests/results/summary.individual/7.txt +++ b/tests/results/summary.individual/7.txt @@ -15,7 +15,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.361 0.292 -0.249 0.370 0.895 0.00357 0.012 6664 1.000" "tau 0.216 0.209 0.038 0.153 0.767 0.00263 0.013 6290 1.000" -"omega[0,0.1] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.1] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.1,1] 0.624 0.245 0.139 0.649 0.984 0.00359 0.015 4649 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" diff --git a/tests/results/summary.individual/9.txt b/tests/results/summary.individual/9.txt index 00c71f95..89980344 100644 --- a/tests/results/summary.individual/9.txt +++ b/tests/results/summary.individual/9.txt @@ -17,7 +17,7 @@ " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" "mu 0.361 0.292 -0.249 0.370 0.895 0.00357 0.012 6664 1.000" "tau 0.216 0.209 0.038 0.153 0.767 0.00263 0.013 6290 1.000" -"omega[0,0.1] 1.000 0.000 1.000 1.000 1.000 NA NA NA 1.001" +"omega[0,0.1] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" "omega[0.1,1] 0.624 0.245 0.139 0.649 0.984 0.00359 0.015 4649 1.001" "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." "" diff --git a/tests/results/summary.models/13.txt b/tests/results/summary.models/13.txt index a791f4bc..5c7c9175 100644 --- a/tests/results/summary.models/13.txt +++ b/tests/results/summary.models/13.txt @@ -5,43 +5,43 @@ "" "Robust Bayesian meta-analysis" "Models overview:" -" Model Prior Effect Prior Heterogeneity Prior Var. allocation Prior Bias Prior prob. 
log(marglik)" -" 1 Spike(0) Spike(0) 0.125 -0.83" -" 2 Spike(0) Spike(0) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.28" -" 3 Spike(0) Spike(0) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.41" -" 4 Spike(0) Spike(0) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.29" -" 5 Spike(0) Spike(0) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.01" -" 6 Spike(0) Spike(0) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 0.54" -" 7 Spike(0) Spike(0) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.77" -" 8 Spike(0) Spike(0) PET ~ Cauchy(0, 1)[0, Inf] 0.031 0.15" -" 9 Spike(0) Spike(0) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -0.88" -" 10 Spike(0) InvGamma(1, 0.15) Beta(1, 1) 0.125 -0.58" -" 11 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.34" -" 12 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.53" -" 13 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.27" -" 14 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.05" -" 15 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 0.61" -" 16 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.81" -" 17 Spike(0) InvGamma(1, 0.15) Beta(1, 1) PET ~ Cauchy(0, 1)[0, Inf] 0.031 -0.04" -" 18 Spike(0) InvGamma(1, 0.15) Beta(1, 1) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -0.88" -" 19 Normal(0, 1) Spike(0) 0.125 -0.03" -" 20 Normal(0, 1) Spike(0) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.10" -" 21 Normal(0, 1) Spike(0) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.41" -" 22 Normal(0, 1) Spike(0) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.28" -" 23 Normal(0, 1) Spike(0) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.31" -" 24 Normal(0, 1) Spike(0) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 0.22" -" 25 Normal(0, 1) Spike(0) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.27" -" 26 Normal(0, 1) Spike(0) PET ~ Cauchy(0, 1)[0, Inf] 0.031 -0.61" -" 27 Normal(0, 1) Spike(0) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -1.54" -" 28 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) 0.125 -0.37" -" 29 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.52" -" 30 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.84" -" 31 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.64" -" 32 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.67" -" 33 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.03" -" 34 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.04" -" 35 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) PET ~ Cauchy(0, 1)[0, Inf] 0.031 -0.83" -" 36 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -1.66" +" Model Prior Effect Prior Heterogeneity Prior Hierarchical Prior Bias Prior prob. 
log(marglik)" +" 1 Spike(0) Spike(0) 0.125 -0.83" +" 2 Spike(0) Spike(0) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.28" +" 3 Spike(0) Spike(0) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.41" +" 4 Spike(0) Spike(0) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.29" +" 5 Spike(0) Spike(0) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.01" +" 6 Spike(0) Spike(0) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 0.54" +" 7 Spike(0) Spike(0) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.77" +" 8 Spike(0) Spike(0) PET ~ Cauchy(0, 1)[0, Inf] 0.031 0.15" +" 9 Spike(0) Spike(0) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -0.88" +" 10 Spike(0) InvGamma(1, 0.15) Beta(1, 1) 0.125 -0.58" +" 11 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.34" +" 12 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.53" +" 13 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.27" +" 14 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.05" +" 15 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 0.61" +" 16 Spike(0) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.81" +" 17 Spike(0) InvGamma(1, 0.15) Beta(1, 1) PET ~ Cauchy(0, 1)[0, Inf] 0.031 -0.04" +" 18 Spike(0) InvGamma(1, 0.15) Beta(1, 1) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -0.88" +" 19 Normal(0, 1) Spike(0) 0.125 -0.03" +" 20 Normal(0, 1) Spike(0) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.10" +" 21 Normal(0, 1) Spike(0) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.41" +" 22 Normal(0, 1) Spike(0) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.28" +" 23 Normal(0, 1) Spike(0) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.31" +" 24 Normal(0, 1) Spike(0) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 0.22" +" 25 Normal(0, 1) Spike(0) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.27" +" 26 Normal(0, 1) Spike(0) PET ~ Cauchy(0, 1)[0, Inf] 0.031 -0.61" +" 27 Normal(0, 1) Spike(0) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -1.54" +" 28 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) 0.125 -0.37" +" 29 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.52" +" 30 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[two-sided: .1, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.84" +" 31 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05] ~ CumDirichlet(1, 1) 0.010 -0.64" +" 32 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .05, .025] ~ CumDirichlet(1, 1, 1) 0.010 -0.67" +" 33 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05] ~ CumDirichlet(1, 1, 1) 0.010 -0.03" +" 34 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) omega[one-sided: .5, .05, .025] ~ CumDirichlet(1, 1, 1, 1) 0.010 0.04" +" 35 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) PET ~ Cauchy(0, 1)[0, Inf] 0.031 -0.83" +" 36 Normal(0, 1) InvGamma(1, 0.15) Beta(1, 1) PEESE ~ Cauchy(0, 5)[0, Inf] 0.031 -1.66" " Post. prob. 
Inclusion BF" " 0.076 0.574" " 0.011 1.057" diff --git a/tests/results/summary.models/14.txt b/tests/results/summary.models/14.txt new file mode 100644 index 00000000..05b1b202 --- /dev/null +++ b/tests/results/summary.models/14.txt @@ -0,0 +1,40 @@ +"Call:" +"RoBMA.reg(formula = ~mod_cat + mod_con, data = df_reg, priors_bias = NULL, " +" parallel = TRUE, seed = 1)" +"" +"Robust Bayesian meta-analysis" +"Models overview:" +" Model Prior intercept Prior mod_cat Prior mod_con Prior Heterogeneity Prior Bias Prior prob. log(marglik) Post. prob." +" 1 Spike(0) mean difference contrast: mSpike(0) Spike(0) Spike(0) 0.062 -485.86 0.000" +" 2 Spike(0) mean difference contrast: mSpike(0) Spike(0) InvGamma(1, 0.15) 0.062 -9.89 0.000" +" 3 Normal(0, 1) mean difference contrast: mSpike(0) Spike(0) Spike(0) 0.062 -489.55 0.000" +" 4 Normal(0, 1) mean difference contrast: mSpike(0) Spike(0) InvGamma(1, 0.15) 0.062 -11.49 0.000" +" 5 Spike(0) mean difference contrast: mNormal(0, 0.25) Spike(0) Spike(0) 0.062 18.06 0.476" +" 6 Spike(0) mean difference contrast: mNormal(0, 0.25) Spike(0) InvGamma(1, 0.15) 0.062 14.62 0.015" +" 7 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Spike(0) Spike(0) 0.062 14.36 0.012" +" 8 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Spike(0) InvGamma(1, 0.15) 0.062 11.05 0.000" +" 9 Spike(0) mean difference contrast: mSpike(0) Normal(0, 0.25) Spike(0) 0.062 -28.92 0.000" +" 10 Spike(0) mean difference contrast: mSpike(0) Normal(0, 0.25) InvGamma(1, 0.15) 0.062 2.10 0.000" +" 11 Normal(0, 1) mean difference contrast: mSpike(0) Normal(0, 0.25) Spike(0) 0.062 -32.61 0.000" +" 12 Normal(0, 1) mean difference contrast: mSpike(0) Normal(0, 0.25) InvGamma(1, 0.15) 0.062 -0.53 0.000" +" 13 Spike(0) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.062 18.03 0.463" +" 14 Spike(0) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) InvGamma(1, 0.15) 0.062 14.96 0.022" +" 15 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.062 14.34 0.012" +" 16 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) InvGamma(1, 0.15) 0.062 11.42 0.001" +" Inclusion BF" +" 0.000" +" 0.000" +" 0.000" +" 0.000" +" 13.608" +" 0.233" +" 0.180" +" 0.006" +" 0.000" +" 0.000" +" 0.000" +" 0.000" +" 12.936" +" 0.330" +" 0.175" +" 0.009" diff --git a/tests/results/summary.models/15.txt b/tests/results/summary.models/15.txt new file mode 100644 index 00000000..187e33ff --- /dev/null +++ b/tests/results/summary.models/15.txt @@ -0,0 +1,25 @@ +"Call:" +"RoBMA.reg(formula = ~mod_con, data = df_reg, priors = list(mod_con = list(null = prior(\"normal\", " +" list(0, 0.05)), alt = prior(\"normal\", list(0.3, 0.15)))), " +" priors_heterogeneity = NULL, priors_bias = list(prior_weightfunction(distribution = \"two.sided\", " +" parameters = list(alpha = c(1, 1), steps = c(0.05)), " +" prior_weights = 1/2), prior_PET(distribution = \"Cauchy\", " +" parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2)), " +" priors_effect_null = NULL, parallel = TRUE, seed = 1)" +"" +"Robust Bayesian meta-analysis" +"Models overview:" +" Model Prior intercept Prior mod_con Prior Heterogeneity Prior Bias Prior prob. log(marglik) Post. prob." 
+" 1 Normal(0, 1) Normal(0, 0.05) Spike(0) 0.250 -115.99 0.000" +" 2 Normal(0, 1) Normal(0, 0.05) Spike(0) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.125 -115.22 0.000" +" 3 Normal(0, 1) Normal(0, 0.05) Spike(0) PET ~ Cauchy(0, 1)[0, Inf] 0.125 -116.13 0.000" +" 4 Normal(0, 1) Normal(0.3, 0.15) Spike(0) 0.250 17.07 0.453" +" 5 Normal(0, 1) Normal(0.3, 0.15) Spike(0) omega[two-sided: .05] ~ CumDirichlet(1, 1) 0.125 17.50 0.350" +" 6 Normal(0, 1) Normal(0.3, 0.15) Spike(0) PET ~ Cauchy(0, 1)[0, Inf] 0.125 16.93 0.196" +" Inclusion BF" +" 0.000" +" 0.000" +" 0.000" +" 2.489" +" 3.774" +" 1.709" diff --git a/tests/results/summary/13.txt b/tests/results/summary/13.txt index 5b52c8b3..14abd23c 100644 --- a/tests/results/summary/13.txt +++ b/tests/results/summary/13.txt @@ -9,6 +9,7 @@ "Effect 18/36 0.500 0.492 0.967" "Heterogeneity 18/36 0.500 0.469 0.882" "Bias 32/36 0.500 0.536 1.156" +"Hierarchical 18/36 0.500 0.469 0.882" "" "Model-averaged estimates:" " Mean Median 0.025 0.975" diff --git a/tests/results/summary/14.txt b/tests/results/summary/14.txt new file mode 100644 index 00000000..ae7b0981 --- /dev/null +++ b/tests/results/summary/14.txt @@ -0,0 +1,30 @@ +"Call:" +"RoBMA.reg(formula = ~mod_cat + mod_con, data = df_reg, priors_bias = NULL, " +" parallel = TRUE, seed = 1)" +"" +"Robust Bayesian meta-analysis" +"Components summary:" +" Models Prior prob. Post. prob. Inclusion BF" +"Effect 8/16 0.500 0.024 0.025" +"Heterogeneity 8/16 0.500 0.038 0.039" +"Bias 0/16 0.000 0.000 0.000" +"" +"Meta-regression components summary:" +" Models Prior prob. Post. prob. Inclusion BF" +"mod_cat 8/16 0.500 1.000 16741134.735" +"mod_con 8/16 0.500 0.497 0.987" +"" +"Model-averaged estimates:" +" Mean Median 0.025 0.975" +"mu 0.000 0.000 0.000 0.000" +"tau 0.002 0.000 0.000 0.040" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"" +"Model-averaged meta-regression estimates:" +" Mean Median 0.025 0.975" +"intercept 0.000 0.000 0.000 0.000" +"mod_cat [dif: A] -0.883 -0.909 -1.006 -0.685" +"mod_cat [dif: B] 0.001 0.001 -0.071 0.071" +"mod_cat [dif: C] 0.882 0.908 0.681 1.008" +"mod_con 0.056 0.000 -0.003 0.229" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." diff --git a/tests/results/summary/15.txt b/tests/results/summary/15.txt new file mode 100644 index 00000000..2bc78c01 --- /dev/null +++ b/tests/results/summary/15.txt @@ -0,0 +1,35 @@ +"Call:" +"RoBMA.reg(formula = ~mod_con, data = df_reg, priors = list(mod_con = list(null = prior(\"normal\", " +" list(0, 0.05)), alt = prior(\"normal\", list(0.3, 0.15)))), " +" priors_heterogeneity = NULL, priors_bias = list(prior_weightfunction(distribution = \"two.sided\", " +" parameters = list(alpha = c(1, 1), steps = c(0.05)), " +" prior_weights = 1/2), prior_PET(distribution = \"Cauchy\", " +" parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2)), " +" priors_effect_null = NULL, parallel = TRUE, seed = 1)" +"" +"Robust Bayesian meta-analysis" +"Components summary:" +" Models Prior prob. Post. prob. Inclusion BF" +"Effect 6/6 1.000 1.000 Inf" +"Heterogeneity 0/6 0.000 0.000 0.000" +"Bias 4/6 0.500 0.547 1.205" +"" +"Meta-regression components summary:" +" Models Prior prob. Post. prob. 
Inclusion BF" +"mod_con 3/6 0.500 1.000 5.371429e+57" +"" +"Model-averaged estimates:" +" Mean Median 0.025 0.975" +"mu -0.025 -0.006 -0.272 0.046" +"tau 0.000 0.000 0.000 0.000" +"omega[0,0.05] 1.000 1.000 1.000 1.000" +"omega[0.05,1] 0.805 1.000 0.083 1.000" +"PET 0.272 0.000 0.000 2.867" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." +"(Estimated publication weights omega correspond to two-sided p-values.)" +"" +"Model-averaged meta-regression estimates:" +" Mean Median 0.025 0.975" +"intercept -0.025 -0.006 -0.272 0.046" +"mod_con 0.924 0.924 0.877 0.973" +"The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." diff --git a/tests/testthat/_snaps/2-distributions/rwnorm-1.svg b/tests/testthat/_snaps/2-distributions/rwnorm-1.svg index e094fd07..076a4e7e 100644 --- a/tests/testthat/_snaps/2-distributions/rwnorm-1.svg +++ b/tests/testthat/_snaps/2-distributions/rwnorm-1.svg @@ -51,13 +51,13 @@ 0.3 0.4 -density.default(x = rwnorm(10000, 0, 1, steps = c(0.5), omega = c(1, +density(x = rwnorm(10000, 0, 1, steps = c(0.5), omega = c(1, 1), type = "one.sided")) N = 10000 Bandwidth = 0.1438 Density - + diff --git a/tests/testthat/_snaps/2-distributions/rwnorm-2.svg b/tests/testthat/_snaps/2-distributions/rwnorm-2.svg index ba6c8184..47717482 100644 --- a/tests/testthat/_snaps/2-distributions/rwnorm-2.svg +++ b/tests/testthat/_snaps/2-distributions/rwnorm-2.svg @@ -49,13 +49,13 @@ 0.15 0.20 -density.default(x = rwnorm(10000, 1, 2, steps = c(0.5), omega = c(1, +density(x = rwnorm(10000, 1, 2, steps = c(0.5), omega = c(1, 1), type = "two.sided")) N = 10000 Bandwidth = 0.2826 Density - + diff --git a/tests/testthat/_snaps/6-plots/ggplot-forest1-14.svg b/tests/testthat/_snaps/6-plots/ggplot-forest1-14.svg new file mode 100644 index 00000000..fe5a1972 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-forest1-14.svg @@ -0,0 +1,188 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Model-Averaged +Study 1 +Study 2 +Study 3 +Study 4 +Study 5 +Study 6 +Study 7 +Study 8 +Study 9 +Study 10 +Study 11 +Study 12 +Study 13 +Study 14 +Study 15 + 0.00 [ 0.00, 0.00] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + + + + + + + +-1.5 +-1 +-0.5 +0 +0.5 +1 +1.5 +Cohen's + +d +ggplot_forest1_14 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-forest1-15.svg b/tests/testthat/_snaps/6-plots/ggplot-forest1-15.svg new file mode 100644 index 00000000..f9b3e44c --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-forest1-15.svg @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Model-Averaged +Study 1 +Study 2 +Study 3 +Study 4 +Study 5 +Study 6 +Study 7 +Study 8 +Study 9 +Study 10 +Study 11 +Study 12 +Study 13 +Study 14 +Study 15 +-0.03 [-0.27, 0.05] +-1.57 [-1.79, -1.35] +-1.34 [-1.56, -1.13] +-1.12 [-1.33, -0.91] +-0.89 
[-1.10, -0.69] +-0.67 [-0.88, -0.47] +-0.45 [-0.65, -0.25] +-0.22 [-0.42, -0.03] + 0.00 [-0.20, 0.20] + 0.22 [ 0.03, 0.42] + 0.45 [ 0.25, 0.65] + 0.67 [ 0.47, 0.88] + 0.89 [ 0.69, 1.10] + 1.12 [ 0.91, 1.33] + 1.34 [ 1.13, 1.56] + 1.57 [ 1.35, 1.79] + + + + + +-2 +-1 +0 +1 +2 +Cohen's + +d +ggplot_forest1_15 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-forest2-14.svg b/tests/testthat/_snaps/6-plots/ggplot-forest2-14.svg new file mode 100644 index 00000000..b4465266 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-forest2-14.svg @@ -0,0 +1,188 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Conditional +Study 1 +Study 2 +Study 3 +Study 4 +Study 5 +Study 6 +Study 7 +Study 8 +Study 9 +Study 10 +Study 11 +Study 12 +Study 13 +Study 14 +Study 15 + 0.00 [-0.05, 0.05] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] +-1.00 [-1.21, -0.80] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 0.00 [-0.20, 0.20] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + 1.00 [ 0.80, 1.21] + + + + + + + +-1.5 +-1 +-0.5 +0 +0.5 +1 +1.5 +Cohen's + +d +ggplot_forest2_14 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-forest2-15.svg b/tests/testthat/_snaps/6-plots/ggplot-forest2-15.svg new file mode 100644 index 00000000..ef14bc77 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-forest2-15.svg @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Conditional +Study 1 +Study 2 +Study 3 +Study 4 +Study 5 +Study 6 +Study 7 +Study 8 +Study 9 +Study 10 +Study 11 +Study 12 +Study 13 +Study 14 +Study 15 +-0.03 [-0.27, 0.05] +-1.57 [-1.79, -1.35] +-1.34 [-1.56, -1.13] +-1.12 [-1.33, -0.91] +-0.89 [-1.10, -0.69] +-0.67 [-0.88, -0.47] +-0.45 [-0.65, -0.25] +-0.22 [-0.42, -0.03] + 0.00 [-0.20, 0.20] + 0.22 [ 0.03, 0.42] + 0.45 [ 0.25, 0.65] + 0.67 [ 0.47, 0.88] + 0.89 [ 0.69, 1.10] + 1.12 [ 0.91, 1.33] + 1.34 [ 1.13, 1.56] + 1.57 [ 1.35, 1.79] + + + + + +-2 +-1 +0 +1 +2 +Cohen's + +d +ggplot_forest2_15 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-models1-14.svg b/tests/testthat/_snaps/6-plots/ggplot-models1-14.svg new file mode 100644 index 00000000..44a37b38 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-models1-14.svg @@ -0,0 +1,191 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Model-Averaged +Model 16 +Model 15 +Model 14 +Model 13 +Model 12 +Model 11 +Model 10 +Model 9 +Model 8 +Model 7 +Model 6 +Model 5 +Model 4 +Model 3 +Model 2 +Model 1 +0.00 [0.00, 0.00] +0.00 [-0.06, 0.06] +0.00 [-0.05, 0.05] +0.00 [ 0.00, 0.00] +0.00 [ 0.00, 0.00] +0.00 [-0.15, 0.15] +0.00 [-0.05, 0.05] +0.00 [ 0.00, 0.00] +0.00 [ 0.00, 0.00] +0.00 [-0.05, 0.06] +0.00 [-0.05, 0.05] +0.00 [ 0.00, 0.00] +0.00 [ 0.00, 0.00] +0.00 [-0.42, 0.42] +0.00 [-0.05, 0.05] +0.00 [ 0.00, 0.00] +0.00 [ 0.00, 0.00] +BF = 0.01 [0.06 -> 0.00] +BF = 0.18 [0.06 -> 0.01] +BF = 0.33 [0.06 -> 0.02] +BF = 12.94 [0.06 -> 
0.46] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] +BF = 0.01 [0.06 -> 0.00] +BF = 0.18 [0.06 -> 0.01] +BF = 0.23 [0.06 -> 0.02] +BF = 13.61 [0.06 -> 0.48] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] + + + + + + + +-0.6 +-0.4 +-0.2 +0 +0.2 +0.4 +0.6 +Cohen's + +d +ggplot_models1_14 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-models1-15.svg b/tests/testthat/_snaps/6-plots/ggplot-models1-15.svg new file mode 100644 index 00000000..c2cc74e7 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-models1-15.svg @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Model-Averaged +Model 6 +Model 5 +Model 4 +Model 3 +Model 2 +Model 1 +-0.03 [-0.27, 0.05] +-0.12 [-0.55, 0.02] + 0.00 [-0.05, 0.05] + 0.00 [-0.05, 0.05] +-0.12 [-0.54, 0.02] + 0.00 [-0.05, 0.05] + 0.00 [-0.05, 0.05] +BF = 1.71 [0.12 -> 0.20] +BF = 3.77 [0.12 -> 0.35] +BF = 2.49 [0.25 -> 0.45] +BF = 0.00 [0.12 -> 0.00] +BF = 0.00 [0.12 -> 0.00] +BF = 0.00 [0.25 -> 0.00] + + + + + + + + +-0.6 +-0.5 +-0.4 +-0.3 +-0.2 +-0.1 +0 +0.1 +Cohen's + +d +ggplot_models1_15 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-models2-14.svg b/tests/testthat/_snaps/6-plots/ggplot-models2-14.svg new file mode 100644 index 00000000..682af499 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-models2-14.svg @@ -0,0 +1,143 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Model-Averaged +Model 16 +Model 15 +Model 12 +Model 11 +Model 8 +Model 7 +Model 4 +Model 3 +0.00 [-0.05, 0.05] +0.00 [-0.06, 0.06] +0.00 [-0.05, 0.05] +0.00 [-0.15, 0.15] +0.00 [-0.05, 0.05] +0.00 [-0.05, 0.06] +0.00 [-0.05, 0.05] +0.00 [-0.42, 0.42] +0.00 [-0.05, 0.05] +BF = 0.01 [0.06 -> 0.00] +BF = 0.18 [0.06 -> 0.01] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] +BF = 0.01 [0.06 -> 0.00] +BF = 0.18 [0.06 -> 0.01] +BF = 0.00 [0.06 -> 0.00] +BF = 0.00 [0.06 -> 0.00] + + + + + + + +-0.6 +-0.4 +-0.2 +0 +0.2 +0.4 +0.6 +Cohen's + +d +ggplot_models2_14 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-models2-15.svg b/tests/testthat/_snaps/6-plots/ggplot-models2-15.svg new file mode 100644 index 00000000..9c390eea --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-models2-15.svg @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Model-Averaged +Model 6 +Model 5 +Model 4 +Model 3 +Model 2 +Model 1 +-0.03 [-0.27, 0.05] +-0.12 [-0.55, 0.02] + 0.00 [-0.05, 0.05] + 0.00 [-0.05, 0.05] +-0.12 [-0.54, 0.02] + 0.00 [-0.05, 0.05] + 0.00 [-0.05, 0.05] +BF = 1.71 [0.12 -> 0.20] +BF = 3.77 [0.12 -> 0.35] +BF = 2.49 [0.25 -> 0.45] +BF = 0.00 [0.12 -> 0.00] +BF = 0.00 [0.12 -> 0.00] +BF = 0.00 [0.25 -> 0.00] + + + + + + + + +-0.6 +-0.5 +-0.4 +-0.3 +-0.2 +-0.1 +0 +0.1 +Cohen's + +d +ggplot_models2_15 + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu1-14.svg b/tests/testthat/_snaps/6-plots/ggplot-mu1-14.svg new file mode 100644 index 00000000..11098b04 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu1-14.svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.1 +0.2 +0.3 +0.4 + + + + + + + + + + + +0 
+0.2 +0.4 +0.6 +0.8 +1 + + + + + +-0.10 +-0.05 +0.00 +0.05 +0.10 +Cohen's + +d +Density +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu1-15.svg b/tests/testthat/_snaps/6-plots/ggplot-mu1-15.svg new file mode 100644 index 00000000..972e9ca8 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu1-15.svg @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + + + + + + + + + + + +-1.2 +-1.0 +-0.8 +-0.6 +-0.4 +-0.2 +0.0 +0.2 +Cohen's + +d +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu2-14.svg b/tests/testthat/_snaps/6-plots/ggplot-mu2-14.svg new file mode 100644 index 00000000..8ae97543 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu2-14.svg @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.1 +0.2 +0.3 +0.4 + + + + + + + + + + + +0 +0.2 +0.4 +0.6 +0.8 +1 + + + + + + + +-3 +-2 +-1 +0 +1 +2 +3 +Cohen's + +d +Density +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu2-15.svg b/tests/testthat/_snaps/6-plots/ggplot-mu2-15.svg new file mode 100644 index 00000000..6fb4772c --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu2-15.svg @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + + + + + + + + + + +-3 +-2 +-1 +0 +1 +2 +3 +Cohen's + +d +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu3-14.svg b/tests/testthat/_snaps/6-plots/ggplot-mu3-14.svg new file mode 100644 index 00000000..352d64b1 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu3-14.svg @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 + + + + + + + + + + + + +-0.15 +-0.10 +-0.05 +0.00 +0.05 +0.10 +0.15 +Cohen's + +d +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu3-15.svg b/tests/testthat/_snaps/6-plots/ggplot-mu3-15.svg new file mode 100644 index 00000000..972e9ca8 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu3-15.svg @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + + + + + + + + + + + +-1.2 +-1.0 +-0.8 +-0.6 +-0.4 +-0.2 +0.0 +0.2 +Cohen's + +d +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu4-14.svg b/tests/testthat/_snaps/6-plots/ggplot-mu4-14.svg new file mode 100644 index 00000000..3f1129d7 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu4-14.svg @@ -0,0 +1,85 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 + + + + + + + + + + + + +-3 +-2 +-1 +0 +1 +2 +3 +Cohen's + +d +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-mu4-15.svg b/tests/testthat/_snaps/6-plots/ggplot-mu4-15.svg new file mode 100644 index 00000000..6fb4772c --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-mu4-15.svg @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + + + + + + + + + + +-3 +-2 +-1 +0 +1 +2 +3 +Cohen's + +d +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-omega1-15.svg b/tests/testthat/_snaps/6-plots/ggplot-omega1-15.svg new file mode 100644 index 00000000..54a31ccb --- /dev/null +++ 
b/tests/testthat/_snaps/6-plots/ggplot-omega1-15.svg @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + +0 +0.05 +1 +p +-value +Probability +Selection Models + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-omega2-15.svg b/tests/testthat/_snaps/6-plots/ggplot-omega2-15.svg new file mode 100644 index 00000000..68b577a9 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-omega2-15.svg @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + +0 +0.05 +1 +p +-value +Probability +Selection Models + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-omega3-15.svg b/tests/testthat/_snaps/6-plots/ggplot-omega3-15.svg new file mode 100644 index 00000000..88933fe2 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-omega3-15.svg @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + +0 +0.05 +1 +p +-value +Probability +Selection Models + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-omega4-15.svg b/tests/testthat/_snaps/6-plots/ggplot-omega4-15.svg new file mode 100644 index 00000000..919dbd5b --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-omega4-15.svg @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + +0 +0.05 +1 +p +-value +Probability +Selection Models + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-1-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-1-14.svg new file mode 100644 index 00000000..27df7f85 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-1-14.svg @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 + + + + + + + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +0.6 + + + + + + + + +-0.2 +-0.1 +0.0 +0.1 +0.2 +0.3 +0.4 +0.5 +mod_con + +( +Cohen's + +d +) +Density +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-2-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-2-14.svg new file mode 100644 index 00000000..80954dbf --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-2-14.svg @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 + + + + + + + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +0.6 + + + + + + + + + +-0.8 +-0.6 +-0.4 +-0.2 +0.0 +0.2 +0.4 +0.6 +0.8 +mod_con + +( +Cohen's + +d +) +Density +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-3-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-3-14.svg new file mode 100644 index 00000000..fbbcc1dc --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-3-14.svg @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +1 +2 +3 +4 +5 +6 + + + + + + + + + + + + + + + +-0.2 +-0.1 +0.0 +0.1 +0.2 +0.3 +0.4 +0.5 +mod_con + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-4-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-4-14.svg new file mode 100644 index 00000000..32df125f --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-4-14.svg @@ -0,0 
+1,105 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +1 +2 +3 +4 +5 +6 + + + + + + + + + + + + + + + + +-0.8 +-0.6 +-0.4 +-0.2 +0.0 +0.2 +0.4 +0.6 +0.8 +mod_con + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-1-15.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-1-15.svg new file mode 100644 index 00000000..38d55dc8 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-1-15.svg @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 + + + + + + + + + + + +0.80 +0.85 +0.90 +0.95 +1.00 +1.05 +mod_con + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-2-15.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-2-15.svg new file mode 100644 index 00000000..32505af9 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-2-15.svg @@ -0,0 +1,93 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 + + + + + + + + + + + + + +-0.2 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +1.2 +mod_con + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-3-15.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-3-15.svg new file mode 100644 index 00000000..38d55dc8 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-3-15.svg @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 + + + + + + + + + + + +0.80 +0.85 +0.90 +0.95 +1.00 +1.05 +mod_con + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-4-15.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-4-15.svg new file mode 100644 index 00000000..98c420a8 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-con-alt-4-15.svg @@ -0,0 +1,93 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 + + + + + + + + + + + + + +-0.2 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +1.2 +mod_con + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-fac-1-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-1-14.svg new file mode 100644 index 00000000..be447eb3 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-1-14.svg @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +1 +2 +3 +4 + + + + + + + + + + + + +-1.5 +-1.0 +-0.5 +0.0 +0.5 +1.0 +1.5 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-fac-2-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-2-14.svg new file mode 100644 index 00000000..a1a23155 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-2-14.svg @@ -0,0 +1,117 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +1 +2 +3 +4 + + + + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 + + + + + + + +-1.5 +-1.0 +-0.5 +0.0 +0.5 +1.0 +1.5 +mod_cat + +( +Cohen's + +d +) +Density +Probability + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-fac-3-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-3-14.svg new file mode 100644 index 00000000..be447eb3 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-3-14.svg @@ -0,0 +1,100 @@ 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +1 +2 +3 +4 + + + + + + + + + + + + +-1.5 +-1.0 +-0.5 +0.0 +0.5 +1.0 +1.5 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-reg-fac-4-14.svg b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-4-14.svg new file mode 100644 index 00000000..5bc92164 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-reg-fac-4-14.svg @@ -0,0 +1,101 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +1 +2 +3 +4 + + + + + + + + + + + + +-1.5 +-1.0 +-0.5 +0.0 +0.5 +1.0 +1.5 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-tau1-14.svg b/tests/testthat/_snaps/6-plots/ggplot-tau1-14.svg new file mode 100644 index 00000000..16d3b8c1 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-tau1-14.svg @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 + + + + + + + + + + + +0 +0.2 +0.4 +0.6 +0.8 +1 + + + + + + + +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +0.30 +τ + +( +Cohen's + +d +) +Density +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-tau1-15.svg b/tests/testthat/_snaps/6-plots/ggplot-tau1-15.svg new file mode 100644 index 00000000..afd16108 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-tau1-15.svg @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + +-1 +0 +τ + +( +Cohen's + +d +) +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-tau2-14.svg b/tests/testthat/_snaps/6-plots/ggplot-tau2-14.svg new file mode 100644 index 00000000..5f52466d --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-tau2-14.svg @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 + + + + + + + + + + + +0 +0.2 +0.4 +0.6 +0.8 +1 + + + + +0 +5 +10 +15 +τ + +( +Cohen's + +d +) +Density +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-tau2-15.svg b/tests/testthat/_snaps/6-plots/ggplot-tau2-15.svg new file mode 100644 index 00000000..a50caac7 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-tau2-15.svg @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + +-1 +0 +τ + +( +Cohen's + +d +) +Probability + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-tau3-14.svg b/tests/testthat/_snaps/6-plots/ggplot-tau3-14.svg new file mode 100644 index 00000000..51a13f72 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-tau3-14.svg @@ -0,0 +1,88 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 +25 + + + + + + + + + + + + +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +τ + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/ggplot-tau4-14.svg b/tests/testthat/_snaps/6-plots/ggplot-tau4-14.svg new file mode 100644 index 00000000..faf5bcac --- /dev/null +++ b/tests/testthat/_snaps/6-plots/ggplot-tau4-14.svg @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 +25 + + + + + + + + + + +0 +5 +10 +15 +τ + +( +Cohen's + +d +) +Density + + diff --git a/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-1.svg 
b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-1.svg new file mode 100644 index 00000000..00ee48cf --- /dev/null +++ b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-1.svg @@ -0,0 +1,108 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 + + + + + + + + + + + + + + +-1.5 +-1.0 +-0.5 +0.0 +0.5 +1.0 +1.5 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + +A +B +C + + diff --git a/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-2.svg b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-2.svg new file mode 100644 index 00000000..ec4899cf --- /dev/null +++ b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-2.svg @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 + + + + + + + + + + + + + +-6 +-4 +-2 +0 +2 +4 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + + + + + + + + + +A +B +C + + diff --git a/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-3.svg b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-3.svg new file mode 100644 index 00000000..eb49843b --- /dev/null +++ b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-cat-3.svg @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +5 +10 +15 +20 +25 + + + + + + + + + + + +-1.0 +-0.5 +0.0 +0.5 +1.0 +mod_cat + +( +ρ +) +Density + + + + + + + + + + + + + + + + +A +B +C + + diff --git a/tests/testthat/_snaps/6-plots/mm-ggplot-mod-con-1.svg b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-con-1.svg new file mode 100644 index 00000000..48cfa293 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-con-1.svg @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +100 +200 +300 +400 +500 +600 +700 + + + + + + + + + + + + + +-1.0 +-0.5 +0.0 +0.5 +1.0 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + + + + + + + +-1SD +0SD +1SD + + diff --git a/tests/testthat/_snaps/6-plots/mm-ggplot-mod-con-2.svg b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-con-2.svg new file mode 100644 index 00000000..8135d728 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/mm-ggplot-mod-con-2.svg @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + +-2.0 +-1.5 +-1.0 +-0.5 +0.0 +0.5 +1.0 + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + + + + +-1SD +0SD +1SD + + diff --git a/tests/testthat/_snaps/6-plots/plot-petpeese5-1.svg b/tests/testthat/_snaps/6-plots/plot-petpeese5-1.svg index 2eb2107c..9df2c34b 100644 --- a/tests/testthat/_snaps/6-plots/plot-petpeese5-1.svg +++ b/tests/testthat/_snaps/6-plots/plot-petpeese5-1.svg @@ -31,15 +31,23 @@ 0.6 0.8 1.0 - + - - - + + + + + + + 0.00 -0.05 -0.10 -0.15 +0.02 +0.04 +0.06 +0.08 +0.10 +0.12 +0.14 PET-PEESE Standard error Effect size @@ -50,10 +58,10 @@ - - - - - + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-petpeese6-1.svg b/tests/testthat/_snaps/6-plots/plot-petpeese6-1.svg index 2d793a01..abe099d4 100644 --- a/tests/testthat/_snaps/6-plots/plot-petpeese6-1.svg +++ b/tests/testthat/_snaps/6-plots/plot-petpeese6-1.svg @@ -33,15 +33,21 @@ 0.20 0.25 0.30 - + - - - + + + + + + 0.00 -0.05 -0.10 -0.15 +0.05 +0.10 +0.15 +0.20 +0.25 +0.30 PET-PEESE Standard error Effect size @@ -54,10 +60,10 @@ - - - - - + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-1-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-1-14.svg new file mode 
100644 index 00000000..15c1a283 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-1-14.svg @@ -0,0 +1,84 @@ + + + + + + + + + + + + +mod_con + +( +Cohen's + +d +) +Density + + + + + + + +-0.1 +0.0 +0.1 +0.2 +0.3 +0.4 + + + + + + + + +0 +0.5 +1 +1.5 +2 +2.5 +3 + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +0.6 +Probability + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-2-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-2-14.svg new file mode 100644 index 00000000..8c0976f3 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-2-14.svg @@ -0,0 +1,89 @@ + + + + + + + + + + + + +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + +-0.6 +-0.4 +-0.2 +0.0 +0.2 +0.4 +0.6 + + + + + + + + +0 +0.5 +1 +1.5 +2 +2.5 +3 + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +0.6 +Probability + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-3-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-3-14.svg new file mode 100644 index 00000000..6eb2135a --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-3-14.svg @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + +-0.1 +0.0 +0.1 +0.2 +0.3 +0.4 + + + + + + + +0 +1 +2 +3 +4 +5 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-4-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-4-14.svg new file mode 100644 index 00000000..6fe4d1a1 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-4-14.svg @@ -0,0 +1,67 @@ + + + + + + + + + + + + + + + + + + + + +-0.6 +-0.4 +-0.2 +0.0 +0.2 +0.4 +0.6 + + + + + + + +0 +1 +2 +3 +4 +5 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-5-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-5-14.svg new file mode 100644 index 00000000..d53899ca --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-5-14.svg @@ -0,0 +1,81 @@ + + + + + + + + + + + + +mod_con + +( +log +( +OR +) +) +Density + + + + + + + +-0.2 +0.0 +0.2 +0.4 +0.6 +0.8 + + + + + + +0 +0.5 +1 +1.5 +2 + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +0.6 +Probability + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-6-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-6-14.svg new file mode 100644 index 00000000..bddc772c --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-6-14.svg @@ -0,0 +1,87 @@ + + + + + + + + + + + + +mod_con + +( +ρ +) +Density + + + + + + + + +-0.3 +-0.2 +-0.1 +0.0 +0.1 +0.2 +0.3 + + + + + + + + +0 +1 +2 +3 +4 +5 +6 + + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +0.6 +Probability + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-alt-1-15.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-1-15.svg new file mode 100644 index 00000000..ea0f6bc5 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-1-15.svg @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + +0.85 +0.90 +0.95 +1.00 + + + + + +0 +5 +10 +15 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-alt-2-15.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-2-15.svg new file mode 100644 index 00000000..dcce17e5 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-2-15.svg @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + +0 +5 +10 +15 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + + diff --git 
a/tests/testthat/_snaps/6-plots/plot-reg-con-alt-3-15.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-3-15.svg new file mode 100644 index 00000000..ea0f6bc5 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-3-15.svg @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + +0.85 +0.90 +0.95 +1.00 + + + + + +0 +5 +10 +15 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-alt-4-15.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-4-15.svg new file mode 100644 index 00000000..8589e5bb --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-4-15.svg @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + +0 +5 +10 +15 +mod_con + +( +Cohen's + +d +) +Density + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-alt-5-15.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-5-15.svg new file mode 100644 index 00000000..af58d6b0 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-5-15.svg @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + +1.5 +1.6 +1.7 +1.8 + + + + + + +0 +2 +4 +6 +8 +mod_con + +( +log +( +OR +) +) +Density + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-con-alt-6-15.svg b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-6-15.svg new file mode 100644 index 00000000..212d8346 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-con-alt-6-15.svg @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + +0.0 +0.1 +0.2 +0.3 +0.4 + + + + + + +0 +10 +20 +30 +40 +mod_con + +( +ρ +) +Density + + + + + + + + + + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-fac-1-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-fac-1-14.svg new file mode 100644 index 00000000..73904f3f --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-fac-1-14.svg @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + +-1.0 +-0.5 +0.0 +0.5 +1.0 + + + + + +0 +1 +2 +3 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-fac-2-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-fac-2-14.svg new file mode 100644 index 00000000..2d847914 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-fac-2-14.svg @@ -0,0 +1,85 @@ + + + + + + + + + + + + +mod_cat + +( +Cohen's + +d +) +Density + + + + + + +-1.0 +-0.5 +0.0 +0.5 +1.0 + + + + + + +0 +1 +2 +3 +4 + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +Probability + + + + + + + + + + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-fac-3-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-fac-3-14.svg new file mode 100644 index 00000000..73904f3f --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-fac-3-14.svg @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + +-1.0 +-0.5 +0.0 +0.5 +1.0 + + + + + +0 +1 +2 +3 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-fac-4-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-fac-4-14.svg new file mode 100644 index 00000000..3d203778 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-fac-4-14.svg @@ -0,0 +1,67 @@ + + + + + + + + + + + + + + + + + + +-1.0 +-0.5 +0.0 +0.5 +1.0 + + + + + +0 +1 +2 +3 +mod_cat + +( +Cohen's + +d +) +Density + + + + + + + + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-fac-5-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-fac-5-14.svg new file mode 100644 index 00000000..aa4346dd --- 
/dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-fac-5-14.svg @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + +-2 +-1 +0 +1 +2 + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 +mod_cat + +( +log +( +OR +) +) +Density + + + + + + + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/plot-reg-fac-6-14.svg b/tests/testthat/_snaps/6-plots/plot-reg-fac-6-14.svg new file mode 100644 index 00000000..1f0808c0 --- /dev/null +++ b/tests/testthat/_snaps/6-plots/plot-reg-fac-6-14.svg @@ -0,0 +1,83 @@ + + + + + + + + + + + + +mod_cat + +( +ρ +) +Density + + + + + + +-0.4 +-0.2 +0.0 +0.2 +0.4 + + + + + + +0 +2 +4 +6 +8 + + + + + + + +0 +0.1 +0.2 +0.3 +0.4 +0.5 +Probability + + + + + + + + + + + + + + + + + A + B + C + + diff --git a/tests/testthat/_snaps/6-plots/plot-tau5-1.svg b/tests/testthat/_snaps/6-plots/plot-tau5-1.svg index e7062c8d..236d3576 100644 --- a/tests/testthat/_snaps/6-plots/plot-tau5-1.svg +++ b/tests/testthat/_snaps/6-plots/plot-tau5-1.svg @@ -67,7 +67,7 @@ - + diff --git a/tests/testthat/_snaps/6-plots/plot-tau6-1.svg b/tests/testthat/_snaps/6-plots/plot-tau6-1.svg index 2514f930..50c37451 100644 --- a/tests/testthat/_snaps/6-plots/plot-tau6-1.svg +++ b/tests/testthat/_snaps/6-plots/plot-tau6-1.svg @@ -81,7 +81,7 @@ - + diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-autocorrelation-mu.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-autocorrelation-mu.svg index d349e19b..58c791b5 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-autocorrelation-mu.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-autocorrelation-mu.svg @@ -21,73 +21,159 @@ - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0.00 -0.25 -0.50 -0.75 -1.00 - - - - - - - - - -0 -10 -20 -30 -Lag -Avg. 
autocorrelation -Model - -36 -: - -Cohen's - -d +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + + + + + +0 +5 +10 +15 +20 +25 +30 +Lag +Autocorrelation(mu) +Model 36 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-mu.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-mu.svg index 6688e6f8..05c1cbb0 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-mu.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-mu.svg @@ -21,49 +21,69 @@ - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --2 --1 -0 -1 -2 - - - - - - - - -0 -2000 -4000 -Iterations -Cohen's - -d - -chain - - - - - - -1 -2 -3 -Model 36 +-3 +-2 +-1 +0 +1 +2 +3 + + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +mu +Model 36 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-1.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-1.svg index b93950c6..1f6660ab 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-1.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-1.svg @@ -21,50 +21,65 @@ - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + -0.2 -0.4 -0.6 -0.8 -1 - - - - - - - - -0 -2000 -4000 -Iterations - -ω - -[0.025,0.05] - -chain - - - - - - -1 -2 -3 -Model 34 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +omega[0.025,0.05] +Model 34 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-2.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-2.svg index 71f5b6b8..2a33fd57 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-2.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-2.svg @@ -21,52 +21,65 @@ - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -0.2 -0.4 -0.6 -0.8 -1 - - - - - - - - - -0 -2000 -4000 -Iterations - -ω - -[0.05,0.5] - -chain - - - - - - -1 -2 -3 -Model 34 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +omega[0.05,0.5] +Model 34 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-3.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-3.svg index 6957d3d2..d5aebc7e 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-3.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-omega-3.svg @@ -21,52 +21,65 @@ - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -0.2 -0.4 -0.6 -0.8 -1 - - - - - - - - - -0 -2000 -4000 -Iterations - -ω - -[0.5,1] - -chain - - - - - - -1 -2 -3 -Model 34 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +omega[0.5,1] +Model 34 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-peese.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-peese.svg index c1f56187..aa6d4210 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-peese.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-peese.svg @@ -21,59 +21,73 @@ - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -2 -4 -6 -8 -10 -12 -14 - - - - - - - - - - - -0 -2000 -4000 -Iterations -PEESE - -( -Cohen's - -d -) - -chain - - - - - - -1 -2 -3 -Model 36 +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +PEESE +Model 36 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-pet.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-pet.svg index 8232ee98..5773f407 
100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-pet.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-pet.svg @@ -21,49 +21,69 @@ - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -1 -2 -3 -4 -5 - - - - - - - - - -0 -2000 -4000 -Iterations -PET - -chain - - - - - - -1 -2 -3 -Model 35 +0 +1 +2 +3 +4 +5 +6 + + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +PET +Model 35 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-tau.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-tau.svg index a7b78a38..a0a1d531 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-chains-tau.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-chains-tau.svg @@ -18,60 +18,76 @@ - + - - + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -1 -2 -3 -4 -5 -6 - - - - - - - - - - -0 -2000 -4000 -Iterations -τ - -( -Cohen's - -d -) - -chain - - - - - - -1 -2 -3 -Model 36 +0 +1 +2 +3 +4 +5 +6 +7 + + + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +tau +Model 36 diff --git a/tests/testthat/_snaps/7-diagnostics/ggplot-densities-mu.svg b/tests/testthat/_snaps/7-diagnostics/ggplot-densities-mu.svg index 3649a7b5..96695592 100644 --- a/tests/testthat/_snaps/7-diagnostics/ggplot-densities-mu.svg +++ b/tests/testthat/_snaps/7-diagnostics/ggplot-densities-mu.svg @@ -21,66 +21,77 @@ - - + + - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -0.2 -0.4 -0.6 -0.8 -1 -1.2 -1.4 - - - - - - - - - - - - - - - --3 --2 --1 -0 -1 -2 -3 -Cohen's - -d -Density - -chain - - - - - - -1 -2 -3 -Model 36 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +1.2 +1.4 + + + + + + + + + + + + + + + +-3 +-2 +-1 +0 +1 +2 +3 +mu +Density +Model 36 diff --git a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-mu.svg b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-mu.svg index 88d26c9a..4ae50089 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-mu.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-mu.svg @@ -18,74 +18,137 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -11 -13 -15 -17 -19 -21 -23 -25 -27 -29 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. autocorrelation -Cohen's - -d + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +Normal +(0, 1) +Lag +Autocorrelation(mu) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega-2.svg b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega-2.svg index e2251ae4..2522eae5 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega-2.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega-2.svg @@ -18,75 +18,145 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -11 -13 -15 -17 -19 -21 -23 -25 -27 -29 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. 
autocorrelation - -ω - -[0.05,1] + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +ω +two-sided: +.05 + +~ + +CumDirichlet +( +1, 1 +) +Lag +Autocorrelation(omega[0.05,1]) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega.svg b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega.svg index 9841e800..b9854637 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-omega.svg @@ -18,6 +18,34 @@ + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 @@ -25,89 +53,155 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +Lag +Autocorrelation(omega[0.025,0.05]) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. autocorrelation - -ω - -[0.025,0.05] + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 @@ -115,89 +209,150 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +Lag +Autocorrelation(omega[0.05,0.5]) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. autocorrelation - -ω - -[0.05,0.5] + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 @@ -205,88 +360,112 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +Lag +Autocorrelation(omega[0.5,1]) - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. autocorrelation - -ω - -[0.5,1] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-peese.svg b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-peese.svg index a685306f..70724eb1 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-peese.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-peese.svg @@ -18,78 +18,143 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -11 -13 -15 -17 -19 -21 -23 -25 -27 -29 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. 
autocorrelation -PEESE - -( -Cohen's - -d -) + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +PEESE ~ +Cauchy +(0, 5) +[ +0 +, + +] +Lag +Autocorrelation(PEESE) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-pet.svg b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-pet.svg index 64a83833..def13083 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-pet.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-pet.svg @@ -18,72 +18,143 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -11 -13 -15 -17 -19 -21 -23 -25 -27 -29 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. autocorrelation -PET + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +PET ~ +Cauchy +(0, 1) +[ +0 +, + +] +Lag +Autocorrelation(PET) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-tau.svg b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-tau.svg index 8c1b2b73..cbb0a162 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-tau.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-autocorrelation-tau.svg @@ -18,78 +18,137 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -11 -13 -15 -17 -19 -21 -23 -25 -27 -29 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -Lag -Avg. 
autocorrelation -τ - -( -Cohen's - -d -) + + + + + + + + +0 +5 +10 +15 +20 +25 +30 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +InvGamma +(1, 0.15) +Lag +Autocorrelation(tau) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-mu.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-mu.svg index fa7b0a7e..70e002aa 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-chains-mu.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-mu.svg @@ -18,44 +18,43 @@ - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - --2 --1 -0 -1 -2 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + +-2 +-1 +0 +1 +2 +Normal +(0, 1) +Iteration +mu - - + + - - - - - - -Cohen's - -d + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-omega-2.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-omega-2.svg index 36e497e4..2a1a4479 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-chains-omega-2.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-omega-2.svg @@ -18,47 +18,53 @@ - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +ω +two-sided: +.05 + +~ + +CumDirichlet +( +1, 1 +) +Iteration +omega[0.05,1] - - + + - - - - - - - -ω - -[0.05,1] + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-omega.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-omega.svg index 3b53ee56..02432999 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-chains-omega.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-omega.svg @@ -18,32 +18,32 @@ - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 @@ -51,57 +51,63 @@ +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +Iteration +omega[0.025,0.05] - - + + - - - - - - - -ω - -[0.025,0.05] + + + + - - + + - + - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 @@ -109,50 +115,56 @@ +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +Iteration +omega[0.05,0.5] - - - - - - - -ω - -[0.05,0.5] + + + + - - + + - + - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 @@ -160,16 +172,22 @@ +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +Iteration +omega[0.5,1] - - - - - - - -ω - -[0.5,1] + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-peese.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-peese.svg index 5f67733e..04e63683 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-chains-peese.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-peese.svg @@ -18,54 +18,55 @@ - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - - - - -0 -2 -4 -6 -8 -10 -12 -14 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 +PEESE ~ +Cauchy +(0, 5) +[ +0 +, + +] +Iteration +PEESE - - + + - - - - - - -PEESE - -( -Cohen's - -d -) + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-pet.svg 
b/tests/testthat/_snaps/7-diagnostics/plot-chains-pet.svg index c5121bcc..767fcd18 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-chains-pet.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-pet.svg @@ -18,46 +18,53 @@ - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - - - -0.0 -0.5 -1.0 -1.5 -2.0 -2.5 -3.0 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 +PET ~ +Cauchy +(0, 1) +[ +0 +, + +] +Iteration +PET - - + + - - - - - - -PET + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-mod-con.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-mod-con.svg new file mode 100644 index 00000000..ea3f1516 --- /dev/null +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-mod-con.svg @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.80 +0.85 +0.90 +0.95 +1.00 +1.05 + + + + + + + + + + + +0 +10000 +20000 +30000 +40000 +Iteration +mod_con +Model 6 + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-mu.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-mu.svg new file mode 100644 index 00000000..31a8d466 --- /dev/null +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-mu.svg @@ -0,0 +1,89 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +-1.2 +-1.0 +-0.8 +-0.6 +-0.4 +-0.2 +0.0 +0.2 + + + + + + + + + + + + + +0 +10000 +20000 +30000 +40000 +Iteration +intercept +Model 6 + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-omega.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-omega.svg new file mode 100644 index 00000000..726dd5eb --- /dev/null +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-omega.svg @@ -0,0 +1,85 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 +Iteration +omega[0.05,1] +Model 5 + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-pet.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-pet.svg new file mode 100644 index 00000000..0668258a --- /dev/null +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-reg-pet.svg @@ -0,0 +1,89 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + + + + + + + + +0 +10000 +20000 +30000 +40000 +Iteration +PET +Model 6 + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-chains-tau.svg b/tests/testthat/_snaps/7-diagnostics/plot-chains-tau.svg index 999f5cf1..7b6bdf7d 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-chains-tau.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-chains-tau.svg @@ -18,50 +18,45 @@ - - - - - - - -0 -1000 -2000 -3000 -4000 -5000 - - - - - - - -0 -1 -2 -3 -4 -5 + + + + + + + +0 +1000 +2000 +3000 +4000 +5000 + + + + + + + +0 +1 +2 +3 +4 +5 +InvGamma +(1, 0.15) +Iteration +tau - - + + - - - - - - -τ - -( -Cohen's - -d -) + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-densities-mu.svg b/tests/testthat/_snaps/7-diagnostics/plot-densities-mu.svg index c071ccac..c59bea5b 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-densities-mu.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-densities-mu.svg @@ -18,52 +18,45 @@ - - - - - - - --2 --1 -0 -1 -2 -3 - - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 -1.2 + + + + + + +-2 +-1 +0 +1 +2 + + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 
+1.2 +Normal +(0, 1) +mu +Density - - + + - - - - - - - - - -Density -Cohen's - -d + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-densities-omega-2.svg b/tests/testthat/_snaps/7-diagnostics/plot-densities-omega-2.svg index b4cefe8b..30e734fd 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-densities-omega-2.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-densities-omega-2.svg @@ -18,47 +18,49 @@ - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 - - - - - -0.0 -0.5 -1.0 -1.5 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + +0.0 +0.5 +1.0 +1.5 +ω +two-sided: +.05 + +~ + +CumDirichlet +( +1, 1 +) +omega[0.05,1] +Density - - + + - - - - - - - - - -Density - -ω - -[0.05,1] + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-densities-omega.svg b/tests/testthat/_snaps/7-diagnostics/plot-densities-omega.svg index 8c2d0d28..62f63c2f 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-densities-omega.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-densities-omega.svg @@ -18,30 +18,30 @@ - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 - - - - - - -0.0 -0.5 -1.0 -1.5 -2.0 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 @@ -49,57 +49,59 @@ +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +omega[0.025,0.05] +Density - - + + - - - - - - - - - -Density - -ω - -[0.025,0.05] + + + + - - + + - + - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 - - - - - -0.0 -0.5 -1.0 -1.5 + + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 + + + + + +0.0 +0.5 +1.0 +1.5 @@ -107,56 +109,56 @@ +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +omega[0.05,0.5] +Density - - - - - - - - - -Density - -ω - -[0.05,0.5] + + + + - - + + - + - - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 -1.0 - - - - - - - -0 -1 -2 -3 -4 -5 + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 + + + + + + + +0 +1 +2 +3 +4 +5 @@ -164,20 +166,22 @@ +ω +one-sided: +.5, .05, .025 + +~ + +CumDirichlet +( +1, 1, 1, 1 +) +omega[0.5,1] +Density - - - - - - - - - -Density - -ω - -[0.5,1] + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-densities-peese.svg b/tests/testthat/_snaps/7-diagnostics/plot-densities-peese.svg index a085acaa..5098466f 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-densities-peese.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-densities-peese.svg @@ -18,56 +18,53 @@ - - - - - - - - - -0 -2 -4 -6 -8 -10 -12 -14 - - - - - - -0.0 -0.1 -0.2 -0.3 -0.4 + + + + + + + + + +0 +2 +4 +6 +8 +10 +12 +14 + + + + + + +0.0 +0.1 +0.2 +0.3 +0.4 +PEESE ~ +Cauchy +(0, 5) +[ +0 +, + +] +PEESE +Density - - + + - - - - - - - - - -Density -PEESE - -( -Cohen's - -d -) + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-densities-pet.svg b/tests/testthat/_snaps/7-diagnostics/plot-densities-pet.svg index 6a3eedab..87c94b79 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-densities-pet.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-densities-pet.svg @@ -18,50 +18,51 @@ - - - - - - - - - -0.0 -0.5 -1.0 -1.5 -2.0 -2.5 -3.0 -3.5 - - - - - - -0.0 -0.2 -0.4 -0.6 -0.8 + + + + + + + + +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 + + + + + + +0.0 +0.2 +0.4 +0.6 +0.8 +PET ~ +Cauchy +(0, 1) +[ +0 +, + +] +PET +Density - - + + - - - - - - - - - -Density -PET + + + + diff --git a/tests/testthat/_snaps/7-diagnostics/plot-densities-tau.svg b/tests/testthat/_snaps/7-diagnostics/plot-densities-tau.svg index 9d346ef8..1ebdad53 100644 --- a/tests/testthat/_snaps/7-diagnostics/plot-densities-tau.svg +++ b/tests/testthat/_snaps/7-diagnostics/plot-densities-tau.svg @@ -18,56 +18,47 @@ - 
diff --git a/tests/testthat/test-0-CRAN.R b/tests/testthat/test-0-CRAN.R index 813f7875..fce98a2d 100644 --- a/tests/testthat/test-0-CRAN.R +++ b/tests/testthat/test-0-CRAN.R @@ -61,4 +61,60 @@ test_that("Basic functionality works", { "\033[0;31mModel (6): ESS 28 is lower than the set target (500).\033[0m" , "\033[0;31mThere were another 29 warnings. To see all warnings call 'check_RoBMA(fit)'.\033[0m") ) + + + df_reg <- data.frame( + d = d, + se = se_d(d, n), + mod_con = scale(1:3), + mod_cat = c("A", "A", "B") + ) + + fit_reg <- suppressWarnings(RoBMA.reg(~ mod_cat + mod_con, data = df_reg, priors_bias = NULL, chains = 1, burnin = 50, sample = 100, autofit = FALSE, seed = 1)) + + expect_equal( + capture_output_lines(summary(fit_reg), print = TRUE, width = 150), + c("Call:" , + "RoBMA.reg(formula = ~mod_cat + mod_con, data = df_reg, priors_bias = NULL, " , + " chains = 1, sample = 100, burnin = 50, autofit = FALSE, seed = 1)" , + "" , + "Robust Bayesian meta-analysis" , + "Components summary:" , + " Models Prior prob. Post. prob. Inclusion BF" , + "Effect 8/16 0.500 0.267 0.364" , + "Heterogeneity 8/16 0.500 0.401 0.671" , + "Bias 0/16 0.000 0.000 0.000" , + "" , + "Meta-regression components summary:" , + " Models Prior prob. Post. prob. Inclusion BF" , + "mod_cat 8/16 0.500 0.470 0.886" , + "mod_con 8/16 0.500 0.446 0.805" , + "" , + "Model-averaged estimates:" , + " Mean Median 0.025 0.975" , + "mu 0.048 0.000 -0.300 0.667" , + "tau 0.094 0.000 0.000 0.581" , + "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale).", + "\033[0;31mModel (2): ESS 44 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (3): ESS 45 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (4): ESS 58 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (5): ESS 4 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (6): ESS 6 is lower than the set target (500).\033[0m" , + "\033[0;31mThere were another 9 warnings. To see all warnings call 'check_RoBMA(fit)'.\033[0m" , + "" , + "Model-averaged meta-regression estimates:" , + " Mean Median 0.025 0.975" , + "intercept 0.048 0.000 -0.300 0.667" , + "mod_cat [dif: A] 0.000 0.000 -0.357 0.459" , + "mod_cat [dif: B] 0.000 0.000 -0.459 0.357" , + "mod_con 0.005 0.000 -0.341 0.343" , + "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale).", + "\033[0;31mModel (2): ESS 44 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (3): ESS 45 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (4): ESS 58 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (5): ESS 4 is lower than the set target (500).\033[0m" , + "\033[0;31mModel (6): ESS 6 is lower than the set target (500).\033[0m" , + "\033[0;31mThere were another 9 warnings. 
To see all warnings call 'check_RoBMA(fit)'.\033[0m" + )) + }) diff --git a/tests/testthat/test-2-distributions.R b/tests/testthat/test-2-distributions.R index 683322be..45fce366 100644 --- a/tests/testthat/test-2-distributions.R +++ b/tests/testthat/test-2-distributions.R @@ -98,7 +98,7 @@ test_that("Density function works", { }) test_that("Random number generator function works", { - + skip_on_os("linux") set.seed(1) # verify (visually) no-weights against standard normal diff --git a/tests/testthat/test-3-setup.R b/tests/testthat/test-3-setup.R index 0d2b8509..983bea28 100644 --- a/tests/testthat/test-3-setup.R +++ b/tests/testthat/test-3-setup.R @@ -163,6 +163,221 @@ test_that("Model preview works", { ) }) +test_that("RoBMA.reg model preview works", { + + # also test for model generation as it calls RoBMA.reg function within + + df_reg <- data.frame( + d = c(rep(-1, 5), rep(0, 5), rep(1, 5)), + se = rep(0.1, 15), + mod_cat = c(rep("A", 5), rep("B", 5), rep("C", 5)), + mod_con = c((1:15)/15) + ) + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = FALSE), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)", + "Components summary:" , + " Models Prior prob." , + "Effect 72/144 0.500" , + "Heterogeneity 72/144 0.500" , + "Bias 128/144 0.500" , + "" , + "Meta-regression components summary:" , + " Models Prior prob." , + "mod_cat 72/144 0.500" , + "mod_con 72/144 0.500" + ) + ) + + expect_equal( + capture_output_lines(check_setup.reg(~ 1, data = df_reg, models = FALSE), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)", + "Components summary:" , + " Models Prior prob." , + "Effect 18/36 0.500" , + "Heterogeneity 18/36 0.500" , + "Bias 32/36 0.500" , + "" , + "Meta-regression components summary:" , + "[1] Models Prior prob." , + "<0 rows> (or 0-length row.names)" + )) + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = TRUE, priors_bias = NULL, priors_heterogeneity = NULL), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)" , + "Components summary:" , + " Models Prior prob." , + "Effect 4/8 0.500" , + "Heterogeneity 0/8 0.000" , + "Bias 0/8 0.000" , + "" , + "Meta-regression components summary:" , + " Models Prior prob." 
, + "mod_cat 4/8 0.500" , + "mod_con 4/8 0.500" , + "" , + "Models overview:" , + " Model Prior mod_cat Prior mod_cat Prior mod_cat Prior Heterogeneity Prior Bias Prior prob.", + " 1 Spike(0) Spike(0) Spike(0) Spike(0) 0.125", + " 2 Normal(0, 1) Spike(0) Spike(0) Spike(0) 0.125", + " 3 Spike(0) mean difference contrast: mNormal(0, 0.25) Spike(0) Spike(0) 0.125", + " 4 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Spike(0) Spike(0) 0.125", + " 5 Spike(0) Spike(0) Normal(0, 0.25) Spike(0) 0.125", + " 6 Normal(0, 1) Spike(0) Normal(0, 0.25) Spike(0) 0.125", + " 7 Spike(0) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.125", + " 8 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.125" + ) + ) + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = TRUE, + priors_bias = NULL, priors_heterogeneity = NULL, + priors = list( + "mod_cat" = prior_factor("beta", list(1, 1), contrast = "treatment", prior_weights = 1/2), + "mod_con" = list( + "null" = prior("normal", list(0, 0.01)), + "alt" = prior("normal", list(0.10, 0.30)) + ) + )), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)" , + "Components summary:" , + " Models Prior prob." , + "Effect 4/8 0.500" , + "Heterogeneity 0/8 0.000" , + "Bias 0/8 0.000" , + "" , + "Meta-regression components summary:" , + " Models Prior prob." , + "mod_cat 4/8 0.667" , + "mod_con 4/8 0.500" , + "" , + "Models overview:" , + " Model Prior mod_cat Prior mod_cat Prior mod_cat Prior Heterogeneity Prior Bias Prior prob.", + " 1 Spike(0) Spike(0) Normal(0, 0.01) Spike(0) 0.167", + " 2 Normal(0, 1) Spike(0) Normal(0, 0.01) Spike(0) 0.167", + " 3 Spike(0) treatment contrast: Beta(1, 1) Normal(0, 0.01) Spike(0) 0.083", + " 4 Normal(0, 1) treatment contrast: Beta(1, 1) Normal(0, 0.01) Spike(0) 0.083", + " 5 Spike(0) Spike(0) Normal(0.1, 0.3) Spike(0) 0.167", + " 6 Normal(0, 1) Spike(0) Normal(0.1, 0.3) Spike(0) 0.167", + " 7 Spike(0) treatment contrast: Beta(1, 1) Normal(0.1, 0.3) Spike(0) 0.083", + " 8 Normal(0, 1) treatment contrast: Beta(1, 1) Normal(0.1, 0.3) Spike(0) 0.083" + ) + ) + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = TRUE, + priors_bias = NULL, priors_heterogeneity = NULL, + priors = list( + "mod_cat" = list( + "alt" = prior_factor("beta", list(1, 1), contrast = "treatment", prior_weights = 1/2) + ), + "mod_con" = list( + "null" = prior("normal", list(0, 0.01)), + "alt" = prior("normal", list(0.10, 0.30)) + ) + )), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)" , + "Components summary:" , + " Models Prior prob." , + "Effect 2/4 0.500" , + "Heterogeneity 0/4 0.000" , + "Bias 0/4 0.000" , + "" , + "Meta-regression components summary:" , + " Models Prior prob." 
, + "mod_con 2/4 0.500" , + "" , + "Models overview:" , + " Model Prior mod_cat Prior mod_cat Prior mod_cat Prior Heterogeneity Prior Bias Prior prob.", + " 1 Spike(0) treatment contrast: Beta(1, 1) Normal(0, 0.01) Spike(0) 0.250", + " 2 Normal(0, 1) treatment contrast: Beta(1, 1) Normal(0, 0.01) Spike(0) 0.250", + " 3 Spike(0) treatment contrast: Beta(1, 1) Normal(0.1, 0.3) Spike(0) 0.250", + " 4 Normal(0, 1) treatment contrast: Beta(1, 1) Normal(0.1, 0.3) Spike(0) 0.250" + ) + ) + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = TRUE, + priors_bias = NULL, priors_heterogeneity = NULL, + priors = list( + "mod_cat" = list( + "null" = prior_factor("beta", list(1, 1), contrast = "treatment", prior_weights = 1/2) + ), + "mod_con" = list( + "null" = prior("normal", list(0, 0.01)), + "alt" = prior("normal", list(0.10, 0.30)) + ) + )), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)" , + "Components summary:" , + " Models Prior prob." , + "Effect 2/4 0.500" , + "Heterogeneity 0/4 0.000" , + "Bias 0/4 0.000" , + "" , + "Meta-regression components summary:" , + " Models Prior prob." , + "mod_con 2/4 0.500" , + "" , + "Models overview:" , + " Model Prior mod_cat Prior mod_cat Prior mod_cat Prior Heterogeneity Prior Bias Prior prob.", + " 1 Spike(0) treatment contrast: Beta(1, 1) Normal(0, 0.01) Spike(0) 0.250", + " 2 Normal(0, 1) treatment contrast: Beta(1, 1) Normal(0, 0.01) Spike(0) 0.250", + " 3 Spike(0) treatment contrast: Beta(1, 1) Normal(0.1, 0.3) Spike(0) 0.250", + " 4 Normal(0, 1) treatment contrast: Beta(1, 1) Normal(0.1, 0.3) Spike(0) 0.250" + ) + ) + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = TRUE, test_predictors = FALSE, + priors_bias = NULL, priors_heterogeneity = NULL), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)" , + "Components summary:" , + " Models Prior prob." , + "Effect 1/2 0.500" , + "Heterogeneity 0/2 0.000" , + "Bias 0/2 0.000" , + "" , + "Meta-regression components summary:" , + "[1] Models Prior prob." , + "<0 rows> (or 0-length row.names)" , + "" , + "Models overview:" , + " Model Prior mod_cat Prior mod_cat Prior mod_cat Prior Heterogeneity Prior Bias Prior prob.", + " 1 Spike(0) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.500", + " 2 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.500" + ) + ) + + + expect_equal( + capture_output_lines(check_setup.reg(~ mod_cat + mod_con, data = df_reg, models = TRUE, test_predictors = "mod_cat", + priors_bias = NULL, priors_heterogeneity = NULL), print = TRUE, width = 150), + c("Robust Bayesian meta-regression (set-up)" , + "Components summary:" , + " Models Prior prob." , + "Effect 2/4 0.500" , + "Heterogeneity 0/4 0.000" , + "Bias 0/4 0.000" , + "" , + "Meta-regression components summary:" , + " Models Prior prob." 
, + "mod_cat 2/4 0.500" , + "" , + "Models overview:" , + " Model Prior mod_cat Prior mod_cat Prior mod_cat Prior Heterogeneity Prior Bias Prior prob.", + " 1 Spike(0) Spike(0) Normal(0, 0.25) Spike(0) 0.250", + " 2 Normal(0, 1) Spike(0) Normal(0, 0.25) Spike(0) 0.250", + " 3 Spike(0) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.250", + " 4 Normal(0, 1) mean difference contrast: mNormal(0, 0.25) Normal(0, 0.25) Spike(0) 0.250" + ) + ) + + +}) + test_that("Set autofit control works", { expect_error(set_autofit_control(max_Rhat = .99), "Checking 'autofit_control':\n\tThe 'max_Rhat' must be equal or higher than 1.") diff --git a/tests/testthat/test-4-fit.R b/tests/testthat/test-4-fit.R index 4195009c..c457e31e 100644 --- a/tests/testthat/test-4-fit.R +++ b/tests/testthat/test-4-fit.R @@ -3,7 +3,7 @@ skip_on_cran() skip_on_covr() # test objects -saved_files <- paste0("fit_", 1:13, ".RDS") +saved_files <- paste0("fit_", 1:15, ".RDS") saved_fits <- list() for(i in seq_along(saved_files)){ saved_fits[[i]] <- readRDS(file = file.path("../results/fits", saved_files[i])) @@ -18,7 +18,7 @@ remove_time <- function(fit){ } return(fit) } -clean_all <- function(fit, only_samples = TRUE, remove_call = FALSE){ +clean_all <- function(fit, only_samples = TRUE, remove_call = FALSE){ if(only_samples){ fit$data <- NULL fit$add_info <- NULL @@ -195,15 +195,13 @@ test_that("3-level models work", { test_that("weighted models work", { - # all weights == 1 should correspond to fit1 temp_data <- combine_data( - d = d, - se = d_se, - study_ids = seq_along(d) + d = c(d[1], rep(d[2], 2), rep(d[3], 3)), + se = c(d_se[1], rep(d_se[2], 2), rep(d_se[3], 3)), + weight = c(1, rep(1/2, 2), rep(1/3, 3)) ) - attr(temp_data, "all_independent") <- FALSE - fit1w <- try_parallel(suppressWarnings(RoBMA(data = temp_data, weighted = TRUE, seed = 1, parallel = TRUE))) + fit1w <- try_parallel(suppressWarnings(RoBMA(data = temp_data, seed = 1, parallel = TRUE))) fit1w <- remove_time(fit1w) expect_equal(clean_all(saved_fits[[1]], remove_call = TRUE), clean_all(fit1w, remove_call = TRUE)) @@ -212,15 +210,65 @@ test_that("weighted models work", { expect_true(all(grepl("dwn", sapply(c(8:10, 17:19, 26:28, 35:36), function(i) as.character(fit1w$models[[i]]$fit$model))))) }) +test_that("BMA regression work", { + + df_reg <- data.frame( + d = c(rep(-1, 5), rep(0, 5), rep(1, 5)), + se = rep(0.1, 15), + mod_cat = c(rep("A", 5), rep("B", 5), rep("C", 5)), + mod_con = c((1:15)/15) + ) + + fit_14 <- try_parallel(suppressWarnings(RoBMA.reg(~ mod_cat + mod_con, data = df_reg, priors_bias = NULL, seed = 1, parallel = TRUE))) + fit_14 <- remove_time(fit_14) + expect_equal(clean_all(saved_fits[[14]]), clean_all(fit_14)) +}) + +test_that("RoBMA (simplified) regression with custom priors work", { + + df_reg <- data.frame( + d = scale(c((1:15)/15)), + se = rep(0.1, 15), + mod_con = scale(c((1:15)/15)) + ) + + fit_15 <- try_parallel(suppressWarnings(RoBMA.reg(~ mod_con, data = df_reg, + priors = list( + "mod_con" = list( + "null" = prior("normal", list(0, 0.05)), + "alt" = prior("normal", list(0.30, 0.15)) + ) + ), + priors_effect_null = NULL, + priors_heterogeneity = NULL, + priors_bias = list( + prior_weightfunction(distribution = "two.sided", parameters = list(alpha = c(1, 1), steps = c(0.05)), prior_weights = 1/2), + prior_PET(distribution = "Cauchy", parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2) + ), + seed = 1, parallel = TRUE))) + fit_15 <- remove_time(fit_15) + 
expect_equal(clean_all(saved_fits[[15]]), clean_all(fit_15)) +}) + #### creating / updating the test settings #### if(FALSE){ - saved_fits <- list(fit1, fit2, fit3, fit4, fit5, fit6, fit7, fit8, fit9, fit10, fit11, fit12, fit13) + saved_fits <- list(fit1, fit2, fit3, fit4, fit5, fit6, fit7, fit8, fit9, fit10, fit11, fit12, fit13, fit_14, fit_15) for(i in 1:length(saved_fits)){ saved_fits[[i]] <- remove_time(saved_fits[[i]]) } for(i in 1:length(saved_fits)){ - saveRDS(saved_fits[[i]], file = file.path("tests/results/fits/", paste0("fit_",i,".RDS")), compress = "xz") + saveRDS(saved_fits[[i]], file = file.path("tests/results/fits/", paste0("fit_",i,".RDS")), compress = "xz") + } + + # package version update + # test objects + saved_files <- paste0("fit_", 1:15, ".RDS") + saved_fits <- list() + for(i in seq_along(saved_files)){ + temp_fit <- readRDS(file = file.path("tests/results/fits", saved_files[i])) + temp_fit <- RoBMA:::.update_object(temp_fit) + saveRDS(temp_fit, file = file.path("tests/results/fits/", paste0("fit_",i,".RDS")), compress = "xz") } } diff --git a/tests/testthat/test-5-methods.R b/tests/testthat/test-5-methods.R index c4aa3c67..423889bb 100644 --- a/tests/testthat/test-5-methods.R +++ b/tests/testthat/test-5-methods.R @@ -3,7 +3,7 @@ skip_on_cran() # the summary tables and print functions are imported from BayesTools and tested henceforth # test objects - assuming that the fit function worked properly -saved_files <- paste0("fit_", 1:13, ".RDS") +saved_files <- paste0("fit_", 1:15, ".RDS") saved_fits <- list() for(i in seq_along(saved_files)){ saved_fits[[i]] <- readRDS(file = file.path("../results/fits", saved_files[i])) @@ -224,8 +224,8 @@ test_that("Individual summary functions work", { "" , "Parameter estimates:" , " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" , - "mu 0.176 0.134 -0.097 0.177 0.428 0.00153 0.000 8076 1.000" , - "tau 0.106 0.102 0.019 0.075 0.369 0.00124 0.000 6744 1.000" , + "mu 0.174 0.134 -0.097 0.177 0.428 0.00153 0.011 8076 1.000" , + "tau 0.106 0.102 0.019 0.075 0.369 0.00124 0.012 6744 1.000" , "omega[0,0.1] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" , "omega[0.1,1] 0.500 0.000 0.500 0.500 0.500 NA NA NA NA" , "The estimates are summarized on the Fisher's z scale (priors were specified on the Cohen's d scale)." 
@@ -242,8 +242,8 @@ test_that("Individual summary functions work", { "" , "Parameter estimates:" , " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" , - "mu 0.090 0.184 -0.335 0.108 0.403 0.00357 0.000 2783 1.003" , - "tau 0.115 0.121 0.018 0.078 0.429 0.00146 0.000 6854 1.000" , + "mu 0.090 0.184 -0.335 0.108 0.403 0.00357 0.019 2783 1.003" , + "tau 0.115 0.121 0.018 0.078 0.429 0.00146 0.012 6854 1.000" , "PET 0.730 0.646 0.023 0.556 2.439 0.01383 0.021 2184 1.002" , "The estimates are summarized on the Fisher's z scale (priors were specified on the Cohen's d scale).", "" , @@ -256,9 +256,9 @@ test_that("Individual summary functions work", { "" , "Parameter estimates:" , " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" , - "mu 0.102 0.179 -0.304 0.117 0.411 0.00311 0.000 3482 1.001" , - "tau 0.130 0.151 0.020 0.085 0.490 0.00228 0.000 4430 1.001" , - "PEESE 3.107 2.647 0.100 2.467 9.645 0.05316 0.000 2480 1.000" , + "mu 0.101 0.179 -0.304 0.117 0.411 0.00311 0.017 3482 1.001" , + "tau 0.130 0.151 0.020 0.085 0.490 0.00228 0.015 4430 1.001" , + "PEESE 3.107 2.647 0.100 2.467 9.645 0.05316 0.020 2480 1.000" , "The estimates are summarized on the Fisher's z scale (priors were specified on the Cohen's d scale)." )) @@ -279,8 +279,8 @@ test_that("Individual summary functions work", { "" , "Parameter estimates:" , " Mean SD lCI Median uCI error(MCMC) error(MCMC)/SD ESS R-hat" , - "mu 0.176 0.134 -0.097 0.177 0.428 0.00153 0.000 8076 1.000" , - "tau 0.106 0.102 0.019 0.075 0.369 0.00124 0.000 6744 1.000" , + "mu 0.174 0.134 -0.097 0.177 0.428 0.00153 0.011 8076 1.000" , + "tau 0.106 0.102 0.019 0.075 0.369 0.00124 0.012 6744 1.000" , "omega[0,0.1] 1.000 0.000 1.000 1.000 1.000 NA NA NA NA" , "omega[0.1,1] 0.500 0.000 0.500 0.500 0.500 NA NA NA NA" , "The estimates are summarized on the Fisher's z scale (priors were specified on the Cohen's d scale)." @@ -349,10 +349,73 @@ test_that("Interpret functions work", { }) +test_that("Marginal summary functions work", { + + expect_error(marginal_summary(saved_fits[[1]]), "'marginal_summary' function is available only for RoBMA regression models") + + expect_equal( + capture_output_lines(marginal_summary(saved_fits[[14]]), print = TRUE, width = 150), + c("Call:" , + "RoBMA.reg(formula = ~mod_cat + mod_con, data = df_reg, priors_bias = NULL, " , + " parallel = TRUE, seed = 1)" , + "" , + "Robust Bayesian meta-analysis" , + "Model-averaged marginal estimates:" , + " Mean Median 0.025 0.975 Inclusion BF" , + "intercept 0.000 0.000 0.000 0.000 0.026" , + "mod_cat[A] -0.883 -0.909 -1.007 -0.685 Inf" , + "mod_cat[B] 0.001 0.001 -0.071 0.071 0.085" , + "mod_cat[C] 0.882 0.908 0.681 1.008 Inf" , + "mod_con[-1SD] -0.056 0.000 -0.230 0.009 0.632" , + "mod_con[0SD] 0.000 0.000 0.000 0.000 0.174" , + "mod_con[1SD] 0.056 0.000 -0.011 0.229 0.638" , + "The estimates are summarized on the Cohen's d scale (priors were specified on the Cohen's d scale)." , + "\033[0;31mmu_mod_cat[A]: Posterior samples do not span both sides of the null hypothesis. The Savage-Dickey density ratio is likely to be overestimated.\033[0m" , + "\033[0;31mmu_mod_cat[C]: Posterior samples do not span both sides of the null hypothesis. The Savage-Dickey density ratio is likely to be overestimated.\033[0m" , + "\033[0;31mmu_mod_con[0SD]: There is a considerable cluster of posterior samples at the exact null hypothesis values. 
The Savage-Dickey density ratio is likely to be invalid.\033[0m", + "\033[0;31mmu_mod_con[0SD]: There is a considerable cluster of prior samples at the exact null hypothesis values. The Savage-Dickey density ratio is likely to be invalid.\033[0m" + ) + ) + + expect_equal( + capture_output_lines(marginal_summary(saved_fits[[15]], conditional = TRUE, output_scale = "r"), print = TRUE, width = 150), + c("Call:" , + "RoBMA.reg(formula = ~mod_con, data = df_reg, priors = list(mod_con = list(null = prior(\"normal\", " , + " list(0, 0.05)), alt = prior(\"normal\", list(0.3, 0.15)))), " , + " priors_heterogeneity = NULL, priors_bias = list(prior_weightfunction(distribution = \"two.sided\", " , + " parameters = list(alpha = c(1, 1), steps = c(0.05)), " , + " prior_weights = 1/2), prior_PET(distribution = \"Cauchy\", " , + " parameters = list(0, 1), truncation = list(0, Inf), prior_weights = 1/2)), " , + " priors_effect_null = NULL, parallel = TRUE, seed = 1)" , + "" , + "Robust Bayesian meta-analysis" , + "Model-averaged marginal estimates:" , + " Mean Median 0.025 0.975 Inclusion BF" , + "intercept -0.013 -0.003 -0.135 0.023 0.029" , + "mod_con[-1SD] -0.428 -0.422 -0.514 -0.394 Inf" , + "mod_con[0SD] -0.013 -0.003 -0.135 0.023 0.029" , + "mod_con[1SD] 0.409 0.417 0.310 0.443 71.703" , + "The estimates are summarized on the correlation scale (priors were specified on the Cohen's d scale)." , + "\033[0;31mmu_mod_con[-1SD]: Posterior samples do not span both sides of the null hypothesis. The Savage-Dickey density ratio is likely to be overestimated.\033[0m", + "" , + "Conditional marginal estimates:" , + " Mean Median 0.025 0.975 Inclusion BF" , + "intercept -0.013 -0.003 -0.135 0.023 0.029" , + "mod_con[-1SD] -0.428 -0.422 -0.514 -0.394 Inf" , + "mod_con[0SD] -0.013 -0.003 -0.135 0.023 0.029" , + "mod_con[1SD] 0.409 0.417 0.310 0.443 71.703" , + "The estimates are summarized on the correlation scale (priors were specified on the Cohen's d scale)." , + "\033[0;31mmu_mod_con[-1SD]: Posterior samples do not span both sides of the null hypothesis. 
The Savage-Dickey density ratio is likely to be overestimated.\033[0m" + ) + ) + + +}) + #### creating / updating the test settings #### if(FALSE){ - saved_files <- paste0("fit_", 1:13, ".RDS") + saved_files <- paste0("fit_", 1:15, ".RDS") saved_fits <- list() for(i in seq_along(saved_files)){ saved_fits[[i]] <- readRDS(file = file.path("tests/results/fits", saved_files[i])) diff --git a/tests/testthat/test-6-plots.R b/tests/testthat/test-6-plots.R index 8200c540..8fea240a 100644 --- a/tests/testthat/test-6-plots.R +++ b/tests/testthat/test-6-plots.R @@ -3,19 +3,20 @@ skip_on_cran() # the plotting functions are imported from BayesTools and tested henceforth # test objects - assuming that the fit function worked properly -saved_files <- paste0("fit_", 1:13, ".RDS") +saved_files <- paste0("fit_", 1:15, ".RDS") saved_fits <- list() for(i in seq_along(saved_files)){ saved_fits[[i]] <- readRDS(file = file.path("../results/fits", saved_files[i])) } # alternative components present in the models: -effect <- c(1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13) -heterogeneity <- c(1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13) -weightfunctions <- c(1, 2, 4, 5, 6, 7, 10, 11, 13) -PETPEESE <- c(1, 3, 4, 5, 6, 7, 11, 13) -no_weightfunctions <- c(3, 8, 12) -no_PETPEESE <- c(2, 8, 10, 12) +effect <- c(1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15) +heterogeneity <- c(1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14) +weightfunctions <- c(1, 2, 4, 5, 6, 7, 10, 11, 13, 15) +PETPEESE <- c(1, 3, 4, 5, 6, 7, 11, 13, 15) +no_weightfunctions <- c(3, 8, 12, 14) +no_PETPEESE <- c(2, 8, 10, 12, 14) +metaregression <- c(14, 15) test_that("Parameter plots work", { @@ -149,6 +150,49 @@ test_that("Parameter plots work", { ### 3-level structure expect_doppelganger(paste0("plot_rho_",13), function()plot(saved_fits[[13]], "rho")) expect_doppelganger(paste0("plot_rho2_",13), function()plot(saved_fits[[13]], "rho")) + + ### meta-regression parameter plots + i <- 14 + set.seed(1) + + # factors + expect_doppelganger(paste0("ggplot_reg-fac-1_",i), plot(saved_fits[[i]], "mod_cat", plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-fac-2_",i), plot(saved_fits[[i]], "mod_cat", prior = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-fac-3_",i), plot(saved_fits[[i]], "mod_cat", conditional = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-fac-4_",i), plot(saved_fits[[i]], "mod_cat", conditional = TRUE, prior = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("plot_reg-fac-1_",i), function()plot(saved_fits[[i]], "mod_cat")) + expect_doppelganger(paste0("plot_reg-fac-2_",i), function()plot(saved_fits[[i]], "mod_cat", prior = TRUE)) + expect_doppelganger(paste0("plot_reg-fac-3_",i), function()plot(saved_fits[[i]], "mod_cat", conditional = TRUE)) + expect_doppelganger(paste0("plot_reg-fac-4_",i), function()plot(saved_fits[[i]], "mod_cat", conditional = TRUE, prior = TRUE)) + expect_doppelganger(paste0("plot_reg-fac-5_",i), function()plot(saved_fits[[i]], "mod_cat", output_scale = "logOR")) + expect_doppelganger(paste0("plot_reg-fac-6_",i), function()plot(saved_fits[[i]], "mod_cat", output_scale = "r", prior = TRUE)) + + # continuous + expect_doppelganger(paste0("ggplot_reg-con-1_",i), plot(saved_fits[[i]], "mod_con", plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-con-2_",i), plot(saved_fits[[i]], "mod_con", prior = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-con-3_",i), plot(saved_fits[[i]], "mod_con", conditional = TRUE, plot_type = "ggplot")) + 
expect_doppelganger(paste0("ggplot_reg-con-4_",i), plot(saved_fits[[i]], "mod_con", conditional = TRUE, prior = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("plot_reg-con-1_",i), function()plot(saved_fits[[i]], "mod_con")) + expect_doppelganger(paste0("plot_reg-con-2_",i), function()plot(saved_fits[[i]], "mod_con", prior = TRUE)) + expect_doppelganger(paste0("plot_reg-con-3_",i), function()plot(saved_fits[[i]], "mod_con", conditional = TRUE)) + expect_doppelganger(paste0("plot_reg-con-4_",i), function()plot(saved_fits[[i]], "mod_con", conditional = TRUE, prior = TRUE)) + expect_doppelganger(paste0("plot_reg-con-5_",i), function()plot(saved_fits[[i]], "mod_con", output_scale = "logOR")) + expect_doppelganger(paste0("plot_reg-con-6_",i), function()plot(saved_fits[[i]], "mod_con", output_scale = "r", prior = TRUE)) + + # continuous, alternative only + i <- 15 + expect_doppelganger(paste0("ggplot_reg-con-alt-1_",i), plot(saved_fits[[i]], "mod_con", plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-con-alt-2_",i), plot(saved_fits[[i]], "mod_con", prior = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-con-alt-3_",i), plot(saved_fits[[i]], "mod_con", conditional = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("ggplot_reg-con-alt-4_",i), plot(saved_fits[[i]], "mod_con", conditional = TRUE, prior = TRUE, plot_type = "ggplot")) + expect_doppelganger(paste0("plot_reg-con-alt-1_",i), function()plot(saved_fits[[i]], "mod_con")) + expect_doppelganger(paste0("plot_reg-con-alt-2_",i), function()plot(saved_fits[[i]], "mod_con", prior = TRUE)) + expect_doppelganger(paste0("plot_reg-con-alt-3_",i), function()plot(saved_fits[[i]], "mod_con", conditional = TRUE)) + expect_doppelganger(paste0("plot_reg-con-alt-4_",i), function()plot(saved_fits[[i]], "mod_con", conditional = TRUE, prior = TRUE)) + expect_doppelganger(paste0("plot_reg-con-alt-5_",i), function()plot(saved_fits[[i]], "mod_con", output_scale = "logOR")) + expect_doppelganger(paste0("plot_reg-con-alt-6_",i), function()plot(saved_fits[[i]], "mod_con", output_scale = "r", prior = TRUE)) + + plot(saved_fits[[14]], "mod_cat", conditional = TRUE) }) @@ -221,3 +265,15 @@ test_that("Forest plots work", { }) +test_that("Marginal posterior plots work", { + + + expect_error(marginal_plot(saved_fits[[1]]), "'marginal_plot' function is available only for RoBMA regression models") + expect_error(marginal_plot(saved_fits[[14]], "mu"), "The 'mu' values are not recognized by the 'parameter' argument.") + + expect_doppelganger("mm_ggplot_mod_cat_1", marginal_plot(saved_fits[[14]], "mod_cat", plot_type = "ggplot")) + expect_doppelganger("mm_ggplot_mod_cat_2", marginal_plot(saved_fits[[14]], "mod_cat", prior = TRUE, plot_type = "ggplot")) + expect_doppelganger("mm_ggplot_mod_cat_3", marginal_plot(saved_fits[[14]], "mod_cat", prior = TRUE, plot_type = "ggplot", output_scale = "r")) + expect_doppelganger("mm_ggplot_mod_con_1", marginal_plot(saved_fits[[14]], "mod_con", prior = TRUE, plot_type = "ggplot", xlim = c(-1, 1))) + expect_doppelganger("mm_ggplot_mod_con_2", function()marginal_plot(saved_fits[[15]], "mod_con", conditional = TRUE)) +}) diff --git a/tests/testthat/test-7-diagnostics.R b/tests/testthat/test-7-diagnostics.R index 927d9c52..505972f6 100644 --- a/tests/testthat/test-7-diagnostics.R +++ b/tests/testthat/test-7-diagnostics.R @@ -2,7 +2,7 @@ context("(7) Diagnostics plots") skip_on_cran() # test objects - assuming that the fit function worked properly -saved_files <- paste0("fit_", 1, ".RDS") 
+saved_files <- paste0("fit_", c(1, 15), ".RDS") saved_fits <- list() for(i in seq_along(saved_files)){ saved_fits[[i]] <- readRDS(file = file.path("../results/fits", saved_files[i])) @@ -10,6 +10,7 @@ for(i in seq_along(saved_files)){ test_that("Diagnostic plots work", { + # RoBMA chains_mu <- diagnostics(saved_fits[[1]], "mu", "chains", plot_type = "ggplot") chains_tau <- diagnostics(saved_fits[[1]], "tau", "chains", plot_type = "ggplot") chains_omega <- diagnostics(saved_fits[[1]], "omega", "chains", plot_type = "ggplot") @@ -87,4 +88,15 @@ test_that("Diagnostic plots work", { expect_doppelganger(paste0("plot_chains_PEESE"), function()diagnostics(saved_fits[[1]], "PEESE", "chains", show_models = 36)) expect_doppelganger(paste0("plot_autocorrelation_PEESE"), function()diagnostics(saved_fits[[1]], "PEESE", "autocorrelation", show_models = 36)) expect_doppelganger(paste0("plot_densities_PEESE"), function()diagnostics(saved_fits[[1]], "PEESE", "densities", show_models = 36)) + + ### RoBMA.reg + chains_mu <- diagnostics(saved_fits[[2]], "mu", "chains", plot_type = "ggplot") + chains_omega <- diagnostics(saved_fits[[2]], "omega", "chains", plot_type = "ggplot") + chains_PET <- diagnostics(saved_fits[[2]], "PET", "chains", plot_type = "ggplot") + chains_mod_con <- diagnostics(saved_fits[[2]], "mod_con", "chains", plot_type = "ggplot") + + expect_doppelganger("plot_chains.reg_mu", chains_mu[[6]]) + expect_doppelganger("plot_chains.reg_omega", chains_omega[[5]]) + expect_doppelganger("plot_chains.reg_PET", chains_PET[[6]]) + expect_doppelganger("plot_chains.reg_mod_con", chains_mod_con[[6]]) }) diff --git a/tools/check.env b/tools/check.env new file mode 100644 index 00000000..df651179 --- /dev/null +++ b/tools/check.env @@ -0,0 +1,2 @@ +# skip documentation checks because of missing contrast files +_R_CHECK_RD_CONTENTS_=false \ No newline at end of file diff --git a/vignettes/HierarchicalBMA.Rmd b/vignettes/HierarchicalBMA.Rmd new file mode 100644 index 00000000..6c99db66 --- /dev/null +++ b/vignettes/HierarchicalBMA.Rmd @@ -0,0 +1,241 @@ +--- +title: "Hierarchical Bayesian Model-Averaged Meta-Analysis" +author: "František Bartoš" +date: "`r Sys.Date()`" +output: + rmarkdown::html_vignette: + self_contained: yes +bibliography: ../inst/REFERENCES.bib +csl: ../inst/apa.csl +vignette: > + %\VignetteIndexEntry{Hierarchical Bayesian Model-Averaged Meta-Analysis} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} + %\VignetteEngine{knitr::rmarkdown_notangle} +--- + +```{r setup, include = FALSE} +is_check <- ("CheckExEnv" %in% search()) || + any(c("_R_CHECK_TIMINGS_", "_R_CHECK_LICENSE_") %in% names(Sys.getenv())) || + !file.exists("../models/HierarchicalBMA/fit.RDS") +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + eval = !is_check, + dev = "png") +if(.Platform$OS.type == "windows"){ + knitr::opts_chunk$set(dev.args = list(type = "cairo")) +} +``` +```{r include = FALSE} +library(RoBMA) +# we pre-load the RoBMA models, the fitting time is around 2-5 minutes +fit.0 <- readRDS(file = "../models/HierarchicalBMA/fit.0.RDS") +fit <- readRDS(file = "../models/HierarchicalBMA/fit.RDS") +fit_BMA <- readRDS(file = "../models/HierarchicalBMA/fit_BMA.RDS") +hierarchical_test <- readRDS(file = "../models/HierarchicalBMA/hierarchical_test.RDS") +``` + +```{r include = FALSE, eval = FALSE} +# R package version updating +library(RoBMA) + +data("dat.konstantopoulos2011", package = "metadat") +dat <- dat.konstantopoulos2011 + +fit.0 <- RoBMA(d = dat$yi, v = dat$vi, + priors_effect_null = 
NULL,
+                priors_heterogeneity_null = NULL,
+                priors_bias               = NULL,
+                parallel = TRUE, seed = 1)
+
+fit <- RoBMA(d = dat$yi, v = dat$vi, study_ids = dat$district,
+             priors_effect_null        = NULL,
+             priors_heterogeneity_null = NULL,
+             priors_bias               = NULL,
+             parallel = TRUE, seed = 1)
+
+fit_BMA <- RoBMA(d = dat$yi, v = dat$vi, study_ids = dat$district,
+                 priors_bias = NULL,
+                 parallel = TRUE, seed = 1)
+
+hierarchical_test <- RoBMA(d = dat$yi, v = dat$vi, study_ids = dat$district,
+                           priors_heterogeneity_null = NULL,
+                           priors_hierarchical_null  = prior(distribution = "spike", parameters = list("location" = 0)),
+                           priors_bias               = NULL,
+                           parallel = TRUE, seed = 1)
+
+saveRDS(fit.0, file = "../models/HierarchicalBMA/fit.0.RDS", compress = "xz")
+saveRDS(fit, file = "../models/HierarchicalBMA/fit.RDS", compress = "xz")
+saveRDS(fit_BMA, file = "../models/HierarchicalBMA/fit_BMA.RDS", compress = "xz")
+saveRDS(hierarchical_test, file = "../models/HierarchicalBMA/hierarchical_test.RDS", compress = "xz")
+```
+
+Hierarchical (or multilevel/3-level) meta-analysis adjusts for the dependency of effect sizes due to clustering in the data. For example, effect size estimates from multiple experiments reported in the same manuscript might be expected to be more similar than effect sizes from a different paper [@konstantopoulos2011fixed]. This vignette illustrates how to deal with such dependencies among effect size estimates (in cases with a simple nested structure) using Bayesian model-averaged meta-analysis (BMA) [@gronau2017bayesian; @gronau2020primer; @bartos2021bayesian]. (See other vignettes for more details on BMA: [Reproducing BMA](ReproducingBMA.html) or [Informed BMA in medicine](MedicineBMA.html).)
+
+First, we introduce the example data set. Second, we illustrate the frequentist hierarchical meta-analysis with the `metafor` R package and discuss the results. Third, we outline the hierarchical meta-analysis parameterization. Fourth, we estimate the Bayesian model-averaged hierarchical meta-analysis. Finally, we conclude by discussing further extensions and publication bias adjustment.
+
+### Example Data Set
+
+We use the `dat.konstantopoulos2011` data set from the `metadat` R package [@metadat], which is also used to illustrate the same functionality in the `metafor` [@metafor] R package. We roughly follow the example in the data set's help file, `?dat.konstantopoulos2011`. The data set consists of 56 studies estimating the effects of modified school calendars on students' achievement. The 56 studies were run in individual schools, which can be grouped into 11 districts. We might expect more similar effect size estimates from schools in the same district -- in other words, the effect size estimates from the same district might not be completely independent. Consequently, we might want to adjust for this dependency (clustering) between the effect size estimates to draw a more appropriate inference.
+
+First, we load the data set, assign it to the `dat` object, and inspect the first few rows.
+```{r}
+data("dat.konstantopoulos2011", package = "metadat")
+dat <- dat.konstantopoulos2011
+
+head(dat)
+```
+
+In the following analyses, we use these variables:
+
+ - `yi`, the standardized mean differences,
+ - `vi`, the sampling variances of the standardized mean differences,
+ - `district`, the district id, which distinguishes among the districts,
+ - and `school`, which distinguishes among the schools within the same district.
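As a quick, optional check of the nesting structure (an illustrative aside that is not required for the analyses below), we can count how many schools contribute effect size estimates within each district using base R:

``` r
# number of schools (i.e., effect size estimates) per district
table(dat$district)

# number of districts (clusters)
length(unique(dat$district))
```

Districts contributing multiple schools are precisely the source of the dependency that the hierarchical models below account for.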
+
+### Frequentist Hierarchical Meta-Analysis with `metafor`
+
+We follow the data set's help file and fit a simple random effects meta-analysis using the `rma()` function from the `metafor` package. This model ignores the dependency between effect size estimates. We use this simple model as our starting point and as a comparison with the later models.
+``` {r}
+fit_metafor.0 <- metafor::rma(yi = yi, vi = vi, data = dat)
+fit_metafor.0
+```
+The model summary returns a small but statistically significant effect size estimate $\mu = 0.128$ ($\text{se} = 0.044$) and a considerable heterogeneity estimate $\tau = 0.297$.
+
+We extend the model to account for the hierarchical structure of the data, i.e., schools within districts, by using the `rma.mv()` function from the `metafor` package and extending it with the `random = ~ school | district` argument.
+``` {r}
+fit_metafor <- metafor::rma.mv(yi, vi, random = ~ school | district, data = dat)
+fit_metafor
+```
+We find that accounting for the hierarchical structure of the data results in (1) a slightly larger effect size estimate ($\mu = 0.187$) and (2) a larger standard error of the effect size estimate ($\text{se} = 0.085$). The larger standard error is a natural consequence of accounting for the dependency between the effect sizes. Since the effect sizes are dependent, they do not contribute independent information. Specifying the hierarchical model then accounts for the dependency by estimating the similarity between estimates from the same cluster (district) and discounting the information borrowed from each estimate. The similarity among estimates from the same cluster is summarized by the estimate $\rho = 0.666$.
+
+### Specifications of Hierarchical Meta-Analysis
+
+We specify a simple hierarchical meta-analytic model (see @konstantopoulos2011fixed for an example). Using distributional notation, we can describe the data generating process as a multi-stage sampling procedure. In a nutshell, we assume the existence of an overall mean effect $\mu$. Next, we assume that the district-level effects $\gamma_k$ in each district $k = 1, \dots, K$ systematically differ from the mean effect, with the variance of the district-level effects summarized by the heterogeneity $\tau_{b}$ (b for between). Furthermore, we assume that the true effects $\theta_{k,j}$ of each study $j = 1, \dots, J_k$ systematically differ from the district-level effect, with the variance of the study effects around the district-level effect summarized by the heterogeneity $\tau_{w}$ (w for within). Finally, the observed effect sizes $y_{k,j}$ differ from the true effects $\theta_{k,j}$ due to random errors with standard errors $\text{se}_{k,j}$.
+
+Mathematically, we can describe such a model as:
+$$
+\begin{aligned}
+  \gamma_k &\sim \text{N}(\mu, \tau_b^2),\\
+  \theta_{k,j} &\sim \text{N}(\gamma_k, \tau_w^2),\\
+  y_{k,j} &\sim \text{N}(\theta_{k,j}, \text{se}_{k,j}^2),\\
+\end{aligned}
+$$
+where $\text{N}()$ denotes a normal distribution parameterized by mean and variance.
+
+Conveniently, and with a bit of algebra, we do not need to estimate the district-level and true study effects.
Instead, we marginalize them out and sample the observed effect sizes from each district, $y_{k,.}$, directly from a multivariate normal distribution, $\text{MN}()$, with a common mean $\mu$ and covariance matrix $\text{S}$:
+$$
+\begin{aligned}
+  y_{k,.} &\sim \text{MN}(\mu, \text{S}),\\
+  \text{S} &= \begin{bmatrix}
+    \tau_b^2 + \tau_w^2 + \text{se}_1^2 & \tau_w^2 & \dots & \tau_w^2 \\
+    \tau_w^2 & \tau_b^2 + \tau_w^2 + \text{se}_2^2 & \dots & \tau_w^2 \\
+    \dots & \dots & \dots & \dots \\
+    \tau_w^2 & \tau_w^2 & \dots & \tau_b^2 + \tau_w^2 + \text{se}_{J_k}^2 \\
+  \end{bmatrix}.
+\end{aligned}
+$$
+The random effects marginalization is helpful as it allows us to sample far fewer parameters from the posterior distribution (which significantly simplifies marginal likelihood estimation via bridge sampling). Furthermore, the marginalization allows us to properly specify selection model publication bias adjustments -- the marginalization propagates the selection process up through all the sampling steps at once (we cannot proceed with the sequential sampling as the selection procedure on the observed effect sizes modifies the sampling distributions of all the preceding levels).
+
+We can further re-parameterize the model by performing the following substitution,
+$$
+\begin{aligned}
+  \tau^2 &= \tau_b^2 + \tau_w^2,\\
+  \rho &= \frac{\tau_w^2}{\tau_b^2 + \tau_w^2},
+\end{aligned}
+$$
+and specifying the covariance matrix using the inter-study correlation $\rho$, the total heterogeneity $\tau$, and the standard errors $\text{se}_{.}$:
+$$
+\begin{aligned}
+  \text{S} &= \begin{bmatrix}
+    \tau^2 + \text{se}_1^2 & \rho\tau^2 & \dots & \rho\tau^2 \\
+    \rho\tau^2 & \tau^2 + \text{se}_2^2 & \dots & \rho\tau^2 \\
+    \dots & \dots & \dots & \dots \\
+    \rho\tau^2 & \rho\tau^2 & \dots & \tau^2 + \text{se}_{J_k}^2 \\
+  \end{bmatrix}.
+\end{aligned}
+$$
+This specification corresponds to the compound symmetry covariance matrix of random effects, the default setting in the `metafor::rma.mv()` function. More importantly, it allows us to easily specify prior distributions on the correlation coefficient $\rho$ and the total heterogeneity $\tau$.
+
+### Hierarchical Bayesian Model-Averaged Meta-Analysis with `RoBMA`
+
+Before we estimate the complete Hierarchical Bayesian Model-Averaged Meta-Analysis (hBMA) with the `RoBMA` package, we quickly reproduce the simpler models we estimated with the `metafor` package in the previous section.
+
+#### Bayesian Random Effects Meta-Analysis
+First, we estimate a simple Bayesian random effects meta-analysis (corresponding to `fit_metafor.0`). We use the `RoBMA()` function and specify the effect sizes and sampling variances via the `d = dat$yi` and `v = dat$vi` arguments. We set the `priors_effect_null`, `priors_heterogeneity_null`, and `priors_bias` arguments to `NULL` to omit models assuming the absence of the effect, heterogeneity, and the publication bias adjustment components.
+``` r
+fit.0 <- RoBMA(d = dat$yi, v = dat$vi,
+               priors_effect_null        = NULL,
+               priors_heterogeneity_null = NULL,
+               priors_bias               = NULL,
+               parallel = TRUE, seed = 1)
+```
+We generate a complete summary for the only estimated model by adding the `type = "individual"` argument to the `summary()` function.
+``` {r}
+summary(fit.0, type = "individual")
+```
+We verify that the effect size estimate, $\mu = 0.126$ ($\text{95% CI } [0.041, 0.211]$), and the heterogeneity estimate, $\tau = 0.292$ ($\text{95% CI } [0.233, 0.364]$), closely correspond to the frequentist results (as we would expect from parameter estimates under weakly informative priors).
+
+#### Hierarchical Bayesian Random Effects Meta-Analysis
+Second, we account for the clustering of effect size estimates within districts by extending the previous function call with the `study_ids = dat$district` argument. This allows us to estimate the hierarchical Bayesian random effects meta-analysis (corresponding to `fit_metafor`). We use the default prior distribution for the correlation parameter, $\rho \sim \text{Beta}(1, 1)$, set via the `priors_hierarchical` argument, which restricts the correlation to be positive and uniformly distributed on the interval $(0, 1)$.
+``` r
+fit <- RoBMA(d = dat$yi, v = dat$vi, study_ids = dat$district,
+             priors_effect_null        = NULL,
+             priors_heterogeneity_null = NULL,
+             priors_bias               = NULL,
+             parallel = TRUE, seed = 1)
+```
+Again, we generate the complete summary for the only estimated model,
+``` {r}
+summary(fit, type = "individual")
+```
+and verify that our estimates, again, correspond to the frequentist counterparts, with the estimated effect size, $\mu = 0.181$ ($\text{95% CI } [0.017, 0.346]$), heterogeneity, $\tau = 0.308$ ($\text{95% CI } [0.223, 0.442]$), and correlation, $\rho = 0.627$ ($\text{95% CI } [0.320, 0.864]$).
+
+We can further visualize the prior and posterior distribution of the $\rho$ parameter using the `plot()` function.
+```{r fig_rho, dpi = 300, fig.width = 4, fig.height = 3, out.width = "50%", fig.align = "center"}
+par(mar = c(2, 4, 0, 0))
+plot(fit, parameter = "rho", prior = TRUE)
+```
+
+#### Hierarchical Bayesian Model-Averaged Meta-Analysis
+Third, we extend the previous model into a model ensemble that also includes models assuming the absence of the effect and/or heterogeneity (we do not incorporate models assuming the presence of publication bias due to the computational complexity explained in the Summary section). Including those additional models allows us to evaluate the evidence in favor of the effect and heterogeneity. Furthermore, specifying all those additional models allows us to incorporate the uncertainty about the specified models and weight the posterior distribution according to how well the models predicted the data. We estimate the remaining models by removing the `priors_effect_null` and `priors_heterogeneity_null` arguments from the previous function calls, which adds back the previously omitted models of no effect and/or no heterogeneity.
+``` r
+fit_BMA <- RoBMA(d = dat$yi, v = dat$vi, study_ids = dat$district,
+                 priors_bias = NULL,
+                 parallel = TRUE, seed = 1)
+```
+Now we generate a summary for the complete model-averaged ensemble by not specifying any additional arguments in the `summary()` function.
+``` {r}
+summary(fit_BMA)
+```
+We find that the ensemble contains four models, i.e., the combinations of models assuming the presence/absence of the effect/heterogeneity, each with equal prior model probabilities. Importantly, the models assuming heterogeneity are also specified with the hierarchical structure and account for the clustering. A comparison of the specified models reveals weak evidence against the effect, $\text{BF}_{10} = 0.917$, and extreme evidence for the presence of heterogeneity, $\text{BF}_{\text{rf}} = 9.3\times10^{92}$.
Moreover, we find that the `Hierarchical` component summary contains the same values as the `Heterogeneity` component summary. The reason is that, by default, all models assuming the presence of heterogeneity are specified with the hierarchical structure.
+
+We also obtain the model-averaged posterior estimates that combine the posterior estimates from all models according to the posterior model probabilities: the effect size, $\mu = 0.087$ ($\text{95% CI } [0.000, 0.314]$), heterogeneity, $\tau = 0.326$ ($\text{95% CI } [0.231, 0.472]$), and correlation, $\rho = 0.659$ ($\text{95% CI } [0.354, 0.879]$).
+
+#### Testing the Presence of Clustering
+In the previous analyses, we assumed that the effect sizes are indeed clustered within the districts, and we only adjusted for the clustering. However, the effect sizes within the same cluster do not necessarily need to be more similar than effect sizes across different clusters. Now, we specify a model ensemble that allows us to test this assumption by specifying two sets of random effect meta-analytic models. The first set of models assumes that there is indeed clustering and that the correlation of random effects is uniformly distributed on the $(0, 1)$ interval (as in the previous analyses). The second set of models assumes that there is no clustering, i.e., that the correlation of random effects $\rho = 0$, which simplifies the structured covariance matrix to a diagonal matrix. Again, we model-average across models assuming the presence and absence of the effect to account for model uncertainty.
+
+To specify this 'special' model ensemble with the `RoBMA()` function, we need to modify the previous model call in the following ways. We remove the fixed effect models by specifying the `priors_heterogeneity_null = NULL` argument.$^1$ Furthermore, we specify the prior distribution for models assuming the absence of the hierarchical structure by adding the `priors_hierarchical_null = prior(distribution = "spike", parameters = list("location" = 0))` argument.
+```r
+hierarchical_test <- RoBMA(d = dat$yi, v = dat$vi, study_ids = dat$district,
+                           priors_heterogeneity_null = NULL,
+                           priors_hierarchical_null  = prior(distribution = "spike", parameters = list("location" = 0)),
+                           priors_bias               = NULL,
+                           parallel = TRUE, seed = 1)
+
+```
+``` {r}
+summary(hierarchical_test)
+```
+We summarize the resulting model ensemble and find that the `Hierarchical` component is no longer equivalent to the `Heterogeneity` component -- the new model specification allows us to compare random effect models assuming the presence of the hierarchical structure to random effect models assuming its absence. The resulting inclusion Bayes factor of the hierarchical structure shows extreme evidence in favor of clustering of the effect sizes, $\text{BF}_{\rho\bar{\rho}} = 4624$, i.e., there is extreme evidence that the intervention results in more similar effects within the districts.
+
+### Summary
+
+We illustrated how to estimate a hierarchical Bayesian model-averaged meta-analysis using the `RoBMA` package. The hBMA model allows us to test for the presence vs. absence of the effect and heterogeneity while simultaneously adjusting for clustered effect size estimates. While the current implementation allows us to draw fully Bayesian inference, incorporate prior information, and acknowledge model uncertainty, it has a few limitations in contrast to the `metafor` package.
For example, the `RoBMA` package only allows simple nested random effects (e.g., estimates nested within studies, or schools nested within districts). The simple nesting allows us to break the full covariance matrix into per-cluster block matrices, which speeds up the already demanding computation. Furthermore, the computational complexity increases significantly when considering selection models, as the number of multivariate normal probabilities that need to be computed grows exponentially with the cluster size (clusters with more than 4 studies make the current implementation impractical). These limitations are, however, not the end of the road, as we explore other approaches (e.g., specifying only PET-PEESE-style publication bias adjustment, and other dependency adjustments) in another [vignette](MetaRegression.html).
+
+### Footnotes
+
+$^1$ We could also model-average across the hierarchical structure assuming fixed effect models, i.e., $\tau \sim f(.)$ and $\rho = 1$. However, specifying such a model ensemble is beyond the scope of this vignette; see the [Custom ensembles](CustomEnsembles.html) vignette for some hints.
+
+### References
diff --git a/vignettes/MedicineBMA.Rmd b/vignettes/MedicineBMA.Rmd
index a385af00..7440bc51 100644
--- a/vignettes/MedicineBMA.Rmd
+++ b/vignettes/MedicineBMA.Rmd
@@ -17,7 +17,7 @@ vignette: >
 ```{r setup, include = FALSE}
 is_check <- ("CheckExEnv" %in% search()) ||
             any(c("_R_CHECK_TIMINGS_", "_R_CHECK_LICENSE_") %in% names(Sys.getenv())) ||
-            !file.exists("../models/ReproducingBMA/BMA_PowerPoseTest.RDS")
+            !file.exists("../models/MedicineBMA/fit_BMA.RDS")
 knitr::opts_chunk$set(
   collapse = TRUE,
   comment = "#>",
diff --git a/vignettes/ReproducingBMA.Rmd b/vignettes/ReproducingBMA.Rmd
index e2e04e02..8d60bcbe 100644
--- a/vignettes/ReproducingBMA.Rmd
+++ b/vignettes/ReproducingBMA.Rmd
@@ -68,7 +68,7 @@ saveRDS(fit_RoBMA_test, file = "../models/ReproducingBMA/PowerPoseTest.RDS")
 saveRDS(fit_RoBMA_est, file = "../models/ReproducingBMA/PowerPoseEst.RDS")
 ```
-By default, the package estimates an ensemble of 36 meta-analytic models and provides functions for convenient manipulation with the fitted object. However, it has been built in a way that it can be used as a framework for estimating any combination of meta-analytic models (or a single model). Here, we illustrate how to build a custom ensemble of meta-analytic models - specifically the same ensemble that is used in 'classical' Bayesian Model-Averaged Meta-Analysis [@gronau2017bayesian; @gronau2020primer; @bartos2021bayesian]. See [this vignette](CustomEnsembles.html) if you are interested in building more customized ensembles or @bartos2020adjusting for a tutorial on fitting (custom) models in JASP.
+By default, the RoBMA package estimates an ensemble of 36 meta-analytic models and provides functions for convenient manipulation with the fitted object. However, the package has been designed so it can be used as a framework for estimating any combination of meta-analytic models (or a single model). Here, we illustrate how to build a custom ensemble of meta-analytic models - specifically the same ensemble that is used in 'classical' Bayesian Model-Averaged Meta-Analysis [@gronau2017bayesian; @gronau2020primer; @bartos2021bayesian]. See [this vignette](CustomEnsembles.html) if you are interested in building more customized ensembles or @bartos2020adjusting for a tutorial on fitting (custom) models in JASP.
### Reproducing Bayesian Model-Averaged Meta-Analysis (BMA) diff --git a/vignettes/Tutorial.Rmd b/vignettes/Tutorial.Rmd new file mode 100644 index 00000000..89d47d59 --- /dev/null +++ b/vignettes/Tutorial.Rmd @@ -0,0 +1,270 @@ +--- +title: "Tutorial: Adjusting for Publication Bias in JASP and R - Selection Models, PET-PEESE, and Robust Bayesian Meta-Analysis" +author: "František Bartoš, Maximilian Maier, Daniel S. Quintana & Eric-Jan Wagenmakers" +date: "2022" +output: + rmarkdown::html_vignette: + self_contained: yes +bibliography: ../inst/REFERENCES.bib +csl: ../inst/apa.csl +vignette: > + %\VignetteIndexEntry{Tutorial: Adjusting for Publication Bias in JASP and R - Selection Models, PET-PEESE, and Robust Bayesian Meta-Analysis} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} + %\VignetteEngine{knitr::rmarkdown_notangle} +--- + +```{r setup, include = FALSE} +is_check <- ("CheckExEnv" %in% search()) || + any(c("_R_CHECK_TIMINGS_", "_R_CHECK_LICENSE_") %in% names(Sys.getenv())) || + !file.exists("../models/Tutorial/fit_RoBMA_Lui2015.RDS") +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + eval = !is_check, + dev = "png") +if(.Platform$OS.type == "windows"){ + knitr::opts_chunk$set(dev.args = list(type = "cairo")) +} +``` +```{r include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +data("Lui2015", package = "RoBMA") +df <- Lui2015 +# preload the fitted model +fit_RoBMA <- readRDS(file = "../models/Tutorial/fit_RoBMA_Lui2015.RDS") +fit_RoBMA2 <- readRDS(file = "../models/Tutorial/fit_RoBMA_perinull_Lui2015.RDS") +``` +```{r include = FALSE, eval = FALSE} +# R package version updating +library(RoBMA) +data("Lui2015", package = "RoBMA") +df <- Lui2015 +fit_RoBMA <- RoBMA(r = df$r, n = df$n, seed = 1, model = "PSMA", parallel = TRUE, save = "min") + +fit_RoBMA2 <- RoBMA(r = df$r, n = df$n, seed = 2, parallel = TRUE, save = "min", + priors_effect = prior("normal", parameters = list(mean = 0.60, sd = 0.20), truncation = list(0, Inf)), + priors_effect_null = prior("normal", parameters = list(mean = 0, sd = 0.10))) + +saveRDS(fit_RoBMA, file = "../models/Tutorial/fit_RoBMA_Lui2015.RDS") +saveRDS(fit_RoBMA2, file = "../models/Tutorial/fit_RoBMA_perinull_Lui2015.RDS") +``` + +**This R markdown file accompanies the tutorial [Adjusting for publication bias in JASP and R: Selection models, PET-PEESE, and robust Bayesian meta-analysis](https://doi.org/10.1177/25152459221109259) published in *Advances in Methods and Practices in Psychological Science* [@bartos2020adjusting].** + + +The following R-markdown file illustrates how to: + +- Load a CSV file into R, +- Transform effect sizes, +- Perform a random effect meta-analysis, +- Adjust for publication bias with: + - PET-PEESE [@stanley2014meta; @stanley2017limitations], + - Selection models [@iyengar1988selection; @vevea1995general], + - Robust Bayesian meta-analysis (RoBMA) [@maier2020robust; @bartos2021no]. + +See the full paper for additional details regarding the data set, methods, and interpretation. + + +### Set-up + +Before we start, we need to install `JAGS` (which is needed for installation of the `RoBMA` package) and the R packages that we use in the analysis. Specifically the `RoBMA`, `weightr`, and `metafor` R packages. + +JAGS can be downloaded from the [JAGS website](https://sourceforge.net/projects/mcmc-jags/). Subsequently, we install the R packages with the `install.packages()` function. 
```r
install.packages(c("RoBMA", "weightr", "metafor"))
```
If you happen to use the new M1 Mac machines with Apple silicon, see [this blogpost](https://www.dsquintana.blog/jags-apple-silicon-m1-mac/) outlining how to install JAGS on M1. In short, you will have to install the Intel version of R (Intel/x86-64) from [CRAN](https://cran.r-project.org/bin/macosx/), not the Arm64 (Apple silicon) version.

Once all of the packages are installed, we can load them into the workspace with the `library()` function.

```{r, message = FALSE}
library("metafor")
library("weightr")
library("RoBMA")
```


### Lui (2015)
@lui2015intergenerational studied how acculturation mismatch (AM), which results from the contrast between the collectivist cultures of Asian and Latin immigrant groups and the individualist culture in the United States, correlates with intergenerational cultural conflict (ICC). @lui2015intergenerational meta-analyzed 18 independent studies correlating AM with ICC. A standard reanalysis indicates a significant effect of AM on increased ICC, r = 0.250, p < .001.

#### Data manipulation
First, we load the Lui2015.csv file into R with the `read.csv()` function and inspect the first six data entries with the `head()` function (the data set is also included in the package and can be accessed via the `data("Lui2015", package = "RoBMA")` call).

```r
df <- read.csv(file = "Lui2015.csv")
```
```{r}
head(df)
```

We see that the data set contains three columns. The first column called `r` contains the effect sizes coded as correlation coefficients, the second column called `n` contains the sample sizes, and the third column called `study` contains the names of the individual studies.

We can access the individual variables using the data set name and the dollar (`$`) sign followed by the name of the column. For example, we can print all of the effect sizes with the `df$r` command.

```{r}
df$r
```

The printed output shows that the data set contains mostly positive effect sizes, with the largest correlation coefficient r = 0.54.

#### Effect size transformations
Before we start analyzing the data, we transform the effect sizes from correlation coefficients $\rho$ to Fisher's *z*. Correlation coefficients are not well suited for meta-analysis because (1) they are bounded to the (-1, 1) range with non-linear increases near the boundaries and (2) the standard error of a correlation coefficient is related to the effect size. Fisher's *z* transformation mitigates both issues. It unwinds the (-1, 1) range to ($-\infty$, $\infty$), makes the sampling distribution approximately normal, and breaks the dependency between standard errors and effect sizes.

To apply the transformation, we use the `combine_data()` function from the `RoBMA` package. We pass the correlation coefficients into the `r` argument, the sample sizes to the `n` argument, and set the `transformation` argument to `"fishers_z"` (the `study_names` argument is optional). The `combine_data()` function then saves the transformed effect size estimates into a data frame called `dfz`, where the `y` column corresponds to Fisher's *z* transformation of the correlation coefficient and the `se` column corresponds to the standard error of Fisher's *z*.

```{r}
dfz <- combine_data(r = df$r, n = df$n, study_names = df$study, transformation = "fishers_z")
head(dfz)
```

We can also transform the effect sizes according to Cohen's *d* transformation (which we utilize later to fit the selection models).
+We can also transform the effect sizes according to Cohen's *d* transformation (which we utilize later to fit the selection models).
+
+```{r}
+dfd <- combine_data(r = df$r, n = df$n, study_names = df$study, transformation = "cohens_d")
+head(dfd)
+```
+
+
+#### Re-analysis with random effect meta-analysis
+We now estimate a random effect meta-analysis with the `rma()` function imported from the `metafor` package [@metafor] and verify that we arrive at the same results as reported in the @lui2015intergenerational paper. The `yi` argument is used to pass the column name containing effect sizes, the `sei` argument is used to pass the column name containing standard errors, and the `data` argument is used to pass the data frame containing both variables.
+
+```{r}
+fit_rma <- rma(yi = y, sei = se, data = dfz)
+fit_rma
+```
+
+Indeed, we find that the effect size estimate from the random effect meta-analysis corresponds to the one reported in the @lui2015intergenerational paper. It is important to remember that we used Fisher's *z* to estimate the models; therefore, the estimated results are on the Fisher's *z* scale. To transform the effect size estimate to the correlation scale, we can use the `z2r()` function from the `RoBMA` package:
+
+```{r}
+z2r(fit_rma$b)
+```
+
+Transforming the effect size estimate results in the correlation coefficient $\rho$ = 0.25.
+
+
+### PET-PEESE
+The first publication bias adjustment that we perform is PET-PEESE. PET-PEESE adjusts for the relationship between effect sizes and standard errors. To our knowledge, PET-PEESE is not currently implemented in any R package. However, since PET and PEESE are weighted regressions of effect sizes on standard errors (PET) or standard errors squared (PEESE), we can estimate both PET and PEESE models with the `lm()` function. Inside the `lm()` function call, we specify that `y` is the response variable (left-hand side of the `~` sign) and `se` is the predictor (the right-hand side). Furthermore, we specify the `weights` argument, which allows us to weight the meta-regression by the inverse variance, and set the `data = dfz` argument, which specifies that all of the variables come from the transformed `dfz` data set.
+
+```{r}
+fit_PET <- lm(y ~ se, weights = 1/se^2, data = dfz)
+summary(fit_PET)
+```
+
+The `summary()` function allows us to explore details of the fitted model. The `(Intercept)` coefficient refers to the meta-analytic effect size (corrected for the correlation with standard errors). Again, it is important to keep in mind that the effect size estimate is on the Fisher's *z* scale. We obtain the estimate on the correlation scale with the `z2r()` function (we pass the estimated effect size using the `summary(fit_PET)$coefficients["(Intercept)", "Estimate"]` command, which extracts the estimate from the fitted model; this is equivalent to simply pasting the value directly, `z2r(-0.0008722083)`).
+
+```{r}
+z2r(summary(fit_PET)$coefficients["(Intercept)", "Estimate"])
+```
+Since the Fisher's *z* transformation is almost linear around zero, we obtain an almost identical estimate.
+
+More importantly, since the test for the effect size with PET was not significant at $\alpha = .10$, we interpret the PET model. However, if the test for the effect size were significant, we would fit and interpret the PEESE model.
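+
+The same decision rule can be written out programmatically. The following sketch uses a one-sided test of the PET intercept at $\alpha = .10$ (matching the threshold above); the object names and the exact convention are illustrative only.
+
+```r
+# conditional PET-PEESE: interpret PEESE only when the PET intercept is
+# significantly larger than zero in a one-sided test at alpha = .10
+PET_coefs <- summary(fit_PET)$coefficients
+PET_est   <- PET_coefs["(Intercept)", "Estimate"]
+PET_p_two <- PET_coefs["(Intercept)", "Pr(>|t|)"]
+PET_p_one <- if(PET_est > 0) PET_p_two / 2 else 1 - PET_p_two / 2
+
+if(PET_p_one < .10){
+  message("PET intercept significant: fit and interpret PEESE")
+}else{
+  message("PET intercept not significant: interpret PET")
+}
+```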
+The PEESE model can be fitted in an analogous way, by replacing the predictor of standard errors with standard errors squared (we need to wrap the `se^2` predictor in `I()`, which tells R to square the predictor prior to fitting the model).
+
+```{r}
+fit_PEESE <- lm(y ~ I(se^2), weights = 1/se^2, data = dfz)
+summary(fit_PEESE)
+```
+
+
+### Selection models
+The second publication bias adjustment that we will perform is selection models. Selection models adjust for the different publication probabilities in different *p*-value intervals. Selection models are implemented in the `weightr` package (`weightfunct()` function; @weightr) and newly also in the `metafor` package (`selmodel()` function; @metafor). First, we use the `weightr` implementation and fit the "4PSM" selection model, which specifies three distinct *p*-value intervals: (1) covering the range of significant *p*-values for effect sizes in the expected direction (0.00-0.025), (2) covering the range of "marginally" significant *p*-values for effect sizes in the expected direction (0.025-0.05), and (3) covering the range of non-significant *p*-values (0.05-1). We use the Cohen's *d* transformation of the correlation coefficients since it is better at maintaining the distribution of test statistics. To fit the model, we need to pass the effect sizes (`dfd$y`) into the `effect` argument and the variances (`dfd$se^2`) into the `v` argument (note that we need to pass the vectors of values directly since the `weightfunct()` function does not accept a data frame, unlike the previous functions). We further set `steps = c(0.025, 0.05)` to specify the appropriate cut-points (note that the steps correspond to one-sided *p*-values), and we set `table = TRUE` to obtain the frequency of *p*-values in each of the specified intervals.
+
+```{r}
+fit_4PSM <- weightfunct(effect = dfd$y, v = dfd$se^2, steps = c(0.025, 0.05), table = TRUE)
+fit_4PSM
+```
+
+Note the warning message informing us that our data do not contain a sufficient number of *p*-values in one of the *p*-value intervals. The model output obtained by printing the fitted model object `fit_4PSM` shows that there is only one *p*-value in the (0.025, 0.05) interval. We can deal with this issue by joining the "marginally" significant and non-significant *p*-value intervals, resulting in the "3PSM" model.
+
+```{r}
+fit_3PSM <- weightfunct(effect = dfd$y, v = dfd$se^2, steps = c(0.025), table = TRUE)
+fit_3PSM
+```
+
+The new model does not suffer from the estimation problem caused by the limited number of *p*-values in the intervals, so we can now interpret the results with more confidence. First, we check the test for heterogeneity, which clearly rejects the null hypothesis, `Q(df = 17) = 75.4999, p = 5.188348e-09` (if we did not find evidence for heterogeneity, we could have proceeded by fitting the fixed-effect version of the model by specifying the `fe = TRUE` argument). We follow by checking the test for publication bias, which is a likelihood ratio test comparing the unadjusted and adjusted models, `X^2(df = 1) = 3.107176, p = 0.077948`. The result of the test is slightly ambiguous -- we would reject the null hypothesis of no publication bias with $\alpha = 0.10$ but not with $\alpha = 0.05$.
+
+If we decide to interpret the estimated effect size, we again have to transform it back to the correlation scale. However, this time we need to use the `d2r()` function since we supplied the effect sizes as Cohen's *d* (note that the effect size estimate corresponds to the second value in the `fit_3PSM$adj_est` object for the random effect model; alternatively, we could simply use `d2r(0.3219641)`).
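+
+For reference, converting Cohen's *d* back to a correlation coefficient typically uses the formula $\rho = d / \sqrt{d^2 + 4}$ (which assumes equal group sizes), so a quick manual check along those lines should closely match the `d2r()` output; the snippet below is illustrative only.
+
+```r
+# manual check of the Cohen's d -> correlation conversion (equal group sizes assumed)
+d_est <- 0.3219641
+d_est / sqrt(d_est^2 + 4)
+```
+
+Applying `d2r()` to the estimate stored in the fitted model gives the adjusted correlation coefficient: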
+
+```{r}
+d2r(fit_3PSM$adj_est[2])
+```
+
+Alternatively, we could have conducted an analogous analysis with the `metafor` package. First, we would fit a random effect meta-analysis with the Cohen's *d* transformed effect sizes.
+
+```{r}
+fit_rma_d <- rma(yi = y, sei = se, data = dfd)
+```
+Subsequently, we would use the `selmodel()` function, passing the estimated random effect meta-analysis object, specifying the `type = "stepfun"` argument to obtain a step weight function, and setting the appropriate steps with the `steps = c(0.025)` argument.
+
+```{r}
+fit_sel_d <- selmodel(fit_rma_d, type = "stepfun", steps = c(0.025))
+fit_sel_d
+```
+
+The output verifies the results obtained in the previous analysis.
+
+
+### Robust Bayesian meta-analysis
+The third and final publication bias adjustment that we will perform is robust Bayesian meta-analysis (RoBMA). RoBMA uses Bayesian model-averaging to combine inference from both PET-PEESE and selection models. We use the `RoBMA` R package (and the `RoBMA()` function; @RoBMA) to fit the default 36-model ensemble (called RoBMA-PSMA) based on an orthogonal combination of models assuming the presence and absence of the effect size, heterogeneity, and publication bias. The models assuming the presence of publication bias are further split into six weight function models and models utilizing the PET and PEESE publication bias adjustments. To fit the model, we can directly pass the original correlation coefficients into the `r` argument and the sample sizes into the `n` argument -- the `RoBMA()` function will internally transform them to the Fisher's *z* scale and, by default, return the estimates on the Cohen's *d* scale, which is also used to specify the prior distributions (both of these settings can be changed with the `prior_scale` and `transformation` arguments, and the output can be conveniently transformed later). We further set the `model` argument to `"PSMA"` to fit the 36-model ensemble and use the `seed` argument to make the analysis reproducible (RoBMA uses MCMC sampling, in contrast to the previous methods). We turn on parallel estimation by setting the `parallel = TRUE` argument (the parallel processing might occasionally fail; in that case, try rerunning the model once more or turning the parallel processing off).
+
+```r
+fit_RoBMA <- RoBMA(r = df$r, n = df$n, seed = 1, model = "PSMA", parallel = TRUE)
+```
+This step can take some time depending on your CPU. For example, it takes around one minute on a fast CPU (e.g., AMD Ryzen 3900x 12c/24t) and up to ten minutes or longer on slower CPUs (e.g., a 2.7 GHz Intel Core i5).
+
+We use the `summary()` function to explore details of the fitted model.
+
+```{r}
+summary(fit_RoBMA)
+```
+
+The printed output consists of two parts. The first table, called `Components summary`, contains information about the fitted models. It tells us that we estimated the ensemble with 18/36 models assuming the presence of an effect, 18/36 models assuming the presence of heterogeneity, and 32/36 models assuming the presence of publication bias. The second column summarizes the prior model probabilities of the models assuming the presence of the individual components -- here, we see that the presence and absence of each component is balanced a priori. The third column contains information about the posterior probability of the models assuming the presence of the components -- we can observe that the posterior model probability of the models assuming the presence of an effect slightly increased to 0.552. The last column contains information about the evidence in favor of the presence of each of those components. Evidence for the presence of an effect is undecided; the models assuming the presence of an effect are only 1.232 times more likely given the data than the models assuming the absence of an effect. However, we find overwhelming evidence in favor of heterogeneity, with the models assuming the presence of heterogeneity being 19,168 times more likely given the data than the models assuming the absence of heterogeneity, and moderate evidence in favor of publication bias.
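+
+The inclusion Bayes factors in the last column follow directly from these probabilities: an inclusion Bayes factor is the ratio of the posterior to the prior inclusion odds. A quick sketch using the rounded values printed above for the effect component (small discrepancies from the reported value are due to rounding):
+
+```r
+# inclusion Bayes factor = posterior inclusion odds / prior inclusion odds
+prior_prob <- 0.500
+post_prob  <- 0.552
+(post_prob / (1 - post_prob)) / (prior_prob / (1 - prior_prob))
+```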
+
+As the name indicates, the second table, called `Model-averaged estimates`, contains information about the model-averaged estimates. The first row, labeled `mu`, corresponds to the model-averaged effect size estimate (on the Cohen's *d* scale) and the second row, labeled `tau`, corresponds to the model-averaged heterogeneity estimate. Below these are the estimated model-averaged weights for the different *p*-value intervals and the PET and PEESE regression coefficients. We convert the estimates to correlation coefficients by adding the `output_scale = "r"` argument to the summary function.
+
+```{r}
+summary(fit_RoBMA, output_scale = "r")
+```
+
+Now we obtain the model-averaged effect size estimate on the correlation scale. If we were interested in estimates model-averaged only across the models assuming the presence of an effect (for the effect size estimate), heterogeneity (for the heterogeneity estimate), and publication bias (for the publication bias weights and the PET and PEESE regression coefficients), we could add the `conditional = TRUE` argument to the summary function. A quick textual summary of the model can also be generated with the `interpret()` function.
+
+```{r}
+interpret(fit_RoBMA, output_scale = "r")
+```
+
+We can also obtain summary information about the individual models by specifying the `type = "models"` option. The resulting table shows the prior and posterior model probabilities and inclusion Bayes factors for the individual models (we also set the `short_name = TRUE` argument, which reduces the width of the output by abbreviating the names of the prior distributions).
+
+```{r}
+summary(fit_RoBMA, type = "models", short_name = TRUE)
+```
+
+To obtain a summary of the individual model diagnostics, we set `type = "diagnostics"`. The resulting table provides information about the maximum MCMC error, relative MCMC error, minimum ESS, and maximum R-hat when aggregating over the parameters of each model. As we can see, we obtain acceptable ESS and R-hat diagnostic values.
+
+```{r}
+summary(fit_RoBMA, type = "diagnostics")
+```
+
+Finally, we can also plot the model-averaged posterior distribution with the `plot()` function. We set the `prior = TRUE` argument to include the prior distribution as a grey line (and an arrow for the point density at zero) and `output_scale = "r"` to transform the posterior distribution to the correlation scale (the default figure output would be on the Cohen's *d* scale). (The `par(mar = c(4, 4, 1, 4))` call increases the right margin of the figure so that the secondary y-axis text is not cut off.)
+
+```{r, dpi = 300, fig.width = 6, fig.height = 4, out.width = "50%", fig.align = "center"}
+par(mar = c(4, 4, 1, 4))
+plot(fit_RoBMA, prior = TRUE, output_scale = "r")
+```
+
+#### Specifying Different Priors
+The `RoBMA` package allows us to fit ensembles of highly customized meta-analytic models.
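+
+Custom prior distributions are created with the `prior()` function, and the resulting prior objects can be inspected before they are passed to `RoBMA()`. For instance, the two effect size priors used in the next example can be plotted as follows (an optional illustration; the exact appearance of the plots may differ slightly across package versions):
+
+```r
+# prior for the models assuming the presence of a positive effect ...
+plot(prior("normal", parameters = list(mean = 0.60, sd = 0.20), truncation = list(0, Inf)))
+# ... and the perinull prior for the models assuming a negligible effect
+plot(prior("normal", parameters = list(mean = 0, sd = 0.10)))
+```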
+Here we reproduce the ensemble for the perinull directional hypothesis test from the Appendix (see the R package vignettes for more examples and details). Instead of using the fully pre-specified model with the `model = "PSMA"` argument, we explicitly specify the prior distribution for the models assuming the presence of the effect with the `priors_effect = prior("normal", parameters = list(mean = 0.60, sd = 0.20), truncation = list(0, Inf))` argument, which assigns a Normal(0.60, 0.20) distribution truncated to positive numbers to the $\mu$ parameter (note that the prior distribution is specified on the Cohen's *d* scale, corresponding to 95% prior probability mass contained approximately in the $\rho$ = (0.10, 0.45) interval). Similarly, we exchange the default prior distribution for the models assuming the absence of the effect for a perinull hypothesis with the `priors_effect_null = prior("normal", parameters = list(mean = 0, sd = 0.10))` argument, which places 95% of the prior probability mass on values in the $\rho$ = (-0.10, 0.10) interval.
+
+```r
+fit_RoBMA2 <- RoBMA(r = df$r, n = df$n, seed = 2, parallel = TRUE,
+                    priors_effect = prior("normal", parameters = list(mean = 0.60, sd = 0.20), truncation = list(0, Inf)),
+                    priors_effect_null = prior("normal", parameters = list(mean = 0, sd = 0.10)))
+```
+
+As before, we can use the `summary()` function to inspect the model fit and verify that the specified models correspond to the intended settings.
+
+```{r}
+summary(fit_RoBMA2, type = "models")
+```
+
+
+### References