1 Load packages

library("knitr")       # for RMarkdown commands 
library("kableExtra")  # for nice tables
library("janitor")     # for cleaning column names 
library("brms")        # for Bayesian regression modeling
library("tidybayes")   # for Bayesian regression modeling
library("broom.mixed") # for tidy regression results
library("xtable")      # for latex tables
library("ggeffects")   # for marginal effects 
library("car")         # for ANOVAs
library("lme4")        # for linear mixed effects models
library("effectsize")  # for calculating effect sizes
library("pwr")         # for power analysis
library("patchwork")   # for figure panels
library("tidyverse")   # for data wrangling, visualization, etc. 
# set ggplot theme 
theme_set(theme_classic())

# set knitr options
opts_chunk$set(comment = "",
               fig.show = "hold")

# suppress summarise() grouping warning 
# (use FALSE, not F: F is an ordinary variable and can be reassigned)
options(dplyr.summarise.inform = FALSE)

# use effects contrast coding to interpret effects from categorical variables as main effects 
options(contrasts = c("contr.sum", "contr.poly"))

2 Functions

# function for printing out html or latex tables 
# Print a data frame as a formatted table.
#
# data:   data frame to render
# format: "html" (kable + kableExtra styling) or "latex" (xtable, printed with
#         booktabs rules and a placeholder caption/label)
# digits: number of digits to round numeric columns to
print_table = function(data, format = "html", digits = 2){
  if(format == "html"){
    data %>% 
      kable(digits = digits) %>% 
      kable_styling()
  }else if(format == "latex"){
    data %>% 
      xtable(digits = digits,
             caption = "Caption",
             label = "tab:table") %>%
      print(include.rownames = FALSE,
            booktabs = TRUE,
            sanitize.colnames.function = identity,
            caption.placement = "top")
  }else{
    # previously an unknown format silently returned NULL
    stop("format must be 'html' or 'latex'", call. = FALSE)
  }
}

3 Experiment 1: Normality inference

3.1 Read in data

# CONJUNCTIVE STRUCTURE: STATISTICAL NORMS 
# Read the raw Qualtrics export and keep only complete, non-preview responses.
df.stat_con_data = read.csv(file = "../../data/statistical_conjunctive.csv",
                   stringsAsFactors = FALSE) %>% 
  filter(row_number() > 2) %>% # drop the two extra header rows Qualtrics adds to its csv export
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude preview trials
         ethnicity != "") # exclude participants who did not make it to demographics, i.e. incomplete data

# Recode all responses into a common reference frame:
# - reference test clip: the dark motion block is at the top ("dt"); recode
#   the "db" condition, where the dark block is at the bottom
# - reference slider: the picture with the dark block at the top is on the
#   right of the slider (0 = "Normal", 100 = "Abnormal"); recode conditions 1 and 4
df.stat_con_judgments = df.stat_con_data %>% 
  mutate(participant = 1:n()) %>% 
  select(dt_ball_a_1:b_dark_top_right_1, contains("dark_thru"), condition, participant) %>% 
  # long format: one row per participant x question; empty cells belong to
  # questions this participant's condition did not show, so drop them
  pivot_longer(cols = -c(participant, condition),
               names_to = "index",
               values_to = "value") %>% 
  filter(value != "") %>% 
  # collapse the condition-specific column names into common question labels
  mutate(index = ifelse(str_detect(index, "conjunctive"),
                        "conjunctive",
                        index),
         # NOTE(review): str_extract_all() returns a list-column; the
         # elementwise comparisons below still work, but str_extract() would
         # yield a plain character column — confirm before changing
         position = str_extract_all(index, "db|dt"),
         index = ifelse(str_detect(index, "thru"),
                        "selection",
                        index),
         index = ifelse(str_detect(index, "top"),
                        "judgment",
                        index),
         index = str_remove(index, "db_"),
         index = str_remove(index, "dt_"),
         index = str_remove(index, "_1"),
         # map the verbal selection responses onto the numeric 1/2 coding
         value = ifelse(value == "Ball E did go through the gate because ball A did go through the motion block.",
                        "1",
                        ifelse(value == "Ball E did go through the gate because ball B did go through the motion block.",
                               2,
                               value))) %>% 
  mutate(value = as.numeric(value),
         # flip sliders (100 - x) and selections (3 - x swaps codes 1 and 2)
         # for the "db" condition so every row shares the reference frame
         value = ifelse(test = (position == "db" & str_detect(index, "ball")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((position == "db" & str_detect(index, "selection")),
                        3 - value,
                        value)) %>% 
  select(-position) %>% 
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(selection = factor(selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal"))) %>%
  # flip the judgment slider for conditions 1 and 4 (abnormal picture on the right)
  mutate(judgment = ifelse(condition %in% c(1, 4),
                           100 - judgment,
                           judgment)) %>% 
  rename(ball_abnormal = ball_a,
         ball_normal = ball_b) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude = 0) only participants with
  # ball_abnormal < ball_normal AND conjunctive < 50; everyone else exclude = 1
  mutate(exclude = ifelse((ball_abnormal < ball_normal) & 
         (conjunctive < 50), 0, 1))

# CONJUNCTIVE STRUCTURE: PRESCRIPTIVE NORMS 
# Read the raw Qualtrics export and keep only complete, non-preview responses.
df.pres_con_data = read.csv(file = "../../data/prescriptive_conjunctive.csv",
                   stringsAsFactors = FALSE) %>% 
  filter(row_number() > 2) %>% # drop the two extra header rows Qualtrics adds to its csv export
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude preview trials
         ethnicity != "") # exclude participants who did not make it to demographics, i.e. incomplete survey

# Recode all responses into a common reference frame:
# - reference clip: Billy is the abnormal agent, Suzy the normal agent
#   (recode norm_condition "2")
# - judgment scale: the agent selected as abnormal sits at the top of the
#   scale (recode learn_condition "2")

df.pres_con_judgments = df.pres_con_data %>% 
  mutate(participant = 1:n()) %>% 
  select(b_abnorm_billy_1:suzy_b_abnorm_right_1,
         learn_condition,
         condition,
         participant,
         -starts_with("q"),
         -starts_with("timing")) %>% 
  # long format: one row per participant x question; drop unseen questions
  pivot_longer(cols = -c(participant, learn_condition, condition),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>%
  # collapse condition-specific column names into common question labels
  mutate(index = ifelse(str_detect(index, "left|right"),
                        "judgment",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_billy_1|s_abnorm_billy_1"),
                        "agreement_Billy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_suzy_1|s_abnorm_suzy_1"),
                        "agreement_Suzy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conjunctive"),
                        "conjunctive",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "selection"),
                        "selection",
                        index))%>% 
  mutate(value = as.numeric(value),
         # flip agreement sliders (100 - x) and selections (3 - x swaps 1/2)
         # for learn_condition "2" so every row shares the reference frame
         value = ifelse(test = (learn_condition == "2" & str_detect(index, "agreement")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((learn_condition == "2" & str_detect(index, "selection")),
                        3 - value,
                        value))%>% 
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(selection = factor(selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal"))) %>%
  # flip the judgment slider for conditions 1 and 4
  mutate(judgment = ifelse(condition %in% c(1, 4),
                           100 - judgment,
                           judgment)) %>% 
  rename(agreement_abnormal_agent = agreement_Billy,
         agreement_normal_agent = agreement_Suzy) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude = 0) only participants with
  # agreement_abnormal_agent < agreement_normal_agent AND conjunctive < 50
  mutate(exclude = ifelse((agreement_abnormal_agent < agreement_normal_agent) &
         (conjunctive < 50), 0, 1))

# DISJUNCTIVE STRUCTURE: STATISTICAL NORMS 
# Read the raw Qualtrics export and keep only complete, non-preview responses.
df.stat_dis_data = read.csv(file = "../../data/statistical_disjunctive.csv",
                   stringsAsFactors = FALSE) %>% 
  filter(row_number() > 2) %>% # drop the two extra header rows Qualtrics adds to its csv export
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude preview trials
         ethnicity != "") # exclude participants who did not make it to demographics

# Recode all responses into a common reference frame:
# - reference test clip: the dark motion block is at the top ("dt"); recode
#   the "db" condition, where the dark block is at the bottom
# - reference slider: the picture with the dark block at the top is on the
#   right of the slider (0 = "Normal", 100 = "Abnormal"); recode conditions 1 and 4
df.stat_dis_judgments = df.stat_dis_data %>% 
  mutate(participant = 1:n()) %>% 
  select(dt_ball_a_1:b_dark_top_right_1,
         contains("dark_thru"),
         condition,
         participant) %>% 
  # long format: one row per participant x question; drop unseen questions
  pivot_longer(cols = -c(participant, condition),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>% 
  # collapse the condition-specific column names into common question labels
  mutate(index = ifelse(str_detect(index, "disjunctive"),
                        "disjunctive",
                        index),
         # NOTE(review): str_extract_all() returns a list-column; the
         # elementwise comparisons below still work — see statistical
         # conjunctive pipeline for the same pattern
         position = str_extract_all(index, "db|dt"),
         index = ifelse(str_detect(index, "thru"),
                        "selection",
                        index),
         index = ifelse(str_detect(index, "top"),
                        "judgment",
                        index),
         index = str_remove(index, "db_"),
         index = str_remove(index, "dt_"),
         index = str_remove(index, "_1"),
         # map the verbal selection responses onto the numeric 1/2 coding
         value = ifelse(value == "Ball E did go through the gate because ball A did go through the motion block.",
                        "1",
                        ifelse(value == "Ball E did go through the gate because ball B did go through the motion block.",
                               2,
                               value))) %>% 
  mutate(value = as.numeric(value),
         # flip sliders (100 - x) and selections (3 - x swaps codes 1 and 2)
         # for the "db" condition so every row shares the reference frame
         value = ifelse(test = (position == "db" & str_detect(index, "ball")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((position == "db" & str_detect(index, "selection")),
                        3 - value,
                        value)) %>% 
  select(-position) %>% 
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(selection = factor(selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal"))) %>%
  # flip the judgment slider for conditions 1 and 4 (abnormal picture on the right)
  mutate(judgment = ifelse(condition %in% c(1, 4),
                           100 - judgment,
                           judgment)) %>% 
  rename(ball_abnormal = ball_a,
         ball_normal = ball_b) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude = 0) only participants with
  # ball_abnormal < ball_normal AND disjunctive > 50 (note: direction of the
  # structure check is opposite to the conjunctive condition)
  mutate(exclude = ifelse((ball_abnormal < ball_normal) & 
         (disjunctive > 50), 0, 1))

# DISJUNCTIVE STRUCTURE: PRESCRIPTIVE NORMS 
# Read the raw Qualtrics export and keep only complete, non-preview responses.
df.pres_dis_data = read.csv(file = "../../data/prescriptive_disjunctive.csv",
                   stringsAsFactors = FALSE) %>% 
  filter(row_number() > 2) %>% # drop the two extra header rows Qualtrics adds to its csv export
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude preview trials
         age != "") # exclude participants who did not make it to demographics

# Recode all responses into a common reference frame:
# - reference clip: Billy is the abnormal agent, Suzy the normal agent
#   (recode norm_condition "2")
# - judgment scale: the agent selected as abnormal sits at the top of the
#   scale (100) (recode learn_condition "2")

df.pres_dis_judgments = df.pres_dis_data %>% 
  mutate(participant = 1:n()) %>% 
  select(b_abnorm_billy_1:suzy_b_abnorm_right_1,
         learn_condition,
         condition,
         participant,
         -starts_with("q"),
         -starts_with("timing")) %>% 
  # long format: one row per participant x question; drop unseen questions
  pivot_longer(cols = -c(participant, learn_condition, condition),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>%
  # collapse condition-specific column names into common question labels
  mutate(index = ifelse(str_detect(index, "left|right"),
                        "judgment",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_billy_1|s_abnorm_billy_1"),
                        "agreement_Billy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_suzy_1|s_abnorm_suzy_1"),
                        "agreement_Suzy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disjunctive"),
                        "disjunctive",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "selection"),
                        "selection",
                        index))%>% 
  mutate(value = as.numeric(value),
         # flip agreement sliders (100 - x) and selections (3 - x swaps 1/2)
         # for learn_condition "2" so every row shares the reference frame
         value = ifelse(test = (learn_condition == "2" & str_detect(index, "agreement")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((learn_condition == "2" & str_detect(index, "selection")),
                        3 - value,
                        value))%>% 
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(selection = factor(selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal"))) %>%
  # flip the judgment slider for conditions 1 and 4
  mutate(judgment = ifelse(condition %in% c(1, 4),
                           100 - judgment,
                           judgment)) %>% 
  rename(agreement_abnormal_agent = agreement_Billy,
         agreement_normal_agent = agreement_Suzy) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude = 0) only participants with
  # agreement_abnormal_agent < agreement_normal_agent AND disjunctive > 50
  mutate(exclude = ifelse((agreement_abnormal_agent < agreement_normal_agent) &
         (disjunctive > 50), 0, 1))

# COMBINE THE DATA 
# Stack the four cells (norm x structure) into one data frame with harmonized
# column names (abnormal_rating, normal_rating, structure_rating), apply the
# exclusion criteria, and renumber participants.
df.normality = df.stat_con_judgments %>% 
  mutate(structure = "conjunctive") %>% 
  rename(structure_question = conjunctive) %>% 
  bind_rows(df.stat_dis_judgments %>% 
              mutate(structure = "disjunctive") %>% 
              rename(structure_question = disjunctive)) %>% 
  mutate(norm = "statistical") %>% 
  rename(abnormal_rating = ball_abnormal, 
         normal_rating = ball_normal, 
         structure_rating = structure_question) %>% 
  bind_rows(df.pres_con_judgments %>% 
              mutate(structure = "conjunctive") %>% 
              rename(structure_question = conjunctive) %>% 
              bind_rows(df.pres_dis_judgments %>% 
                          mutate(structure = "disjunctive") %>% 
                          rename(structure_question = disjunctive)) %>% 
              mutate(norm = "prescriptive") %>% 
              rename(abnormal_rating = agreement_abnormal_agent, 
                     normal_rating = agreement_normal_agent,
                     structure_rating = structure_question) %>% 
              # learn_condition only exists in the prescriptive data sets
              select(- learn_condition)) %>% 
  filter(exclude == 0) %>% # filter based on exclusion criteria
  mutate(participant = 1:n()) %>% # renumber participants after exclusions
  mutate(structure = factor(structure, levels = c("conjunctive", "disjunctive")),
         norm = factor(norm, levels = c("statistical", "prescriptive"))) %>% 
  select(-condition)

3.2 Demographics

# Combine demographics from all four cells and attach each participant's
# exclusion flag from the corresponding judgment data frame (participant
# numbering matches because both are derived from the same raw data, in order).
df.normality.demo = df.stat_con_data %>% 
  select(feedback,
         age_1_text,
         gender,
         race_1_text,
         ethnicity) %>% 
  mutate(norm = "statistical",
         structure = "conjunctive",
         participant = 1:n()) %>% 
  left_join(df.stat_con_judgments %>% 
              select(participant, exclude),
            by = "participant") %>% 
  bind_rows(df.stat_dis_data %>% 
              select(feedback,
                     age_1_text,
                     gender,
                     race_1_text,
                     ethnicity) %>% 
              mutate(norm = "statistical",
                     structure = "disjunctive",
                     participant = 1:n()) %>% 
              left_join(df.stat_dis_judgments %>% 
                          select(participant, exclude),
                        by = "participant")) %>% 
  bind_rows(df.pres_con_data %>% 
              select(feedback,
                     age_1_text,
                     gender,
                     race_1_text,
                     ethnicity) %>% 
              mutate(norm = "prescriptive",
                     structure = "conjunctive",
                     participant = 1:n()) %>% 
              left_join(df.pres_con_judgments %>% 
                          select(participant, exclude),
                        by = "participant")) %>% 
  bind_rows(df.pres_dis_data %>% 
              select(feedback,
                     age_1_text,
                     gender,
                     race_1_text,
                     ethnicity) %>% 
              mutate(norm = "prescriptive",
                     structure = "disjunctive",
                     participant = 1:n()) %>% 
              left_join(df.pres_dis_judgments %>% 
                          select(participant, exclude),
                        by = "participant")) %>% 
  mutate(age = as.numeric(age_1_text),
         # some participants entered their birth year instead of an age;
         # values > 1000 are treated as birth years relative to 2019
         age = ifelse(age > 1000, 2019 - age, age),
         # recode the numeric gender responses to labels; the sequential
         # ifelse chain is safe because the raw values are the strings "1"-"4"
         gender = ifelse(gender == 1, "Female", gender),
         gender = ifelse(gender == 2, "Male", gender),
         gender = ifelse(gender == 3, "Non-binary", gender),
         gender = ifelse(gender == 4, "Prefer not to say.", gender))

# total number of excluded participants across all four cells
df.normality.demo %>% 
  summarize(n_excluded = sum(exclude == 1))
  n_excluded
1         56
# cell sizes (norm x structure) after exclusions
df.normality.demo %>% 
  filter(exclude == 0) %>%
  count(norm, structure) %>% 
  print_table()
norm structure n
prescriptive conjunctive 46
prescriptive disjunctive 41
statistical conjunctive 30
statistical disjunctive 37
# demographic summary across all participants (before exclusions);
# na.rm = TRUE because some ages are missing or unparseable
df.normality.demo %>% 
  summarize(n_total = n(),
            n_female = sum(gender == "Female"),
            n_male = sum(gender == "Male"),
            n_not_say = sum(gender == "Prefer not to say."),
            n_non_binary = sum(gender == "Non-binary"),
            mean_age = mean(age, na.rm = TRUE),
            sd_age = sd(age, na.rm = TRUE)) %>% 
  mutate(across(contains("age"), round)) %>% 
  pivot_longer(cols = everything()) %>% 
  print_table()
name value
n_total 210
n_female 77
n_male 127
n_not_say 4
n_non_binary 2
mean_age 33
sd_age 9
# remove unnecessary variables
# (keep only the objects that later sections still use)
rm(list = ls()[!ls() %in% c("df.normality",
                            "df.normality.demo",
                            "print_table")])

3.3 Plots

3.3.1 Causal selections

set.seed(1) # reproducible bootstrapped confidence intervals (mean_cl_boot)

# code selections as 1 = abnormal, 0 = normal so the mean is the proportion
# of abnormal-cause selections
df.plot = df.normality %>% 
  mutate(selection = ifelse(selection == "abnormal", 1, 0),
         norm = factor(norm,
                       levels = c("statistical", "prescriptive"),
                       labels = c("statistical\nnorm", "prescriptive\nnorm")))

# per-cell sample-size labels, placed just above the top of the y axis
df.text = df.plot %>% 
  count(structure, norm) %>% 
  mutate(label = str_c("n = ", n),
         selection = 1.05)

p.exp1.selections = ggplot(data = df.plot,
                           mapping = aes(x = norm, 
                                         y = selection,
                                         group = structure,
                                         fill = structure)) +
  # chance line at 50%
  geom_hline(yintercept = 0.5, 
             linetype = 2) + 
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21) + 
  geom_text(data = df.text,
            mapping = aes(label = label),
            position = position_dodge(width = 0.5),
            size = 6) +
  labs(x = "type of norm",
       y = "% abnormal cause\nselections",
       title = "Experiment 1") +
  scale_fill_brewer(palette = "Set1") +
  scale_y_continuous(breaks = seq(0, 1, 0.25),
                     labels = str_c(seq(0, 100, 25), "%"),
                     expand = expansion(mult = c(0.05, 0))) +
  # clip = "off" so the n labels at y = 1.05 remain visible
  coord_cartesian(clip = "off",
                  ylim = c(0, 1)) + 
  theme(text = element_text(size = 24),
        legend.position = "bottom",
        axis.title.x = element_blank(),
        plot.margin = margin(t = 1, l = 0.2, r = 0.2, unit = "cm"),
        plot.title = element_text(vjust = 4,
                                  color = "gray40",
                                  hjust = 0.5))

p.exp1.selections

ggsave(filename = "../../figures/plots/normality_selections.pdf",
       plot = p.exp1.selections,
       width = 8,
       height = 6)

3.3.2 Main judgment data

set.seed(1) # reproducible jitter and bootstrapped confidence intervals

# center judgments at 0: negative = preference for the normal event,
# positive = preference for the abnormal event
df.plot = df.normality %>% 
  mutate(participant = 1:n(),
         norm = factor(norm,
                       levels = c("statistical", "prescriptive"),
                       labels = c("statistical\nnorm", "prescriptive\nnorm")),
         judgment = judgment - 50)

# per-cell sample-size labels, placed just above the top of the y axis
df.text = df.plot %>% 
  count(structure, norm) %>% 
  mutate(label = str_c("n = ", n),
         judgment = 55)

p.exp1.judgments = ggplot(data = df.plot,
                          mapping = aes(x = norm, 
                                        y = judgment,
                                        group = structure,
                                        fill = structure)) +
  geom_hline(yintercept = 0, 
             linetype = 2) + 
  # individual participants (jittered), means + bootstrapped CIs on top
  geom_point(mapping = aes(color = structure), 
             alpha = 0.2,
             size = 2,
             position = position_jitterdodge(jitter.width = 0.1,
                                             dodge.width = 0.5),
             show.legend = F) + 
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21) + 
  annotate(x = c(0.5, 0.5),
           y = c(-50, 50),
           geom = "text",
           label = c("preference for\nnormal event", "preference for\nabnormal event"),
           hjust = c(0, 1),
           angle = 90,
           size = 6) + 
  geom_text(data = df.text,
            mapping = aes(label = label),
            position = position_dodge(width = 0.5),
            size = 5) + 
  labs(x = "type of norm",
       y = "normality inference") + 
  scale_fill_brewer(palette = "Set1") +
  scale_color_brewer(palette = "Set1") +
  scale_y_continuous(breaks = seq(-50, 50, 25),
                     labels = seq(-50, 50, 25),
                     expand = expansion(mult = c(0.05, 0))) + 
  # clip = "off" so the n labels at y = 55 remain visible
  coord_cartesian(clip = "off",
                  ylim = c(-50, 50)) +
  theme(text = element_text(size = 24),
        legend.position = "bottom",
        axis.title.x = element_blank(),
        plot.margin = margin(t = 1, l = 0.2, r = 0.2, unit = "cm"))

p.exp1.judgments

ggsave(filename = "../../figures/plots/normality_judgments.pdf",
       plot = p.exp1.judgments,
       width = 8,
       height = 6)

3.3.3 Individual differences

set.seed(1) # reproducible jitter and bootstrapped confidence intervals

# normality inference split by each participant's causal selection
df.plot = df.normality %>% 
  mutate(judgment = judgment - 50,
         selection = str_c(selection, "\nselection"))

# per-cell sample-size labels, placed just above the top of the y axis
df.text = df.plot %>% 
  count(selection, structure) %>% 
  mutate(judgment = 55,
         label = str_c("n = ", n))

p.exp1.individual = ggplot(data = df.plot,
                           mapping = aes(x = selection, 
                                         y = judgment,
                                         group = structure,
                                         fill = structure)) +
  geom_hline(yintercept = 0, 
             linetype = 2) + 
  geom_point(mapping = aes(color = structure),
             alpha = 0.2,
             size = 2,
             position = position_jitterdodge(jitter.width = 0.1,
                                             dodge.width = 0.5),
             show.legend = F) + 
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21) + 
  annotate(x = c(0.5, 0.5),
           y = c(-50, 50),
           geom = "text",
           label = c("preference for\nnormal event", "preference for\nabnormal event"),
           hjust = c(0, 1),
           angle = 90,
           size = 6) + 
  geom_text(data = df.text,
            mapping = aes(label = label),
            position = position_dodge(width = 0.5),
            size = 5) +
  # NOTE(review): x here is the selection, not norm — the label looks copied
  # from the previous plot, but it is hidden by axis.title.x = element_blank()
  labs(x = "type of norm",
       y = "normality inference") + 
  scale_fill_brewer(palette = "Set1") +
  scale_color_brewer(palette = "Set1") +
  scale_y_continuous(breaks = seq(-50, 50, 25),
                     labels = seq(-50, 50, 25),
                     expand = expansion(mult = c(0.05, 0))) + 
  coord_cartesian(clip = "off",
                  ylim = c(-50, 50)) +
  theme(text = element_text(size = 24),
        legend.position = "bottom",
        axis.title.x = element_blank(),
        plot.margin = margin(t = 1, l = 0.2, r = 0.2, unit = "cm"))

p.exp1.individual  
  
ggsave(filename = "../../figures/plots/normality_judgments_based_on_selections.pdf",
       plot = p.exp1.individual,
       width = 8,
       height = 6)

3.4 Stats

3.4.1 Means and standard deviations

# mean and standard deviation of normality judgments by causal structure
df.normality %>% 
  group_by(structure) %>% 
  summarize(mean = mean(judgment),
            sd = sd(judgment)) %>% 
  print_table()
structure mean sd
conjunctive 84.58 28.13
disjunctive 41.42 34.99

3.4.2 Bayesian regression on selection data

# Bernoulli (logistic) regression of causal selections on structure, norm,
# and their interaction; selection coded 1 = abnormal, 0 = normal.
# The fit is cached to file and reloaded on subsequent runs.
fit_normality_selection = brm(formula = selection ~ structure * norm,
                              family = "bernoulli",
                              data = df.normality %>% 
                                mutate(selection = ifelse(selection == "normal", 0, 1)),
                              seed = 1,
                              cores = 2,
                              file = "cache/fit_normality_selection")

fit_normality_selection
 Family: bernoulli 
  Links: mu = logit 
Formula: selection ~ structure * norm 
   Data: df.normality %>% mutate(selection = ifelse(selecti (Number of observations: 154) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept            0.81      0.26     0.35     1.36 1.00     2231     2148
structure1           1.60      0.26     1.13     2.16 1.00     2407     2364
norm1                0.10      0.26    -0.39     0.65 1.00     2274     2465
structure1:norm1     0.34      0.26    -0.16     0.90 1.00     2302     2214

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Predictions on probability scale:

# predicted probability of selecting the abnormal cause per structure x norm cell
fit_normality_selection %>% 
  ggpredict(terms = c("structure", "norm"))
# Predicted probabilities of selection

# norm = statistical

structure   | Predicted |       95% CI
--------------------------------------
conjunctive |      0.94 | [0.82, 0.99]
disjunctive |      0.27 | [0.14, 0.42]

# norm = prescriptive

structure   | Predicted |       95% CI
--------------------------------------
conjunctive |      0.88 | [0.76, 0.95]
disjunctive |      0.37 | [0.23, 0.52]

3.4.3 Bayesian regression on judgment data

# Gaussian regression of normality judgments on structure, norm, and their
# interaction; cached to file and reloaded on subsequent runs.
fit_normality_judgment = brm(formula = judgment ~ structure * norm,
                             data = df.normality,
                             seed = 1,
                             cores = 2,
                             file = "cache/fit_normality_judgment")

fit_normality_judgment
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: judgment ~ structure * norm 
   Data: df.normality (Number of observations: 154) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept           62.83      2.67    57.51    68.06 1.00     5163     2745
structure1          21.22      2.61    16.06    26.17 1.00     5198     3281
norm1               -1.47      2.62    -6.62     3.62 1.00     5125     3108
structure1:norm1    -1.46      2.56    -6.54     3.43 1.00     5258     3133

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    32.03      1.87    28.61    35.90 1.00     4767     2850

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

3.5 Tables

3.5.1 Causal selections

# posterior means and 95% highest-density intervals for the selection model,
# with coefficient names cleaned up for the table
fit_normality_selection %>% 
  gather_draws(b_Intercept, 
               b_structure1,
               b_norm1,
               `b_structure1:norm1`) %>% 
  mean_hdi() %>% 
  rename(term = .variable,
         estimate = .value,
         `lower 95% CI` = .lower,
          `upper 95% CI` = .upper) %>% 
  select(-c(.width, .point, .interval)) %>% 
  # strip the "b_" prefix and contrast suffix "1" from the term names
  mutate(term = tolower(term),
         term = str_remove_all(term, "b_"),
         term = str_remove_all(term, "1"),
         term = factor(term, levels = c("intercept", "structure", "norm", "structure:norm"))) %>% 
  arrange(term) %>% 
  print_table()
term estimate lower 95% CI upper 95% CI
intercept 0.81 0.34 1.34
structure 1.60 1.11 2.13
norm 0.10 -0.39 0.64
structure:norm 0.34 -0.20 0.86

3.5.2 Normality judgments

# posterior means and 95% highest-density intervals for the judgment model,
# with coefficient names cleaned up for the table
fit_normality_judgment %>% 
  gather_draws(b_Intercept, 
               b_structure1,
               b_norm1,
               `b_structure1:norm1`) %>% 
  mean_hdi() %>% 
  rename(term = .variable,
         estimate = .value,
         `lower 95% CI` = .lower,
          `upper 95% CI` = .upper) %>% 
  select(-c(.width, .point, .interval)) %>% 
  # strip the "b_" prefix and contrast suffix "1" from the term names
  mutate(term = tolower(term),
         term = str_remove_all(term, "b_"),
         term = str_remove_all(term, "1"),
         term = factor(term, levels = c("intercept", "structure", "norm", "structure:norm"))) %>% 
  arrange(term) %>% 
  print_table()
term estimate lower 95% CI upper 95% CI
intercept 62.83 57.57 68.11
structure 21.22 16.27 26.35
norm -1.47 -6.37 3.83
structure:norm -1.46 -6.41 3.53

3.5.3 Relationship between selections and normality inference

# compare the proportion of abnormal-cause selections with the proportion of
# abnormal normality inferences (judgment >= 50) in each cell
df.normality %>% 
  mutate(judgment_binary = ifelse(judgment < 50, "normal", "abnormal")) %>% 
  group_by(structure, norm) %>% 
  summarize(selection = sum(selection == "abnormal")/n(),
            inference = sum(judgment_binary == "abnormal")/n()) %>% 
  select(norm, structure, everything()) %>% 
  arrange(desc(norm), structure) %>% 
  ungroup() %>% 
  mutate(across(where(is.numeric), ~ round(.,2))) %>% 
  print_table()
norm structure selection inference
prescriptive conjunctive 0.87 0.91
prescriptive disjunctive 0.37 0.41
statistical conjunctive 0.93 0.90
statistical disjunctive 0.27 0.43

4 Experiment 2: Structure inference

4.1 Read in data

# ABNORMAL EXPLANATION: STATISTICAL NORM 
# Read the raw Qualtrics export and keep only complete, non-preview responses.
df.stat_abnormal_data = read.csv(file = "../../data/statistical_abnormal.csv",
                   stringsAsFactors = FALSE) %>% 
  filter(row_number() > 2) %>% # drop the two extra header rows Qualtrics adds to its csv export
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude preview trials
         ethnicity != "") # exclude participants who did not make it to demographics

# Recode all responses into a common reference frame:
# - reference clip (learning trial and judgment): the dark motion block is at
#   the top and ball A is abnormal (recode values of the "DB" condition)
# - reference slider: conjunctive on the right side (100 = Definitely
#   Conjunctive, 0 = Definitely Disjunctive) (recode the Conj_Left condition)

df.stat_abnormal_judgments = df.stat_abnormal_data %>% 
  mutate(participant = 1:n()) %>% 
  # NOTE(review): the inconsistent spellings below (con_right / con_righ)
  # mirror the raw Qualtrics column names — do not "fix" them here
  select(conj_a_db_1,
         conj_b_db_1,
         conj_db_1,
         conj_a_dt_1,
         conj_b_dt_1,
         conj_dt_1,
         disj_a_db_1,
         disj_b_db_1,
         disj_db_1,
         disj_a_dt_1,
         disj_b_dt_1,
         disj_dt_1,
         exp_conj_db,
         exp_conj_dt,
         exp_disj_db,
         exp_disj_dt,
         abnormal_b_conj_left_1,
         abnormal_b_con_right_1,
         abnormal_a_conj_left_1,
         abnormal_b_con_righ_1,
         condition,
         structure,
         participant) %>% 
  # long format: one row per participant x question; drop unseen questions
  pivot_longer(cols = -c(participant, condition, structure),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>% 
  # collapse condition-specific column names into common question labels
  mutate(index = ifelse(str_detect(index, "abnormal"),
                        "judgment",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conj_a_db_1|conj_a_dt_1"),
                        "conjunctive_ball_a",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conj_b_db_1|conj_b_dt_1"),
                        "conjunctive_ball_b",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conj_db_1|conj_dt_1"),
                        "conjunctive",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disj_a_db_1|disj_a_dt_1"),
                        "disjunctive_ball_a",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disj_b_db_1|disj_b_dt_1"),
                        "disjunctive_ball_b",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disj_db_1|disj_dt_1"),
                        "disjunctive",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "exp_conj_db|exp_conj_dt"),
                        "conjunctive_selection",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "exp_disj_db|exp_disj_dt"),
                        "disjunctive_selection",
                        index))%>%
  mutate(value = as.numeric(value),
         # flip sliders (100 - x) and selections (3 - x swaps codes 1 and 2)
         # for the "DB" condition so every row shares the reference frame
         value = ifelse(test = (condition == "DB" & str_detect(index, "ball")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((condition == "DB" & str_detect(index, "selection")),
                        3 - value,
                        value)) %>% 
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(conjunctive_selection = factor(conjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal")))%>% 
  mutate(disjunctive_selection = factor(disjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal")))%>%
  # flip the structure-judgment slider when conjunctive was on the left
  mutate(judgment = ifelse(structure =="Conj_Left",
                           100 - judgment,
                           judgment)) %>% 
  rename(conjunctive_ball_abnormal = conjunctive_ball_a,
         conjunctive_ball_normal = conjunctive_ball_b,
         disjunctive_ball_abnormal = disjunctive_ball_a,
         disjunctive_ball_normal = disjunctive_ball_b) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude = 0) only participants who pass
  # all four checks — abnormal < normal ball ratings in both structures,
  # conjunctive rating < 50, and disjunctive rating > 50
  mutate(exclude = ifelse((conjunctive_ball_abnormal < conjunctive_ball_normal) &
         (disjunctive_ball_abnormal < disjunctive_ball_normal) & 
         (conjunctive < 50) &
         (disjunctive > 50), 0, 1))

# ABNORMAL EXPLANATION: PRESCRIPTIVE NORM
# Read the Qualtrics export and keep only real, complete responses.
df.pres_abnormal_data = read.csv(file = "../../data/prescriptive_abnormal.csv",
                                 stringsAsFactors = F) %>% 
  filter(row_number() > 2) %>% # drop the extra header rows Qualtrics adds
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude survey-preview trials
         ethnicity != "") # exclude incomplete responses (no demographics entered)

# reference clip: 
# Billy is the abnormal agent (recode norm condition == "2")
# Conjunctive structure is at the top of the scale (100) (recode the ConjLeft condition)

# PRESCRIPTIVE NORM, ABNORMAL EXPLANATION: reshape the wide Qualtrics export
# into one row per participant with one column per measure, recode the
# counterbalanced conditions into the reference coding above, and compute an
# exclusion flag from the manipulation checks.
df.pres_abnormal_judgments = df.pres_abnormal_data %>% 
  mutate(participant = 1:n()) %>% 
  # keep the question columns plus condition info; drop auxiliary columns
  # (timing, checks, etc.)
  select(b_abnorm_conj_billy1_1:s_selected_conj_right_1,
         structure,
         norm_condition,
         participant,
         -starts_with("x45"),
         -starts_with("s89"),
         -starts_with("q"),
         -starts_with("check"),
         -starts_with("timing")) %>% 
  # long format: one row per (participant, question); empty cells belong to
  # the condition version the participant did not see
  pivot_longer(cols = -c(participant, structure, norm_condition),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>%
  # collapse the condition-specific column names into shared measure labels
  mutate(index = ifelse(str_detect(index, "selected"),
                        "judgment",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_conj_billy|s_abnorm_conj_billy"),
                        "conjunctive_agreement_billy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_conj_suzy|s_abnorm_conj_suzy"),
                        "conjunctive_agreement_suzy",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "b_abnorm_conjunctive|s_abnorm_conjunctive"),
                        "conjunctive",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "b_abnorm_conj_exp|s_abnorm_conj_exp"),
                        "conjunctive_selection",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "b_abnorm_disj_billy|s_abnorm_disj_billy"),
                        "disjunctive_agreement_billy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "b_abnorm_disj_suzy|s_abnorm_disj_suzy"),
                        "disjunctive_agreement_suzy",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "b_abnorm_disjunctive|s_abnorm_disjunctive"),
                        "disjunctive",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "b_abnorm_disj_exp|s_abnorm_disj_exp"),
                        "disjunctive_selection",
                        index))%>%
  # norm_condition "2" is the counterbalanced mirror image: flip the 0-100
  # agreement sliders (100 - value) and swap the 1/2 selection (3 - value)
  mutate(value = as.numeric(value),
         value = ifelse(test = (norm_condition == "2" & str_detect(index, "agreement")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((norm_condition == "2" & str_detect(index, "selection")),
                        3 - value,
                        value))%>% 
  # back to wide: one column per measure, one row per participant
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(conjunctive_selection = factor(conjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal"))) %>%
  mutate(disjunctive_selection = factor(disjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal")))%>%
  # in the ConjLeft condition the structure slider is mirrored: flip so that
  # 100 always means "definitely conjunctive"
  mutate(judgment = ifelse(structure =="ConjLeft",
                           100 - judgment,
                           judgment)) %>% 
  # after recoding, Billy is the abnormal agent and Suzy the normal one
  rename(conjunctive_agent_abnormal = conjunctive_agreement_billy,
        conjunctive_agent_normal = conjunctive_agreement_suzy,
        disjunctive_agent_abnormal = disjunctive_agreement_billy,
        disjunctive_agent_normal = disjunctive_agreement_suzy) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude == 0) only participants for whom
  # the abnormal agent received a lower agreement rating than the normal agent
  # in both structures, with conjunctive < 50 and disjunctive > 50
  mutate(exclude = ifelse(
    test = (conjunctive_agent_abnormal < conjunctive_agent_normal) &
         (disjunctive_agent_abnormal < disjunctive_agent_normal) &
         (conjunctive < 50) &
         (disjunctive > 50),
    yes = 0, 
    no = 1))

# NORMAL EXPLANATION: STATISTICAL NORMS
# Read the Qualtrics export and keep only real, complete responses.
df.stat_normal_data = read.csv(file = "../../data/statistical_normal.csv",
                               stringsAsFactors = F) %>% 
  filter(row_number() > 2) %>% # drop the extra header rows Qualtrics adds
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude survey-preview trials
         ethnicity != "") # exclude incomplete responses (no demographics entered)

# Reference clip Learning Trial and Judgment: Dark Motion Block is top, Ball A is abnormal 
# (Recode values of DB condition)
# Reference Slider: Conjunctive on the right side (100 - Definitely Conjunctive, 
# 0 - Definitely Disjunctive) (Recode Conj_Left Condition)
# STATISTICAL NORMS, NORMAL EXPLANATION: same reshaping/recoding scheme as the
# abnormal-explanation data above -- one row per participant, counterbalanced
# conditions recoded into the reference coding, exclusion flag from the
# manipulation checks.
df.stat_normal_judgments = df.stat_normal_data %>% 
  mutate(participant = 1:n()) %>% 
  select(conj_a_db_1,
         conj_b_db_1,
         conj_db_1,
         conj_a_dt_1,
         conj_b_dt_1,
         conj_dt_1,
         disj_a_db_1,
         disj_b_db_1,
         disj_db_1,
         disj_a_dt_1,
         disj_b_dt_1,
         disj_dt_1,
         exp_conj_db,
         exp_conj_dt,
         exp_disj_db,
         exp_disj_dt,
         normal_a_conj_left_1,
         normal_a_con_right_1,
         normal_b_conj_left_1,
         normal_b_con_right_1,
         condition,
         structure,
         participant) %>% 
  # long format: one row per (participant, question); empty cells belong to
  # the condition version the participant did not see
  pivot_longer(cols = -c(participant, condition, structure),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>% 
  # collapse the condition-specific column names into shared measure labels
  mutate(index = ifelse(str_detect(index, "normal"),
                        "judgment",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conj_a_db_1|conj_a_dt_1"),
                        "conjunctive_ball_a",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conj_b_db_1|conj_b_dt_1"),
                        "conjunctive_ball_b",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "conj_db_1|conj_dt_1"),
                        "conjunctive",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disj_a_db_1|disj_a_dt_1"),
                        "disjunctive_ball_a",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disj_b_db_1|disj_b_dt_1"),
                        "disjunctive_ball_b",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "disj_db_1|disj_dt_1"),
                        "disjunctive",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "exp_conj_db|exp_conj_dt"),
                        "conjunctive_selection",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "exp_disj_db|exp_disj_dt"),
                        "disjunctive_selection",
                        index))%>%
  # the DB condition is the counterbalanced mirror image: flip the 0-100 ball
  # sliders (100 - value) and swap the 1/2 cause selection (3 - value)
  mutate(value = as.numeric(value),
         value = ifelse(test = (condition == "DB" & str_detect(index, "ball")),
                        yes = 100 - value,
                        no = value),
         value = ifelse((condition == "DB" & str_detect(index, "selection")),
                        3 - value,
                        value)) %>% 
  # back to wide: one column per measure, one row per participant
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(conjunctive_selection = factor(conjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal")))%>% 
  mutate(disjunctive_selection = factor(disjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal")))%>%
  # in the Conj_Left condition the structure slider is mirrored: flip so that
  # 100 always means "definitely conjunctive"
  mutate(judgment = ifelse(structure =="Conj_Left",
                           100 - judgment,
                           judgment)) %>% 
  # after recoding, ball a is the abnormal ball and ball b the normal one
  rename(conjunctive_ball_abnormal = conjunctive_ball_a,
        conjunctive_ball_normal = conjunctive_ball_b,
        disjunctive_ball_abnormal = disjunctive_ball_a,
        disjunctive_ball_normal = disjunctive_ball_b) %>% 
  arrange(participant) %>% 
  # define exclusion criteria: keep (exclude == 0) only participants for whom
  # the abnormal ball received a lower rating than the normal ball in both
  # structures, with conjunctive < 50 and disjunctive > 50
  mutate(exclude = ifelse((conjunctive_ball_abnormal < conjunctive_ball_normal) &
         (disjunctive_ball_abnormal < disjunctive_ball_normal) & 
         (conjunctive < 50) &
         (disjunctive > 50), 0, 1))

# NORMAL EXPLANATION: PRESCRIPTIVE NORMS
# Read the Qualtrics export and keep only real, complete responses.
df.pres_normal_data = read.csv(file = "../../data/prescriptive_normal.csv",
                               stringsAsFactors = F) %>% 
  filter(row_number() > 2) %>% # drop the extra header rows Qualtrics adds
  clean_names() %>% 
  filter(status != "Survey Preview", # exclude survey-preview trials
         ethnicity != "") # exclude incomplete responses (no demographics entered)

# reference clip: 
# Billy is the abnormal agent, Suzy the normal agent (recode norm condition == "2")
# Conjunctive structure is at the top of the scale (100) (recode the structure condition 'ConjLeft')

# PRESCRIPTIVE NORM, NORMAL EXPLANATION: same reshaping/recoding scheme as the
# abnormal-explanation data above -- one row per participant, counterbalanced
# conditions recoded into the reference coding, exclusion flag from the
# manipulation checks.
df.pres_normal_judgments = df.pres_normal_data %>% 
  mutate(participant = 1:n()) %>% 
  # keep the question columns plus condition info; drop auxiliary columns
  # (timing, checks, etc.)
  select(s_norm_conj_billy1_1:b_selected_conj_right_1,
         structure,
         norm_condition,
         participant,
         -starts_with("x45"),
         -starts_with("s89"),
         -starts_with("q"),
         -starts_with("check"),
         -starts_with("timing")) %>% 
  # long format: one row per (participant, question); empty cells belong to
  # the condition version the participant did not see
  pivot_longer(cols = -c(participant, structure, norm_condition),
               names_to = "index",
               values_to = "value") %>%
  filter(value != "") %>%
  # collapse the condition-specific column names into shared measure labels
  mutate(index = ifelse(str_detect(index, "selected"),
                        "judgment",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "s_norm_conj_billy|b_norm_conj_billy"),
                        "conjunctive_agreement_billy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "s_norm_conj_suzy|b_norm_conj_suzy"),
                        "conjunctive_agreement_suzy",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "s_norm_conjunctive|b_norm_conjunctive"),
                        "conjunctive",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "s_norm_conj_exp|b_norm_conj_exp"),
                        "conjunctive_selection",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "s_norm_disj_billy|b_norm_disj_billy"),
                        "disjunctive_agreement_billy",
                        index))%>% 
  mutate(index = ifelse(str_detect(index, "s_norm_disj_suzy|b_norm_disj_suzy"),
                        "disjunctive_agreement_suzy",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "s_norm_disjunctive|b_norm_disjunctive"),
                        "disjunctive",
                        index))%>%
  mutate(index = ifelse(str_detect(index, "s_norm_disj_exp|b_norm_disj_exp"),
                        "disjunctive_selection",
                        index))%>%
  # norm_condition "2" is the counterbalanced mirror image: flip the 0-100
  # agreement sliders (100 - value) and swap the 1/2 selection (3 - value)
  mutate(value = as.numeric(value),
         value = ifelse(test = (norm_condition == "2" & str_detect(index, "agreement")),
                        yes = 100 - value,
                        no = value),
         value = ifelse(test = (norm_condition == "2" & str_detect(index, "selection")),
                        yes = 3 - value,
                        no = value))%>% 
  # back to wide: one column per measure, one row per participant
  pivot_wider(names_from = index,
              values_from = value) %>% 
  mutate(conjunctive_selection = factor(conjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal"))) %>%
  mutate(disjunctive_selection = factor(disjunctive_selection,
                            levels = 1:2,
                            labels = c("abnormal", "normal")))%>%
  # in the ConjLeft condition the structure slider is mirrored: flip so that
  # 100 always means "definitely conjunctive"
  mutate(judgment = ifelse(structure =="ConjLeft",
                           100 - judgment,
                           judgment)) %>% 
  # after recoding, Billy is the abnormal agent and Suzy the normal one
  rename(conjunctive_agent_abnormal = conjunctive_agreement_billy,
        conjunctive_agent_normal = conjunctive_agreement_suzy,
        disjunctive_agent_abnormal = disjunctive_agreement_billy,
        disjunctive_agent_normal = disjunctive_agreement_suzy) %>% 
  arrange(participant) %>%
  # define exclusion criteria: keep (exclude == 0) only participants for whom
  # the abnormal agent received a lower agreement rating than the normal agent
  # in both structures, with conjunctive < 50 and disjunctive > 50
  mutate(exclude = ifelse(
    test = (conjunctive_agent_abnormal < conjunctive_agent_normal) &
         (disjunctive_agent_abnormal < disjunctive_agent_normal) &
         (conjunctive < 50) &
         (disjunctive > 50),
    yes = 0,
    no = 1))

# COMBINE DATA: stack the four datasets (2 norms x 2 explanation types),
# harmonize the statistical (ball) and prescriptive (agent) column names,
# apply the exclusions, and reshape to long format with one row per
# participant x causal structure.
df.structure = df.stat_abnormal_judgments %>% 
  mutate(explanation = "abnormal") %>% 
  bind_rows(df.stat_normal_judgments %>% 
              mutate(explanation = "normal")) %>% 
  rename(conjunctive_abnormal = conjunctive_ball_abnormal,
         conjunctive_normal = conjunctive_ball_normal,
         disjunctive_abnormal = disjunctive_ball_abnormal,
         disjunctive_normal = disjunctive_ball_normal) %>% 
  mutate(norm = "statistical") %>% 
  bind_rows(df.pres_abnormal_judgments %>% 
              mutate(explanation = "abnormal") %>% 
              bind_rows(df.pres_normal_judgments %>% 
                          mutate(explanation = "normal")) %>% 
              rename(conjunctive_abnormal = conjunctive_agent_abnormal,
                     conjunctive_normal = conjunctive_agent_normal,
                     disjunctive_abnormal = disjunctive_agent_abnormal,
                     disjunctive_normal = disjunctive_agent_normal) %>% 
              mutate(norm = "prescriptive")) %>% 
  filter(exclude == 0) %>% # filter based on exclusion criteria
  select(-c(norm_condition, structure)) %>% 
  rename(conjunctive_cause = conjunctive, 
         disjunctive_cause = disjunctive) %>% 
  # renumber participants across the combined data; cast the measure columns
  # to character so the factor (selection) and numeric columns can share a
  # single value column during pivot_longer
  mutate(participant = 1:n(),
         across(c(contains("disjunctive"), contains("conjunctive")), ~ as.character(.))) %>% 
  # split names like "conjunctive_cause" into structure = "conjunctive" and
  # index = "cause"; values land in the default "value" column
  pivot_longer(names_to = c("structure", "index"),
               names_sep = "_",
               cols = c(contains("disjunctive"), contains("conjunctive"))) %>% 
  pivot_wider(names_from = index,
              values_from = value) %>% 
  # restore numeric types after the character round-trip
  mutate(across(c(cause, abnormal, normal), ~ as.numeric(.))) %>% 
  mutate(selection = factor(selection, levels = c("abnormal", "normal")),
    structure = factor(structure, levels = c("conjunctive", "disjunctive")),
         norm = factor(norm, levels = c("statistical", "prescriptive")))

4.2 Demographics

# Demographics: stack feedback/age/gender/race/ethnicity from all four
# sub-experiments and attach each participant's exclusion flag. The join key
# is the within-dataset row index, recreated with the same 1:n() used when
# the corresponding judgment data frames were built.
df.structure.demo = df.stat_abnormal_data %>% 
  select(feedback,
         age_1_text,
         gender,
         race_1_text,
         ethnicity) %>% 
  mutate(norm = "statistical",
         normality = "abnormal",
         participant = 1:n()) %>% 
  left_join(df.stat_abnormal_judgments %>% 
              select(participant, exclude),
            by = "participant") %>% 
  bind_rows(df.stat_normal_data %>% 
              select(feedback,
                     age_1_text,
                     gender,
                     race_1_text,
                     ethnicity) %>% 
              mutate(norm = "statistical",
                     normality = "normal",
                     participant = 1:n()) %>% 
              left_join(df.stat_normal_judgments %>% 
                          select(participant, exclude),
                        by = "participant")) %>% 
  bind_rows(df.pres_abnormal_data %>% 
              select(feedback,
                     age_1_text,
                     gender,
                     race_1_text,
                     ethnicity) %>% 
              mutate(norm = "prescriptive",
                     normality = "abnormal",
                     participant = 1:n()) %>% 
              left_join(df.pres_abnormal_judgments %>% 
                          select(participant, exclude),
                        by = "participant")) %>% 
  bind_rows(df.pres_normal_data %>% 
              select(feedback,
                     age_1_text,
                     gender,
                     race_1_text,
                     ethnicity) %>% 
              mutate(norm = "prescriptive",
                     normality = "normal",
                     participant = 1:n()) %>% 
              left_join(df.pres_normal_judgments %>% 
                          select(participant, exclude),
                        by = "participant")) %>% 
  mutate(age = as.numeric(age_1_text),
         # entries > 1000 are birth years; convert to age
         # (2019 presumably = year of data collection -- TODO confirm)
         age = ifelse(age > 1000, 2019 - age, age),
         # recode the numeric Qualtrics gender codes to labels
         gender = ifelse(gender == 1, "Female", gender),
         gender = ifelse(gender == 2, "Male", gender),
         gender = ifelse(gender == 3, "Non-binary", gender),
         gender = ifelse(gender == 4, "Prefer not to say.", gender))

# total number of excluded participants across the four sub-experiments
df.structure.demo %>% 
  summarize(n_excluded = sum(exclude == 1))
  n_excluded
1         70
# sample sizes per norm x normality condition after exclusions
df.structure.demo %>% 
  filter(exclude == 0) %>%
  count(norm, normality) %>% 
  print_table()
norm normality n
prescriptive abnormal 41
prescriptive normal 42
statistical abnormal 33
statistical normal 27
# Gender and age summary.
# NOTE: the exclusion filter is commented out, so these demographics describe
# the full sample including excluded participants.
df.structure.demo %>% 
  # filter(exclude == 0) %>% 
  summarize(n_total = n(),
            n_female = sum(gender == "Female"),
            n_male = sum(gender == "Male"),
            n_not_say = sum(gender == "Prefer not to say."),
            n_non_binary = sum(gender == "Non-binary"),
            mean_age = mean(age, na.rm = T),
            sd_age = sd(age, na.rm = T)) %>% 
  mutate(across(contains("age"), round)) %>% 
  pivot_longer(cols = everything()) %>% 
  print_table()
name value
n_total 213
n_female 70
n_male 142
n_not_say 1
n_non_binary 0
mean_age 34
sd_age 10
# Drop every object except the data frames, fitted models, and helpers that
# the later sections still need.
rm(list = setdiff(ls(),
                  c("df.normality",
                    "df.normality.demo",
                    "df.structure",
                    "df.structure.demo",
                    "print_table",
                    "fit_normality_selection",
                    "fit_normality_judgment",
                    "p.exp1.selections")))

4.3 Plots

4.3.1 Causal selections

# seed for reproducibility of the bootstrapped/jittered plot elements below
set.seed(1)
# code selection numerically (abnormal = 1) so means give the proportion of
# abnormal-cause selections; relabel the norm factor for the axis
df.plot = df.structure %>% 
  mutate(selection = ifelse(selection == "abnormal", 1, 0),
         norm = factor(norm,
                       levels = c("statistical", "prescriptive"),
                       labels = c("statistical\nnorm", "prescriptive\nnorm")))

# per-norm sample-size labels, placed just above the plotting region (1.05)
df.text = df.plot %>% 
  distinct(participant, norm) %>% 
  count(norm) %>% 
  mutate(label = str_c("n = ", n),
         selection = 1.05,
         structure = NA)

# condition means with manual x positions for the dodged points
# NOTE(review): df.line is not referenced anywhere below -- the connecting
# line is built from layer_data() instead; possibly dead code, verify before
# removing
df.line = df.plot %>% 
  group_by(norm, structure) %>% 
  summarize(selection = mean(selection)) %>% 
  ungroup() %>% 
  mutate(x = c(0.75, 1.25, 1.75, 2.25))

# Experiment 2: proportion of abnormal-cause selections by norm type and
# causal structure.
p.exp2.selections = ggplot(data = df.plot,
                           mapping = aes(x = norm, 
                                         y = selection,
                                         group = structure,
                                         fill = structure)) +
  # chance line at 50%
  geom_hline(yintercept = 0.5, 
             linetype = 2) + 
  # invisible (alpha = 0) pointrange: exists only so that layer_data() in the
  # follow-up chunk can recover the dodged positions for the connecting lines
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21,
               alpha = 0) +
  # sample-size labels (at y = 1.05, visible because clipping is off)
  geom_text(data = df.text,
            mapping = aes(label = label),
            position = position_dodge(width = 0.5),
            size = 6) +
  labs(x = "type of norm",
       y = "% abnormal cause selections",
       title = "Experiment 2") +
  scale_fill_brewer(palette = "Set1") +
  scale_y_continuous(breaks = seq(0, 1, 0.25),
                     labels = str_c(seq(0, 100, 25), "%"),
                     expand = expansion(mult = c(0.05, 0))) +
  coord_cartesian(clip = "off",
                  ylim = c(0, 1)) + 
  theme(text = element_text(size = 24),
        legend.position = "bottom",
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        plot.margin = margin(t = 1, l = 0.2, r = 0.2, unit = "cm"),
        plot.title = element_text(vjust = 4,
                                  color = "gray40",
                                  hjust = 0.5))

# hack to link the dodged points:
# https://stackoverflow.com/questions/51737201/how-to-draw-line-between-dodged-geometry-in-ggplot
# layer_data(..., 2L) pulls the computed (dodged) x/y positions of the second
# layer -- the invisible pointrange added above -- so the condition means can
# be connected; group = rep(1:2, 2) links each structure's points across the
# two norm conditions. A visible pointrange is then re-drawn on top of the
# lines.

p.exp2.selections = p.exp2.selections + geom_line(data = layer_data(p.exp2.selections, 2L),
                              mapping = aes(x = x,
                                            y = y,
                                            group = rep(1:2, 2),
                                            fill = NA),
                              size = 1,
                              linetype = 1) + 
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21)

p.exp2.selections

ggsave(filename = "../../figures/plots/structure_selections.pdf",
       plot = p.exp2.selections,
       width = 8,
       height = 6)

4.3.2 Main judgment data

# seed for reproducibility of the jittered/bootstrapped plot elements below
set.seed(1)

# one row per participant: the judgment is duplicated across the two structure
# rows, so take the distinct participant-level values; center at 0 so the sign
# encodes the inferred structure (positive = conjunctive)
df.plot = df.structure %>% 
  select(participant, norm, explanation, judgment) %>% 
  distinct() %>% 
  mutate(norm = factor(norm,
                       levels = c("statistical", "prescriptive"),
                       labels = c("statistical\nnorm", "prescriptive\nnorm")),
         judgment = judgment - 50)

# per-cell sample-size labels, placed just above the plotting region (55)
df.text = df.plot %>% 
  count(explanation, norm) %>% 
  mutate(label = str_c("n = ", n),
         judgment = 55)

# Experiment 2: structure inference (centered at 0) by norm type and
# explanation type, with individual judgments as jittered points.
p.exp2.judgments = ggplot(data = df.plot,
       mapping = aes(x = norm, 
                     y = judgment,
                     group = explanation,
                     fill = explanation)) +
  # indifference line between the two structures
  geom_hline(yintercept = 0, 
             linetype = 2) + 
  # individual participants' judgments
  geom_point(mapping = aes(color = explanation), 
             alpha = 0.2,
             size = 2,
             position = position_jitterdodge(jitter.width = 0.1,
                                             dodge.width = 0.5),
             show.legend = F) + 
  # condition means with bootstrapped 95% CIs
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21) + 
  # explain the sign of the centered judgment along the y axis
  annotate(x = c(0.5, 0.5),
           y = c(-50, 50),
           geom = "text",
           label = c("preference for\ndisjunctive structure", "preference for\nconjunctive structure"),
           hjust = c(0, 1),
           angle = 90,
           size = 5) + 
  # sample-size labels
  geom_text(data = df.text,
            mapping = aes(label = label),
            position = position_dodge(width = 0.5),
            size = 5) + 
  labs(x = "type of norm",
       y = "structure inference") + 
  scale_fill_manual(values = c("#984EA3", "#4DAF4A")) +
  scale_color_manual(values = c("#984EA3", "#4DAF4A")) +
  scale_y_continuous(breaks = seq(-50, 50, 25),
                     labels = seq(-50, 50, 25),
                     expand = expansion(mult = c(0.05, 0))) + 
  coord_cartesian(clip = "off",
                  ylim = c(-50, 50)) +
  theme(text = element_text(size = 24),
        legend.position = "bottom",
        axis.title.x = element_blank(),
        plot.margin = margin(t = 1, l = 0.2, r = 0.2, unit = "cm"))

# Save the judgment figure. Pass the plot explicitly (consistent with the
# other ggsave() calls in this file): ggsave() defaults to the last
# *displayed* plot, and p.exp2.judgments is never printed before this call --
# the last displayed plot here is p.exp2.selections, so relying on the
# default would save the wrong figure.
ggsave(filename = "../../figures/plots/structure_judgments.pdf",
       plot = p.exp2.judgments,
       width = 8,
       height = 6)

4.3.3 Individual differences

# seed for reproducibility of the jittered/bootstrapped plot elements below
set.seed(1)

# one row per participant with their selection pattern across the two
# structures (e.g. "D: normal" / "C: abnormal", joined across two lines) as
# the x-axis grouping; judgment centered at 0
df.plot = df.structure %>% 
  select(participant, explanation, judgment, norm, structure, selection) %>% 
  pivot_wider(names_from = "structure",
              values_from = "selection") %>%
  mutate(disjunctive = str_c("D: ", disjunctive),
         conjunctive = str_c("C: ", conjunctive)) %>% 
  unite("selection", c(disjunctive, conjunctive), sep = "&", remove = F) %>% 
  mutate(judgment = judgment - 50,
         # turn the "&" separator into a line break for the axis labels
         selection = str_replace(selection, "&", "\n"))

# sample-size labels per selection pattern, just above the plotting region
df.text = df.plot %>% 
  count(selection) %>% 
  mutate(judgment = 55,
         label = str_c("n = ", n),
         explanation = NA)

# Experiment 2: structure inference by participants' own causal selection
# pattern, split by explanation type.
p.exp2.individual = ggplot(data = df.plot,
                           mapping = aes(x = selection, 
                                         y = judgment,
                                         group = explanation,
                                         fill = explanation)) +
  # indifference line between the two structures
  geom_hline(yintercept = 0, 
             linetype = 2) + 
  # individual participants' judgments
  geom_point(mapping = aes(color = explanation),
             alpha = 0.2,
             size = 2,
             position = position_jitterdodge(jitter.width = 0.1,
                                             dodge.width = 0.5),
             show.legend = F) + 
  # cell means with bootstrapped 95% CIs
  stat_summary(fun.data = "mean_cl_boot",
               geom = "pointrange",
               position = position_dodge(width = 0.5),
               size = 1.5,
               shape = 21) + 
  # explain the sign of the centered judgment along the y axis
  annotate(x = c(0.54, 0.54),
           y = c(-50, 50),
           geom = "text",
           label = c("preference for\ndisjunctive structure", "preference for\nconjunctive structure"),
           hjust = c(0, 1),
           angle = 90,
           size = 4.5) + 
  # sample-size labels
  geom_text(data = df.text,
            mapping = aes(label = label),
            position = position_dodge(width = 0.5),
            size = 5) +
  labs(x = "participants' causal selections",
       y = "structure inference") + 
  scale_fill_manual(values = c("#984EA3", "#4DAF4A")) +
  scale_color_manual(values = c("#984EA3", "#4DAF4A")) +
  scale_y_continuous(breaks = seq(-50, 50, 25),
                     labels = seq(-50, 50, 25),
                     expand = expansion(mult = c(0.05, 0))) + 
  coord_cartesian(clip = "off",
                  ylim = c(-50, 50)) +
  theme(text = element_text(size = 24),
        legend.position = "bottom",
        axis.title.x = element_text(margin = margin(t = 0.5, unit = "cm")),
        plot.margin = margin(t = 1, l = 0.2, r = 0.2, unit = "cm"))

p.exp2.individual

ggsave(filename = "../../figures/plots/structure_judgments_based_on_selections.pdf",
       plot = p.exp2.individual,
       width = 8,
       height = 6)

4.4 Stats

4.4.1 Causal selections

# proportion of abnormal-cause selections within each causal structure
df.structure %>%
  count(structure, selection) %>%
  group_by(structure) %>%
  mutate(perc = n / sum(n)) %>%
  ungroup() %>%
  filter(selection == "abnormal") %>%
  select(structure, selection, perc) %>%
  print_table()
structure selection perc
conjunctive abnormal 0.87
disjunctive abnormal 0.33

4.4.2 Means and standard deviations

# mean and standard deviation of the structure judgment by explanation type
# (computed over all rows of df.structure, as in the original)
df.structure %>%
  group_by(explanation) %>%
  summarize(across(judgment,
                   list(mean = mean, sd = sd),
                   .names = "{.fn}")) %>%
  print_table()
explanation mean sd
abnormal 74.07 30.26
normal 27.25 31.96

4.4.3 Bayesian regression on selections

# Bayesian mixed-effects logistic regression of cause selection (abnormal vs.
# normal) on structure, norm, and their interaction, with a random intercept
# per participant (each participant contributes one selection per structure).
# The sum-coded contrasts from the setup chunk apply; the fit is cached via
# file= (delete the cache file to refit).
fit_structure_selection = brm(formula = selection ~ 1 + structure * norm + (1 | participant),
                              family = "bernoulli",
                              data = df.structure,
                              seed = 1,
                              cores = 2,
                              file = "cache/fit_structure_selection")
fit_structure_selection
fit_structure_selection
 Family: bernoulli 
  Links: mu = logit 
Formula: selection ~ 1 + structure * norm + (1 | participant) 
   Data: df.structure (Number of observations: 286) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 143) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     1.78      0.68     0.53     3.23 1.03      213      427

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept           -0.87      0.30    -1.54    -0.38 1.01     1035     1461
structure1          -1.92      0.41    -2.88    -1.26 1.02      321      959
norm1               -0.08      0.25    -0.54     0.41 1.00     2193     2156
structure1:norm1     0.46      0.23     0.05     0.96 1.00     1368     1721

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

4.4.4 Bayesian regression on judgment data

# Bayesian linear regression of the structure judgment on explanation type,
# norm type, and their interaction; distinct() keeps one row per participant
# (the judgment is duplicated across the two structure rows). The fit is
# cached via file= (delete the cache file to refit).
fit_structure_judgment = brm(formula = judgment ~ explanation * norm,
                             data = df.structure %>% 
                               select(participant, norm, explanation, judgment) %>% 
                               distinct(),
                             seed = 1,
                             cores = 2,
                             file = "cache/fit_structure_judgment")
fit_structure_judgment
fit_structure_judgment
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: judgment ~ explanation * norm 
   Data: df.structure %>% select(participant, norm, explana (Number of observations: 143) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Population-Level Effects: 
                   Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept             50.67      2.68    45.25    55.88 1.00     4250     2657
explanation1          23.48      2.61    18.29    28.74 1.00     4841     2687
norm1                  0.56      2.62    -4.59     5.72 1.00     4068     2953
explanation1:norm1     0.48      2.71    -4.88     5.73 1.00     4931     3106

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    31.66      1.88    28.25    35.55 1.00     4201     3225

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

4.5 Tables

4.5.1 Causal selections

# Fixed-effects summary of the selection model with HPD intervals; the "1"
# suffixes that brms appends to sum-coded terms are stripped for readability.
fit_structure_selection %>% 
  tidy(conf.method = "HPDinterval",
         fix.intercept = F) %>% 
    filter(effect == "fixed") %>% 
    mutate(term = tolower(term),
           term = str_remove_all(term, "1"),
           across(where(is.numeric), ~ round(., 2))) %>% 
    rename(`lower 95% CI` = conf.low,
           `upper 95% CI` = conf.high) %>% 
    select(-c(effect:group,std.error)) %>% 
  print_table()
term estimate lower 95% CI upper 95% CI
intercept -0.87 -1.48 -0.35
structure -1.92 -2.77 -1.20
norm -0.08 -0.54 0.42
structure:norm 0.46 0.02 0.91

4.5.2 Structure judgments

# Posterior means with highest-density intervals for the judgment model's
# fixed effects (via tidybayes); the "b_" prefixes and "1" suffixes from the
# sum-coded contrasts are stripped, and the terms are reordered via a factor.
fit_structure_judgment %>% 
  gather_draws(b_Intercept, 
               b_explanation1,
               b_norm1,
               `b_explanation1:norm1`) %>% 
  mean_hdi() %>% 
  rename(term = .variable,
         estimate = .value,
         `lower 95% CI` = .lower,
          `upper 95% CI` = .upper) %>% 
  select(-c(.width, .point, .interval)) %>% 
  mutate(term = tolower(term),
         term = str_remove_all(term, "b_"),
         term = str_remove_all(term, "1"),
         term = factor(term, levels = c("intercept", "explanation", "norm", "explanation:norm"))) %>% 
  arrange(term) %>% 
  print_table()
term estimate lower 95% CI upper 95% CI
intercept 50.67 45.22 55.81
explanation 23.48 18.45 28.81
norm 0.56 -4.21 6.09
explanation:norm 0.48 -4.89 5.71

4.5.3 Selection patterns

# cross-tabulate each participant's causal selections across the two
# causal structures (disjunctive vs. conjunctive)
df.structure %>% 
  select(participant, judgment, norm, structure, selection) %>% 
  pivot_wider(names_from = structure, values_from = selection) %>% 
  count(disjunctive, conjunctive) %>% 
  print_table()
disjunctive conjunctive n
abnormal abnormal 44
abnormal normal 3
normal abnormal 80
normal normal 16

4.5.4 Difference scores in structure inference based on causal selections

# difference in structure judgments between explanation conditions, split by
# each participant's pattern of causal selections across the two structures
df.structure %>% 
  select(participant, explanation, judgment, norm, structure, selection) %>% 
  # one row per participant: their disjunctive and conjunctive selections
  pivot_wider(names_from = "structure",
              values_from = "selection") %>%
  # label selections so the combined pattern is readable (e.g. "D: normal & C: abnormal")
  mutate(disjunctive = str_c("D: ", disjunctive),
         conjunctive = str_c("C: ", conjunctive)) %>% 
  unite("selection", c(disjunctive, conjunctive), sep = " & ", remove = F) %>% 
  group_by(explanation, selection) %>% 
  summarize(judgment = mean(judgment),
            n = n()) %>% 
  # spread the two explanation conditions into columns to compute difference scores
  pivot_wider(names_from = explanation,
              values_from = c(judgment, n)) %>% 
  ungroup() %>% 
  mutate(difference = judgment_abnormal - judgment_normal,
         n = n_abnormal + n_normal) %>% 
  select(-c(n_abnormal, n_normal)) %>% 
  mutate(across(where(is.numeric), ~ round(., 2))) %>% 
  print_table()
selection judgment_abnormal judgment_normal difference n
D: abnormal & C: abnormal 65.68 34.64 31.04 44
D: abnormal & C: normal 52.50 0.00 52.50 3
D: normal & C: abnormal 80.25 21.22 59.03 80
D: normal & C: normal 66.33 35.71 30.62 16

5 Comparisons

# experiment 1: percentage of choosing the abnormal cause as a function of causal structure
df.normality %>% 
  group_by(structure) %>% 
  # mean of a logical vector = proportion of TRUE values
  summarize(percentage = mean(selection == "abnormal")) %>% 
  mutate(across(where(is.numeric), ~ round(., 2) * 100))
# A tibble: 2 × 2
  structure   percentage
  <fct>            <dbl>
1 conjunctive         89
2 disjunctive         32
# experiment 1: normality inference as a function of causal structure 
df.normality %>% 
  group_by(structure) %>% 
  # produces judgment_mean and judgment_sd columns
  summarize(across(judgment, list(mean = mean, sd = sd))) %>% 
  mutate(across(where(is.numeric), ~ round(., 2)))
# A tibble: 2 × 3
  structure   judgment_mean judgment_sd
  <fct>               <dbl>       <dbl>
1 conjunctive          84.6        28.1
2 disjunctive          41.4        35.0
# experiment 2: percentage of choosing the abnormal cause as a function of causal structure
df.structure %>% 
  group_by(structure) %>% 
  summarize(percentage = sum(selection == "abnormal")/n()) %>% 
  mutate(across(where(is.numeric), ~ round(., 2) * 100))
# A tibble: 2 × 2
  structure   percentage
  <fct>            <dbl>
1 conjunctive         87
2 disjunctive         33
# experiment 2: structure inference as a function of normality
df.structure %>% 
  group_by(explanation) %>% 
  # produces judgment_mean and judgment_sd columns
  summarize(across(judgment, list(mean = mean, sd = sd))) %>% 
  mutate(across(where(is.numeric), ~ round(., 2)))
# A tibble: 2 × 3
  explanation judgment_mean judgment_sd
  <chr>               <dbl>       <dbl>
1 abnormal             74.1        30.3
2 normal               27.2        32.0

6 Appendix

6.1 Frequentist analysis

6.1.1 Experiment 1

6.1.1.1 Causal selections

# model fit: logistic regression predicting causal selections
# (normal vs. abnormal) from structure, norm, and their interaction
ffit_normality_selection = glm(formula = selection ~ structure * norm,
                               family = "binomial",
                               data = df.normality)

# analysis of deviance & effect size
# type III tests; Cramer's V is computed from each LR chi-squared statistic.
# chisq_to_cramers_v() returns a data frame (estimate plus CI columns),
# hence map() followed by unnest()
aov_normality_selection = Anova(ffit_normality_selection,
                                type = 3) %>% 
  as_tibble(rownames = "Term") %>% 
  mutate(cramers_v = map(`LR Chisq`, ~ chisq_to_cramers_v(chisq = .,
                                                          n = nrow(df.normality),
                                                          nrow = 2, 
                                                          ncol = 2))) %>% 
  unnest(cramers_v)

aov_normality_selection %>% 
  print_table()
Term LR Chisq Df Pr(>Chisq) Cramers_v CI CI_low CI_high
structure 58.77 1 0.00 0.62 0.95 0.46 0.78
norm 0.09 1 0.76 0.02 0.95 0.00 0.17
structure:norm 1.57 1 0.21 0.10 0.95 0.00 0.26
6.1.1.1.1 Power analysis
# post-hoc power analysis on the effect of structure on selections
table_normality_selection = table(df.normality$structure,
                                  df.normality$selection)

# pwr.chisq.test() expects Cohen's w (an effect size), not the raw chi-squared
# statistic; the original passed the statistic itself (hence w = 50.69 in the
# old output). Cohen's w = sqrt(chisq / N).
chisq_normality_selection = table_normality_selection %>% 
  chisq.test() %>% 
  .$statistic %>% 
  as.numeric()

pwr.chisq.test(w = sqrt(chisq_normality_selection /
                          sum(table_normality_selection)),
               N = sum(table_normality_selection),
               df = 1)

     Chi squared power calculation 

              w = 50.68802
              N = 154
             df = 1
      sig.level = 0.05
          power = 1

NOTE: N is the number of observations

6.1.1.2 Normality inference

# fit a linear model of normality judgments with structure, norm, and
# their interaction as predictors
ffit_normality_judgment = lm(formula = judgment ~ structure * norm,
                             data = df.normality)

# type III analysis of variance
aov_normality_judgment = ffit_normality_judgment %>% 
  Anova(type = 3)

# print the anova table
aov_normality_judgment %>%
  as_tibble(rownames = "Term") %>% 
  print_table()
Term Sum Sq Df F value Pr(>F)
(Intercept) 590388.61 1 578.94 0.00
structure 67911.03 1 66.59 0.00
norm 332.89 1 0.33 0.57
structure:norm 348.55 1 0.34 0.56
Residuals 152966.72 150 NA NA
# partial eta squared with 95% CI for each model term
ffit_normality_judgment %>% 
  eta_squared(ci = 0.95) %>% 
  print_table()
Parameter Eta2_partial CI CI_low CI_high
structure 0.32 0.95 0.2 0.43
norm 0.00 0.95 0.0 0.04
structure:norm 0.00 0.95 0.0 0.04
6.1.1.2.1 Power analysis
# post-hoc power analysis 
# pwr.anova.test() expects Cohen's f (an effect size), not an F statistic;
# the original passed the intercept's F value (578.9). Convert the partial
# eta squared of the structure effect instead: f = sqrt(eta2 / (1 - eta2)).
eta2_structure = eta_squared(ffit_normality_judgment)$Eta2_partial[1]

pwr.anova.test(k = 2,
               n = df.normality %>% 
                 count(structure) %>%
                 .$n %>% 
                 min(),
               f = sqrt(eta2_structure / (1 - eta2_structure)))

     Balanced one-way analysis of variance power calculation 

              k = 2
              n = 76
              f = 578.9383
      sig.level = 0.05
          power = 1

NOTE: n is number in each group

6.1.2 Experiment 2

6.1.2.1 Causal selections

# model fit: mixed-effects logistic regression predicting causal selections
# from structure, norm, and their interaction, with a random intercept per
# participant (participants make a selection in each structure condition)
ffit_structure_selection = glmer(formula = selection ~ 1 + structure * norm + (1 | participant),
                                 family = "binomial",
                                 data = df.structure)

# analysis of deviance & effect size
# type III chi-squared tests; chisq_to_cramers_v() returns a data frame
# (estimate plus CI columns), hence map() followed by unnest().
# NOTE(review): n = nrow(df.structure) counts all selection rows (both
# structures per participant) — confirm this is the intended N for Cramer's V.
aov_structure_selection = Anova(ffit_structure_selection,
                                type = 3) %>% 
  as_tibble(rownames = "Term") %>% 
  mutate(cramers_v = map(`Chisq`, ~ chisq_to_cramers_v(chisq = .,
                                                          n = nrow(df.structure),
                                                          nrow = 2, 
                                                          ncol = 2))) %>% 
  unnest(cramers_v)

aov_structure_selection %>% 
  print_table()
Term Chisq Df Pr(>Chisq) Cramers_v CI CI_low CI_high
(Intercept) 10.70 1 0.00 0.19 0.95 0.08 0.31
structure 26.44 1 0.00 0.30 0.95 0.19 0.42
norm 0.17 1 0.68 0.02 0.95 0.00 0.14
structure:norm 4.19 1 0.04 0.12 0.95 0.00 0.24
6.1.2.1.1 Power analysis
# post-hoc power analysis on the effect of structure on selections
table_structure_selection = table(df.structure$structure,
                                  df.structure$selection)

# Two fixes relative to the original code:
# 1) pwr.chisq.test() expects Cohen's w = sqrt(chisq / N), not the raw
#    chi-squared statistic (the old output showed w = 84.0).
# 2) N must come from the experiment 2 table; the original summed
#    table_normality_selection from experiment 1 (N = 154) by mistake.
chisq_structure_selection = table_structure_selection %>% 
  chisq.test() %>% 
  .$statistic %>% 
  as.numeric()

pwr.chisq.test(w = sqrt(chisq_structure_selection /
                          sum(table_structure_selection)),
               N = sum(table_structure_selection),
               df = 1)

     Chi squared power calculation 

              w = 84.00386
              N = 154
             df = 1
      sig.level = 0.05
          power = 1

NOTE: N is the number of observations

6.1.2.2 Structure inference

# model fit: structure judgments repeat across rows of df.structure, so keep
# one row per participant before fitting the linear model
df.structure_judgment = df.structure %>% 
  select(participant,
         norm,
         explanation,
         judgment) %>% 
  distinct()

ffit_structure_judgment = lm(formula = judgment ~ explanation * norm,
                             data = df.structure_judgment)

# type III analysis of variance
aov_structure_judgment = ffit_structure_judgment %>% 
  Anova(type = 3)

# print the anova table
aov_structure_judgment %>%
  as_tibble(rownames = "Term") %>% 
  print_table()
Term Sum Sq Df F value Pr(>F)
(Intercept) 356193.93 1 360.94 0.00
explanation 76137.49 1 77.15 0.00
norm 34.48 1 0.03 0.85
explanation:norm 21.84 1 0.02 0.88
Residuals 137174.06 139 NA NA
# partial eta squared with 95% CI for each model term
ffit_structure_judgment %>% 
  eta_squared(ci = 0.95) %>% 
  print_table()
Parameter Eta2_partial CI CI_low CI_high
explanation 0.36 0.95 0.24 0.47
norm 0.00 0.95 0.00 0.03
explanation:norm 0.00 0.95 0.00 0.02
6.1.2.2.1 Power analysis
# post-hoc power analysis 
# pwr.anova.test() expects Cohen's f (an effect size), not an F statistic;
# the original passed the intercept's F value (360.9). Convert the partial
# eta squared of the explanation effect instead: f = sqrt(eta2 / (1 - eta2)).
eta2_explanation = eta_squared(ffit_structure_judgment)$Eta2_partial[1]

pwr.anova.test(k = 2,
               n = df.structure %>% 
                 select(participant, norm, explanation, judgment) %>% 
                 distinct() %>% 
                 count(explanation) %>%
                 .$n %>% 
                 min(),
               f = sqrt(eta2_explanation / (1 - eta2_explanation)))

     Balanced one-way analysis of variance power calculation 

              k = 2
              n = 69
              f = 360.9353
      sig.level = 0.05
          power = 1

NOTE: n is number in each group

7 Combined plots

7.1 Selections

# combine the selection plots from both experiments into one figure;
# & applies the theme tweaks to every panel in the patchwork
p.exp1.selections + p.exp2.selections + 
  plot_annotation(tag_levels = "A") & 
  theme(plot.tag = element_text(face = "bold"),
        plot.margin = margin(t = 0.1, r = 0.1, l = 0.1, b = 0, unit = "cm"))

# ggsave() writes the most recently displayed plot by default
ggsave(filename = "../../figures/plots/selections.pdf",
       width = 16,
       height = 6)

8 Session Info

sessionInfo()
R version 4.1.1 (2021-08-10)
Platform: x86_64-apple-darwin17.0 (64-bit)
Running under: macOS Big Sur 10.16

Matrix products: default
BLAS:   /Library/Frameworks/R.framework/Versions/4.1/Resources/lib/libRblas.0.dylib
LAPACK: /Library/Frameworks/R.framework/Versions/4.1/Resources/lib/libRlapack.dylib

locale:
[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8

attached base packages:
[1] stats     graphics  grDevices utils     datasets  methods   base     

other attached packages:
 [1] forcats_0.5.1     stringr_1.4.0     dplyr_1.0.7       purrr_0.3.4      
 [5] readr_2.0.1       tidyr_1.1.3       tibble_3.1.3      ggplot2_3.3.5    
 [9] tidyverse_1.3.1   patchwork_1.1.1   pwr_1.3-0         effectsize_0.4.5 
[13] lme4_1.1-27.1     Matrix_1.3-4      car_3.0-11        carData_3.0-4    
[17] ggeffects_1.1.1   xtable_1.8-4      broom.mixed_0.2.7 tidybayes_3.0.0  
[21] brms_2.15.0       Rcpp_1.0.7        janitor_2.1.0     kableExtra_1.3.4 
[25] knitr_1.33       

loaded via a namespace (and not attached):
  [1] utf8_1.2.2           tidyselect_1.1.1     htmlwidgets_1.5.3   
  [4] grid_4.1.1           munsell_0.5.0        codetools_0.2-18    
  [7] DT_0.18              miniUI_0.1.1.1       withr_2.4.2         
 [10] Brobdingnag_1.2-6    colorspace_2.0-2     highr_0.9           
 [13] rstudioapi_0.13      stats4_4.1.1         bayesplot_1.8.1     
 [16] emmeans_1.6.2-1      rstan_2.21.2         farver_2.1.0        
 [19] datawizard_0.1.0     bridgesampling_1.1-2 coda_0.19-4         
 [22] vctrs_0.3.8          generics_0.1.0       xfun_0.25           
 [25] R6_2.5.0             markdown_1.1         HDInterval_0.2.2    
 [28] gamm4_0.2-6          projpred_2.0.2       assertthat_0.2.1    
 [31] promises_1.2.0.1     scales_1.1.1         nnet_7.3-16         
 [34] gtable_0.3.0         processx_3.5.2       rlang_0.4.11        
 [37] systemfonts_1.0.2    splines_4.1.1        broom_0.7.9         
 [40] checkmate_2.0.0      inline_0.3.19        yaml_2.2.1          
 [43] reshape2_1.4.4       abind_1.4-5          modelr_0.1.8        
 [46] threejs_0.3.3        crosstalk_1.1.1      backports_1.2.1     
 [49] httpuv_1.6.1         rsconnect_0.8.24     Hmisc_4.5-0         
 [52] tensorA_0.36.2       tools_4.1.1          bookdown_0.22       
 [55] ellipsis_0.3.2       RColorBrewer_1.1-2   jquerylib_0.1.4     
 [58] posterior_1.0.1      ggridges_0.5.3       plyr_1.8.6          
 [61] base64enc_0.1-3      ps_1.6.0             prettyunits_1.1.1   
 [64] rpart_4.1-15         zoo_1.8-9            cluster_2.1.2       
 [67] haven_2.4.3          fs_1.5.0             magrittr_2.0.1      
 [70] data.table_1.14.0    ggdist_3.0.0         openxlsx_4.2.4      
 [73] colourpicker_1.1.0   reprex_2.0.1         mvtnorm_1.1-2       
 [76] matrixStats_0.60.0   hms_1.1.0            shinyjs_2.0.0       
 [79] mime_0.11            evaluate_0.14        arrayhelpers_1.1-0  
 [82] shinystan_2.5.0      jpeg_0.1-9           rio_0.5.27          
 [85] readxl_1.3.1         gridExtra_2.3        rstantools_2.1.1    
 [88] compiler_4.1.1       V8_3.4.2             crayon_1.4.1        
 [91] minqa_1.2.4          StanHeaders_2.21.0-7 htmltools_0.5.1.1   
 [94] mgcv_1.8-36          later_1.2.0          tzdb_0.1.2          
 [97] Formula_1.2-4        RcppParallel_5.1.4   lubridate_1.7.10    
[100] DBI_1.1.1            sjlabelled_1.1.8     dbplyr_2.1.1        
[103] MASS_7.3-54          boot_1.3-28          cli_3.0.1           
[106] parallel_4.1.1       insight_0.14.2       igraph_1.2.6        
[109] pkgconfig_2.0.3      foreign_0.8-81       xml2_1.3.2          
[112] svUnit_1.0.6         dygraphs_1.1.1.6     svglite_2.0.0       
[115] bslib_0.2.5.1        webshot_0.5.2        estimability_1.3    
[118] rvest_1.0.1          snakecase_0.11.0     distributional_0.2.2
[121] callr_3.7.0          digest_0.6.27        parameters_0.14.0   
[124] rmarkdown_2.10       cellranger_1.1.0     htmlTable_2.2.1     
[127] curl_4.3.2           shiny_1.6.0          gtools_3.9.2        
[130] nloptr_1.2.2.2       lifecycle_1.0.0      nlme_3.1-152        
[133] jsonlite_1.7.2       viridisLite_0.4.0    fansi_0.5.0         
[136] pillar_1.6.2         lattice_0.20-44      loo_2.4.1           
[139] fastmap_1.1.0        httr_1.4.2           pkgbuild_1.2.0      
[142] survival_3.2-11      glue_1.4.2           xts_0.12.1          
[145] bayestestR_0.10.5    zip_2.2.0            png_0.1-7           
[148] shinythemes_1.2.0    stringi_1.7.3        sass_0.4.0          
[151] latticeExtra_0.6-29