1 Load packages

library("knitr")       # for knitting
library("janitor")     # for cleaning variable names
library("corrr")       # for correlations
library("ggtext")      # for text in ggplots
library("kableExtra")  # for knitr tables
library("lubridate")   # for dealing with dates
library("brms")        # for bayesian regression
library("RSQLite")     # for reading in db files
library("tidyjson")    # for parsing json files
library("DT")          # tables in Rmarkdown
library("grid")        # functions for dealing with images
library("xtable")      # for latex tables
library("png")         # adding pngs to images
library("egg")         # for geom_custom
library("patchwork")   # for making figure panels
library("modelr")      # for bootstrapping
library("rsample")     # for bootstrapping
library("lme4")        # for linear mixed effects models
library("broom.mixed") # for cleaning up mixed effects models
library("Hmisc")       # for bootstrapping
library("tidybayes")   # for Bayesian regression modeling
library("emmeans")     # for marginal effects
library("ggeffects")   # for marginal effects
library("scales")      # for rescaling
library("emojifont")   # for awesome font 
library("boot")        # for inv.logit
library("tidyverse")   # for everything else

2 Helper functions

theme_set(theme_classic() +
              theme(text = element_text(size = 24)))

opts_chunk$set(comment = "",
               fig.show = "hold")

options(dplyr.summarise.inform = F)

# root mean squared error
fun.rmse = function(x, y){
    return(sqrt(mean((x - y)^2)))
}

# function for printing out html or latex tables
fun.print_table = function(data, format = "html", digits = 2){
    if(format == "html"){
        data %>%
            kable(digits = digits) %>%
            kable_styling()
    }else if(format == "latex"){
        data %>%
            xtable(digits = digits) %>%
            print(include.rownames = F,
                  booktabs = T)
    }
}

# softmax function
fun.softmax = function(x, beta = 1){
    return(exp(beta * x)/sum(exp(beta * x)))
}
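
A quick illustration of these helpers on made-up inputs (not part of the analysis):

fun.rmse(x = c(1, 2, 3), y = c(1, 2, 5))   # sqrt(mean(c(0, 0, 4))) ≈ 1.15
fun.softmax(x = c(1, 2, 3), beta = 2)      # returns probabilities that sum to 1
fun.print_table(data = head(mtcars))       # renders an html table via kable + kableExtra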

3 EXP 1: Scenarios

3.1 DATA

3.1.1 Read in data

df.exp1.long = bind_rows(
    read_csv("../../data/experiment1/experiment1-trials.csv"), 
    read_csv("../../data/experiment1/experiment1-trials2.csv")) %>%
    filter(!is.na(trial_num)) %>%
    select(-c(proliferate.condition, stimulus, error)) %>%
    mutate(across(.cols = c("helper1", "helper2", "requester"),
                  .fns = ~ ifelse(str_detect(., "female"), "female", "male"))) %>%
    # effort was reverse coded: for these trials, the high-blame response was query 1 instead of 0
    mutate(query = ifelse(trial_type == 'effort', 1 - query, query)) %>%
    mutate(predicted = ifelse(query == response, 1, 0))

# number of participants excluded for failing the attention check
df.exp1.long %>%
    filter(trial_num == 'attention_check', response == 1) %>%
    nrow()
[1] 0
# update scenario index
df.scenario_index = tibble(trial_num = 1:15,
                           trial = c(1:3, 7:9, 13:15, 10:12, 4:6),
                           question = rep(c("ability", "knowledge", "social_role",
                                    "intention", "effort"), each = 3),
                           scenario = c("tire", "couch", "shelf",
                                        "airpot", "picnic", "cooking",
                                        "birthday", "mover", "acting",
                                        "soccer", "lunch", "invite",
                                        "tug_of_war", "presentation", "call"))

# exclude attention check from data
df.exp1.long = df.exp1.long %>%
    filter(trial_num != "attention_check") %>%
    mutate(trial_num = as.numeric(trial_num),
           trial_index = trial_index - 1) %>%
    left_join(df.scenario_index,
              by = "trial_num") %>%
    select(participant = workerid,
           trial,
           question,
           scenario,
           order = trial_index,
           predicted,
           response) %>%
    arrange(participant, trial)

3.1.2 Demographics

df.exp1.demographics = bind_rows(
  read_csv("../../data/experiment1/experiment1-participants.csv"),
  read_csv("../../data/experiment1/experiment1-participants2.csv"))

df.exp1.demographics %>%
    count(gender) %>%
    fun.print_table()
gender               n
Female              20
Male                21
Non binary, female   1
Woman                1
cisgender woman      1
female               2
m                    1
male                 2
malel                1

df.exp1.demographics %>%
    count(race) %>%
    fun.print_table()
race                n
African-American    1
Arab                1
Asian               2
Caucasian           5
Filipino            1
Indian              1
Italic              1
Latino              1
Mexican             2
Mixed               1
South Asian         1
Welsh               1
White              20
White - Caucasian   1
White British       1
White caucasic      1
White/Asian         1
human               1
white               6
NA                  1

df.exp1.demographics %>%
    summarise(age_mean = mean(age),
              age_sd = sd(age),
              time_mean    = mean(time/(1000*60)), # converting from ms to minutes
              time_sd  = sd(time/(1000*60))) %>%
    fun.print_table()
age_mean  age_sd  time_mean  time_sd
   26.62    6.74       7.08     2.91

3.2 STATS

3.2.1 Overall model fit

fit.exp1.overall = brm(formula = predicted ~ 1 + (1 | participant) + 
                           (1 | question / scenario),
               data = df.exp1.long,
               family = "bernoulli",
               seed = 1,
               cores = 4,
               file = "cache/fit.exp1.overall",
               control = list(adapt_delta = 0.95))

fit.exp1.overall
 Family: bernoulli 
  Links: mu = logit 
Formula: predicted ~ 1 + (1 | participant) + (1 | question/scenario) 
   Data: df.exp1.long (Number of observations: 750) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Multilevel Hyperparameters:
~participant (Number of levels: 50) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     1.06      0.22     0.69     1.54 1.00     1721     2288

~question (Number of levels: 5) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.40      0.35     0.02     1.30 1.00     1366     1844

~question:scenario (Number of levels: 15) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.55      0.23     0.16     1.04 1.00     1636     2109

Regression Coefficients:
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     2.40      0.36     1.74     3.15 1.00     2277     1969

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
fit.exp1.overall %>%
    tidy() %>%
    select(-c(component)) %>%
    fun.print_table()
effect    group              term             estimate  std.error  conf.low  conf.high
fixed     NA                 (Intercept)          2.40       0.36      1.74       3.15
ran_pars  participant        sd__(Intercept)      1.06       0.22      0.69       1.54
ran_pars  question           sd__(Intercept)      0.40       0.35      0.02       1.30
ran_pars  question:scenario  sd__(Intercept)      0.55       0.23      0.16       1.04

3.2.2 Effect of scenario

fit.exp1.scenario = brm(formula = predicted ~ 1 + scenario + (1 | participant),
                        data = df.exp1.long,
                        family = "bernoulli",
                        seed = 1,
                        cores = 4,
                        file = "cache/fit.exp1.scenario")

fit.exp1.scenario %>%
    emmeans(~ scenario,
            type = "response") %>%
    summary(point.est = "mean") %>%
    left_join(df.scenario_index %>%
                  select(trial, question, scenario),
              by = "scenario") %>%
    arrange(trial) %>%
    relocate(trial, question) %>%
    fun.print_table()
trial  question     scenario      response  lower.HPD  upper.HPD
    1  ability      tire              0.93       0.86       0.99
    2  ability      couch             0.93       0.86       0.99
    3  ability      shelf             0.94       0.88       0.99
    4  effort       tug_of_war        0.95       0.89       0.99
    5  effort       presentation      0.97       0.94       1.00
    6  effort       call              0.88       0.78       0.96
    7  knowledge    airpot            0.91       0.83       0.98
    8  knowledge    picnic            0.91       0.84       0.98
    9  knowledge    cooking           0.96       0.91       1.00
   10  intention    soccer            0.91       0.83       0.98
   11  intention    lunch             0.96       0.91       1.00
   12  intention    invite            0.78       0.65       0.90
   13  social_role  birthday          0.94       0.89       0.99
   14  social_role  mover             0.80       0.67       0.92
   15  social_role  acting            0.82       0.70       0.93

3.2.3 Post-hoc power analysis

set.seed(1)

df.params = fit.exp1.overall %>% 
    tidy() %>% 
    select(effect, parameter = group, estimate) %>% 
    replace_na(list(parameter = "intercept")) %>% 
    select(-effect) %>% 
    mutate(parameter = str_replace(parameter, ":", "_")) %>% 
    pivot_wider(names_from = parameter,
                values_from = estimate)

nsample = c(5:10, seq(15, 50, 5))
nsimulations = 50

df.exp1.power = df.exp1.long %>% 
    distinct(question, scenario) %>% 
    nest() %>% 
    expand_grid(nsample, nsimulations = 1:nsimulations) %>% 
    mutate(data = map2(.x = data,
                       .y = nsample,
                       .f = ~.x %>% 
                           expand_grid(participant = 1:.y) %>% 
                           mutate(intercept = df.params$intercept) %>% 
                           group_by(participant) %>% 
                           mutate(participant_par = rnorm(1,
                                                          sd = df.params$participant)) %>%
                           group_by(question) %>% 
                           mutate(question_par = rnorm(1,
                                                       sd = df.params$question)) %>% 
                           group_by(scenario) %>% 
                           mutate(scenario_par = rnorm(1, 
                                                       sd = df.params$question_scenario)) %>% 
                           ungroup() %>% 
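                           # simulate binary responses from the logistic model implied by the fitted parameters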
                           mutate(predicted = intercept + participant_par + question_par + scenario_par,
                                  probability = inv.logit(predicted),
                                  response = rbinom(n(), 1, probability))),
           fit = map(.x = data, 
                     .f = ~ glmer(formula = response ~ 1 + (1 | participant) + 
                                     (1 | question / scenario),
                                 data = .x,
                                 family = "binomial")),
           p.value = map_dbl(.x = fit,
                             .f = ~ .x %>% 
                                 tidy() %>% 
                                 filter(effect == "fixed") %>% 
                                 pull(p.value))) %>% 
    select(nsample, nsimulations, p.value)

save(df.exp1.power,
     file = "cache/df.exp1.power.RData")

3.3 PLOTS

3.3.1 Bar plot

set.seed(1)
questions = c("ability", "effort", "knowledge", "intention", "social role")

df.plot = df.exp1.long %>%
    mutate(question = str_replace(question, "social_role", "social role"),
           question = factor(question, levels = questions))

ggplot(data = df.plot,
       mapping = aes(x = trial,
                     y = predicted,
                     fill = question)) +
    stat_summary(fun.data = "mean_cl_boot",
                 geom = "bar",
                 color = "black") +
    stat_summary(fun.data = "mean_cl_boot",
                 geom = "linerange",
                 linewidth = 2) +
    geom_hline(yintercept = 0.5,
               linetype = 2) +
    annotate("text",
             x = 1:15,
             y = 0.05,
             label = 1:15,
             size = 8) +
    labs(y = "% predicted selection") +
    scale_fill_brewer(palette = "Set1") +
    scale_x_continuous(breaks = c(2, 5, 8, 11, 14),
                       labels = questions,
                       expand = expansion(mult = c(0.01, 0.01))) +
    scale_y_continuous(breaks = seq(0, 1, 0.25),
                       labels = str_c(seq(0, 100, 25), "%"),
                       expand = expansion(mult = c(0, 0.02))) +
    theme(legend.position = "none",
          axis.title.x = element_blank(),
          text = element_text(size = 36),
          axis.text.x = element_text(vjust = 0),
          plot.margin = margin(b = 0.3, t = 0.2, l = 0.1, r = 0.1, unit = "cm"))

ggsave(filename = "../../figures/paper/figure2.pdf",
       width = 16,
       height = 6)

3.3.2 Histogram of individual participant responses

df.plot = df.exp1.long %>%
    group_by(participant) %>%
    summarize(response = sum(predicted))

ggplot(data = df.plot,
       mapping = aes(x = response)) +
    geom_histogram(binwidth = 1,
                   boundary = -0.5,
                   color = "black",
                   fill = "lightblue") +
    scale_x_continuous(breaks = 0:20) +
    scale_y_continuous(breaks = seq(0, 20, 2),
                       expand = expansion(mult = c(0, 0.1))) +
    theme(panel.grid.major.y = element_line())

3.3.3 Power analysis

load(file = "cache/df.exp1.power.RData")

df.plot = df.exp1.power %>% 
    mutate(significant = p.value < 0.05) %>% 
    group_by(nsample) %>%
    summarize(power = sum(significant)/n())


ggplot(data = df.plot,
       mapping = aes(x = nsample,
                     y = power)) + 
    geom_line() +
    geom_point() + 
    scale_x_continuous(breaks = seq(5, 50, 5)) + 
    labs(x = "sample size") +
    theme(panel.grid.major.y = element_line())

4 EXP 2: Coffee

4.1 EXP 2a: Generative

4.1.1 DATA

4.1.1.1 Read in data

# data 
df.exp2a = read_csv("../../data/experiment2a/experiment2a-trials.csv",
                    show_col_types = FALSE) %>% 
    rename(trial_img = map) %>% 
    mutate(trial_img = str_remove(trial_img, "images/gen_models/"))

# cost of moving on a red tile 
red_cost = 3

# model information 
df.exp2.models = read_csv(
    "../../data/experiment2a/experiment2a-trialinfo.csv",
    show_col_types = FALSE) %>% 
    mutate(action_cost = blue_yes + (red_cost * red_yes),
           alternative_cost = blue_no + (red_cost * red_no),
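           # subtract the cost of the home tile (1 if blue, 3 if red)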
           across(.cols = c(action_cost, alternative_cost),
                  .fns = ~ ifelse(home_tile == "blue", . - 1, . - 3)),
           additional_action_cost = action_cost - alternative_cost)

# combining data and model 
df.exp2a.long = df.exp2a %>% 
    left_join(df.exp2.models,
              by = "trial_img") %>% 
    mutate(workerid = as.factor(workerid)) %>% 
    rename(participant = workerid)

# only keep necessary information
df.exp2a.long = df.exp2a.long %>%
    select(participant, trial, 
           action_cost, additional_action_cost, praise) %>% 
    arrange(participant, trial) %>% 
    # relabel trials to go from 1 to 48
    group_by(participant) %>% 
    mutate(trial = 1:48) %>% 
    ungroup()

# getting mean rating per trial
df.exp2a.means = df.exp2a.long %>%
    group_by(trial, action_cost, additional_action_cost) %>%
    summarise(praise = mean(praise)) %>%
    ungroup()

df.exp2.model_index = tibble(name = c("baseline",
                                      "action_cost",
                                      "additional_action_cost",
                                      "full",
                                      "empirical"),
                             color = c(NA,
                                       "#e41a1c",
                                       "#377eb8",
                                       "#984EA3",
                                       "gray80")) %>% 
    pivot_wider(names_from = name,
                values_from = color)

4.1.1.2 Demographics

df.exp2a.demographics = read_csv("../../data/experiment2a/experiment2a-participants.csv")

df.exp2a.demographics %>%
    count(race) %>%
    fun.print_table()
race            n
ASIAN           1
African         1
Arab            1
Black           4
Caucasian       7
Chinese         1
KAUKASIAN       1
Latino          2
Mixed           1
North African   1
White          20
White British   1
White british   1
brunette        1
caucasian       1
mestizo         1
white           3
NA              2

df.exp2a.demographics %>%
    summarise(age_mean = mean(age),
              age_sd = sd(age),
              time_mean = mean(time/(1000*60)), # converting from ms to minutes
              time_sd = sd(time/(1000*60)),
              n = n()) %>%
    fun.print_table(digits = 0)
age_mean  age_sd  time_mean  time_sd   n
      25       6          9        5  50

4.1.2 STATS

4.1.2.1 Correlation between action cost and additional action cost

df.exp2.models %>% 
    select(action_cost, additional_action_cost) %>%
    cor() %>% 
    .[2]
[1] 0.598191

4.1.2.2 Model fits

4.1.2.2.1 Overall
# fit models 
fit.exp2a.baseline = brm(formula = praise ~ 1 + (1 | participant),
                         data = df.exp2a.long,
                         seed = 1,
                         cores = 4,
                         iter = 4000,
                         file = "cache/fit.exp2a.baseline")

fit.exp2a.action_cost = brm(formula = praise ~ 1 + action_cost + 
                                (1 + action_cost | participant),
                            data = df.exp2a.long,
                            seed = 1,
                            cores = 4,
                            iter = 4000,
                            file = "cache/fit.exp2a.action_cost")

fit.exp2a.additional_action_cost = brm(formula = praise ~ 1 + additional_action_cost + 
                                           (1 + additional_action_cost | participant),
                                       data = df.exp2a.long,
                                       seed = 1,
                                       cores = 4,
                                       iter = 4000,
                                       file = "cache/fit.exp2a.additional_action_cost")

fit.exp2a.full = brm(formula = praise ~ 1 + additional_action_cost + action_cost + 
                         (1 + additional_action_cost + action_cost | participant),
                     data = df.exp2a.long,
                     seed = 1,
                     cores = 4,
                     iter = 4000,
                     file = "cache/fit.exp2a.full")


# add leave-one-out cross-validation (LOO) criterion to each model fit
fit.exp2a.baseline = add_criterion(fit.exp2a.baseline,
                                   criterion = "loo",
                                   reloo = T,
                                   file = "cache/fit.exp2a.baseline")

fit.exp2a.action_cost = add_criterion(fit.exp2a.action_cost,
                                      criterion = "loo",
                                      reloo = T,
                                      file = "cache/fit.exp2a.action_cost")

fit.exp2a.additional_action_cost = add_criterion(
    fit.exp2a.additional_action_cost,
    criterion = "loo",
    reloo = T,
    file = "cache/fit.exp2a.additional_action_cost")

fit.exp2a.full = add_criterion(fit.exp2a.full,
                               criterion = "loo",
                               reloo = T,
                               file = "cache/fit.exp2a.full")

# getting the difference between models using LOOCV
df.exp2a.elpd = loo_compare(fit.exp2a.baseline,
                            fit.exp2a.action_cost,
                            fit.exp2a.additional_action_cost,
                            fit.exp2a.full) %>%
    as_tibble(rownames = "model") %>% 
    mutate(model = factor(model,
                          levels = str_c("fit.exp2a.",
                                            colnames(df.exp2.model_index)),
                          labels = colnames(df.exp2.model_index))) %>% 
    arrange(model)
4.1.2.2.2 Individual
  • This first block (sketched below) needs to be run once in order to save the individual participant model fits locally.

  • Afterwards, it's sufficient to run the load() call below.
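
A minimal sketch of what that one-time block could look like. This is an assumption rather than the original code: it approximates the per-participant model comparison with lm() and AIC instead of participant-level Bayesian fits with LOO, and only the object name df.exp2a.individual_fits and its best_model column are taken from how the cached file is used later on.

# illustrative sketch only -- run once, then rely on the cached file loaded below
df.exp2a.individual_fits = df.exp2a.long %>% 
    nest(data = -participant) %>% 
    mutate(baseline = map_dbl(data, ~ AIC(lm(praise ~ 1, data = .x))),
           action_cost = map_dbl(data, ~ AIC(lm(praise ~ action_cost, data = .x))),
           additional_action_cost = map_dbl(data, ~ AIC(lm(praise ~ additional_action_cost,
                                                           data = .x))),
           full = map_dbl(data, ~ AIC(lm(praise ~ action_cost + additional_action_cost,
                                         data = .x)))) %>% 
    select(-data) %>% 
    pivot_longer(cols = -participant,
                 names_to = "model",
                 values_to = "aic") %>% 
    group_by(participant) %>% 
    slice_min(aic, n = 1, with_ties = FALSE) %>% 
    ungroup() %>% 
    select(participant, best_model = model)

save(df.exp2a.individual_fits,
     file = "cache/df.exp2a.individual_fits.RData")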

load(file = "cache/df.exp2a.individual_fits.RData")

4.1.3 PLOTS

4.1.3.1 Trial selection

set.seed(1)

# original trial selection: 21, 19, 9, 13 (overridden below)
trial_selection = c(21, 19, 9, 13)

# alternative trial selection (the one used for the figure)
trial_selection = c(21, 13, 9, 16)

fun.load_image = function(trial){
  readPNG(str_c("../../figures/stimuli/experiment2a/images/experiment2a_trial_",
                trial, ".png"))
}


# link the selected trials to their stimulus images
df.images = df.exp2a.long %>%
    distinct(trial) %>%
    filter(trial %in% trial_selection) %>% 
    mutate(grob = map(.x = trial,
                      .f = ~ fun.load_image(trial = .x)))

df.plot = df.exp2a.long %>% 
    filter(trial %in% trial_selection)

df.model = df.exp2a.means %>% 
    filter(trial %in% trial_selection) %>% 
    left_join(fit.exp2a.action_cost %>% 
                  fitted(newdata = df.exp2a.means,
                         re_formula = NA) %>% 
                  as_tibble() %>% 
                  mutate(trial = 1:n(),
                         action_cost_prediction = Estimate) %>% 
                  select(trial, action_cost_prediction),
              by = "trial") %>% 
    left_join(fit.exp2a.additional_action_cost %>% 
                  fitted(newdata = df.exp2a.means,
                         re_formula = NA) %>% 
                  as_tibble() %>% 
                  mutate(trial = 1:n(),
                         additional_action_cost_prediction = Estimate) %>% 
                  select(trial, additional_action_cost_prediction),
              by = "trial") %>% 
    left_join(fit.exp2a.full %>% 
                  fitted(newdata = df.exp2a.means,
                         re_formula = NA) %>% 
                  as_tibble() %>% 
                  mutate(trial = 1:n(),
                         full_prediction = Estimate) %>% 
                  select(trial, full_prediction),
              by = "trial") %>% 
    pivot_longer(cols = c(praise,
                          action_cost_prediction,
                          additional_action_cost_prediction,
                          full_prediction),
                 names_to = "model",
                 values_to = "prediction") %>% 
    select(trial, model, prediction)

df.text = df.images %>% 
    select(trial) %>% 
    mutate(x = 1.5,
           y = 50,
           # label = c("3", "4", "2", "1"))
           label = c("3", "2", "4", "1"))

ggplot(data = df.plot,
       mapping = aes(x = 1,
                     y = praise)) + 
    stat_summary(fun = "mean",
                 geom = "bar",
                 color = "black",
                 fill = df.exp2.model_index$empirical) + 
    stat_summary(fun.data = "mean_cl_boot",
                 geom = "linerange",
                 linewidth = 1.5) + 
    geom_point(alpha = 0.1,
               position = position_jitter(width = 0.3, height = 0)) + 
    geom_point(data = df.model %>% 
                   filter(str_detect(model, "prediction")),
               mapping = aes(y = prediction, 
                             fill = model,
                             group = model),
               shape = 21,
               size = 3,
               position = position_dodge(width = 0.75)) + 
    geom_custom(data = df.images,
                mapping = aes(data = grob,
                              x = -Inf,
                              y = Inf),
                grob_fun = function(x) rasterGrob(x,
                                                  vjust = -0.25,
                                                  hjust = 1,
                                                  width = unit(6.8, "cm"))) + 
    geom_text(data = df.text,
              mapping = aes(x = x,
                            y = y,
                            label = label),
              size = 10,
              vjust = -10.2,
              hjust = 0.5) + 
    coord_flip(clip = "off") +
    facet_wrap(~ factor(trial, levels = trial_selection),
               ncol = 4) + 
    scale_y_continuous(breaks = seq(0, 100, 25),
                       labels = seq(0, 100, 25),
                       limits = c(0, 100)) +
    scale_fill_manual(labels = c("action cost model",
                                 "additional action cost model",
                                 "full model"),
                        values = c(df.exp2.model_index$action_cost,
                                   df.exp2.model_index$additional_action_cost,
                                   df.exp2.model_index$full)) +
    labs(y = "praise judgments (0 = none, 100 = a lot)") +
    theme(legend.position = "bottom",
          legend.title = element_blank(),
          axis.text.x = element_text(size = 16),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          axis.line.y = element_blank(),
          axis.title.y = element_blank(),
          axis.title.x = element_text(vjust = -0.2),
          strip.background = element_blank(),
          strip.text = element_blank(),
          panel.spacing.x = unit(0.5, "cm"),
          plot.margin = margin(t = 8, l = 0.1, r = 0.3, b = 0.1, unit = "cm")) + 
    guides(fill = guide_legend(override.aes = list(size = 5)))

ggsave("../../figures/paper/figure4.pdf",
       width = 12,
       height = 5)

4.1.3.2 Scatterplot

set.seed(1)

# model colors
model_colors = c("full" = df.exp2.model_index$full,
                 "action_cost" = df.exp2.model_index$action_cost,
                 "additional_action_cost" = df.exp2.model_index$additional_action_cost)

# function for computing model predictions
fun.model_predictions = function(fit, data, name){
  fit %>%
    fitted(newdata = data,
           re_formula = NA) %>%
    as_tibble() %>%
    clean_names() %>%
    rename_with(.fn  = ~ str_c(., ".", name))
}

# compute means and bootstrapped confidence intervals
df.plot = df.exp2a.long %>%
  select(participant,
         trial,
         praise,
         action_cost,
         additional_action_cost) %>%
  reframe(praise = smean.cl.boot(praise),
          index = c("mean", "low", "high"), 
          .by = c(trial, action_cost, additional_action_cost)) %>% 
  pivot_wider(names_from = index,
              values_from = praise)

# add model predictions
df.plot = df.plot %>%
  bind_cols(fun.model_predictions(fit = fit.exp2a.full,
                                  data = df.plot,
                                  name = "full"),
            fun.model_predictions(fit = fit.exp2a.action_cost,
                                  data = df.plot,
                                  name = "action_cost"),
            fun.model_predictions(fit = fit.exp2a.additional_action_cost,
                                  data = df.plot,
                                  name = "additional_action_cost"))

# function to make plots
fun.generative_model_plot = function(data, model, colors, xlabel, ylabel){
  data = data %>%
    rename(estimate = str_c("estimate.", model),
           q2_5 = str_c("q2_5.", model),
           q97_5 = str_c("q97_5.", model))

  # make plots
  plot = ggplot(data = data,
         mapping = aes(x = estimate,
                       y = mean)) +
    geom_abline(intercept = 0,
                slope = 1,
                linetype = 2) +
    geom_smooth(mapping = aes(y = estimate,
                              ymin = q2_5,
                              ymax = q97_5),
                stat = "identity",
                color = model_colors[model],
                alpha = 0.1,
                fill = model_colors[model]) +
    geom_linerange(mapping = aes(ymin = low,
                                 ymax = high),
                   linewidth = 1,
                   color = "gray80") +
    geom_point(size = 2.5,
               shape = 21,
               color = "black",
               fill = model_colors[model]) +
    annotate(geom = "text",
             label = data %>%
               summarize(r = cor(.data[["estimate"]], .data[["mean"]]),
                         rmse = fun.rmse(.data[["estimate"]], .data[["mean"]])) %>%
               mutate(across(.cols = everything(), 
                             .fns = ~ round(., 2)),
                      r = as.character(r),
                      r = substring(r, 2)) %>%
               unlist() %>%
               str_c(names(.), " = ", .),
             x = c(0, 0),
             y = c(100, 90),
             size = 7,
             hjust = 0) +
    labs(x = xlabel,
         y = "praise judgments") +
    scale_x_continuous(breaks = seq(0, 100, 25)) +
    scale_y_continuous(breaks = seq(0, 100, 25)) +
    coord_cartesian(xlim = c(0, 100),
                    ylim = c(0, 100)) +
    theme(text = element_text(size = 23),
          axis.title.x = element_markdown(color = model_colors[model]))
  
  if(ylabel == F){
      plot = plot + 
          theme(axis.title.y = element_blank())
  }
  return(plot)
}

# put plots together with patchwork
p1 = fun.generative_model_plot(data = df.plot,
                               model = "action_cost",
                               xlabel = "action cost model",
                               ylabel = T)

p2 = fun.generative_model_plot(data = df.plot,
                               model = "additional_action_cost",
                               xlabel = "additional action cost model",
                               ylabel = F)

p3 = fun.generative_model_plot(data = df.plot,
                               model = "full",
                               xlabel = "full model",
                               ylabel = F)

p1 + p2 + p3 +
  plot_annotation(tag_levels = "A") &
  theme(plot.tag = element_text(face = "bold"))


ggsave("../../figures/paper/figure5.pdf",
       width = 15,
       height = 5)

4.1.4 TABLES

4.1.4.1 Model fits

# function for computing the correlation and RMSE between model predictions and mean judgments
fun.r_rmse = function(fit, dv){
  df = fit %>%
    fitted(newdata = df.exp2a.means,
           re_formula = NA) %>%
    as_tibble() %>%
    clean_names()  %>%
    bind_cols(df.exp2a.means)

  if(sd(df$estimate) > 0){
    df = df %>%
      summarise(r = cor(.data[[dv]], estimate),
                rmse = fun.rmse(.data[[dv]], estimate))
  }else{
    df = df %>%
      summarise(rmse = fun.rmse(.data[[dv]], estimate))
  }
  return(df)
}

df.table1 = df.exp2a.elpd %>% 
    select(model,
           elpd = elpd_diff,
           elpd_se = se_diff) %>% 
    mutate(r = c(0,
                 fun.r_rmse(fit = fit.exp2a.action_cost,
                            dv = "praise")$r,
                 fun.r_rmse(fit = fit.exp2a.additional_action_cost,
                            dv = "praise")$r,
                 fun.r_rmse(fit = fit.exp2a.full,
                      dv = "praise")$r),
           rmse = c(fun.r_rmse(fit = fit.exp2a.baseline,
                               dv = "praise")$rmse,
                    fun.r_rmse(fit = fit.exp2a.action_cost,
                               dv = "praise")$rmse,
                    fun.r_rmse(fit = fit.exp2a.additional_action_cost,
                               dv = "praise")$rmse,
                    fun.r_rmse(fit = fit.exp2a.full,
                               dv = "praise")$rmse)) %>%
    mutate(across(where(is.numeric), ~ round(., 2))) %>%
    mutate(elpd = str_c(elpd, " (", elpd_se,")")) %>%
    select(-elpd_se) %>%
    left_join(df.exp2a.individual_fits %>%
                  count(best_model) %>% 
                  rename(`# best fit` = n,
                         model = best_model),
              by = "model") %>% 
    select(model, r, rmse, elpd, `# best fit`)

df.table2 =
    fit.exp2a.baseline %>%
    tidy(effects = "fixed",
         fix.intercept = F,
         conf.method = "HPDinterval") %>%
    mutate(model = "baseline") %>%
    bind_rows(fit.exp2a.action_cost %>%
                  tidy(effects = "fixed",
                       fix.intercept = F,
                       conf.method = "HPDinterval") %>%
                  mutate(model = "action_cost")) %>%
    bind_rows(fit.exp2a.additional_action_cost %>%
                  tidy(effects = "fixed",
                       fix.intercept = F,
                       conf.method = "HPDinterval") %>%
                  mutate(model = "additional_action_cost")) %>%
    bind_rows(fit.exp2a.full %>%
                  tidy(effects = "fixed",
                       fix.intercept = F,
                       conf.method = "HPDinterval") %>%
                  mutate(model = "full")) %>%
    mutate(across(where(is.numeric), ~ round(., 2)),
           label = str_c(estimate, " [", conf.low, ", ", conf.high, "]")) %>%
    select(model, term, label) %>%
    pivot_wider(names_from = term,
                values_from = label) %>%
    rename(intercept = Intercept) %>% 
    left_join(df.table1,
              by = "model") %>% 
    mutate(model = str_replace_all(model, pattern = "_", " "))

df.table2 %>% 
    # fun.print_table("latex")
    fun.print_table()
model                   intercept             action_cost        additional_action_cost  r     rmse   elpd             # best fit
baseline                47.54 [43.64, 50.97]  NA                 NA                      0.00  14.65  -786.71 (40.16)           1
action cost             17.19 [9.35, 25.22]   2.09 [1.69, 2.5]   NA                      0.68  10.78  -480.64 (35.25)           5
additional action cost  32.34 [27.04, 37.65]  NA                 2.39 [1.91, 2.85]       0.92   5.75   -53.14 (15.46)          31
full                    25.44 [18.58, 32.73]  0.61 [0.29, 0.94]  2.09 [1.58, 2.6]        0.93   5.26        0 (0)              13

4.1.4.2 All trials

set.seed(1)

# function for computing model predictions
fun.model_predictions = function(fit, data, name){
  fit %>%
    fitted(newdata = data,
           re_formula = NA) %>%
    as_tibble() %>%
    clean_names() %>%
    rename_with(.fn  = ~ str_c(., ".", name))
}

# compute means and bootstrapped confidence intervals
df.table = df.exp2a.long %>%
  select(participant,
         trial,
         praise,
         action_cost,
         additional_action_cost) %>%
  reframe(praise = smean.cl.boot(praise),
          index = c("mean", "low", "high"), 
          .by = c(trial, action_cost, additional_action_cost)) %>%
  pivot_wider(names_from = index,
              values_from = praise)

# add model predictions
df.table = df.table %>%
  bind_cols(fun.model_predictions(fit = fit.exp2a.full,
                                  data = df.table,
                                  name = "full"),
            fun.model_predictions(fit = fit.exp2a.action_cost,
                                  data = df.table,
                                  name = "action_cost"),
            fun.model_predictions(fit = fit.exp2a.additional_action_cost,
                                  data = df.table,
                                  name = "additional_action_cost"))

df.table %>% 
    mutate(across(.cols = where(is.numeric), .fns = ~ round(., 0))) %>% 
    mutate(data = str_c(mean, " (", low, ", ", high, ")")) %>%
    select(trial, action_cost, additional_action_cost, 
           model_action_cost = estimate.action_cost, 
           model_additional_action_cost = estimate.additional_action_cost, 
           model_full = estimate.full, 
           data) %>% 
    # fun.print_table("latex", digits = 0)
    fun.print_table("html",
                    digits = 0)
trial action_cost additional_action_cost model_action_cost model_additional_action_cost model_full data
1 14 10 46 56 55 59 (52, 65)
2 12 0 42 32 33 28 (21, 35)
3 15 12 48 61 60 64 (56, 71)
4 15 6 48 47 47 42 (35, 49)
5 12 0 42 32 33 32 (25, 38)
6 18 12 55 61 61 53 (46, 60)
7 23 10 65 56 60 61 (56, 67)
8 8 4 34 42 39 43 (38, 49)
9 16 0 51 32 35 31 (26, 38)
10 9 6 36 47 43 49 (43, 55)
11 12 10 42 56 54 62 (56, 68)
12 12 0 42 32 33 27 (20, 33)
13 9 8 36 51 48 59 (53, 65)
14 22 0 63 32 39 47 (38, 55)
15 15 0 48 32 35 30 (24, 36)
16 6 4 30 42 37 29 (23, 35)
17 6 2 30 37 33 30 (24, 36)
18 20 16 59 71 71 72 (67, 76)
19 9 0 36 32 31 26 (19, 33)
20 15 6 48 47 47 45 (38, 51)
21 25 16 69 71 74 75 (69, 80)
22 13 4 44 42 42 37 (31, 44)
23 23 10 65 56 60 60 (54, 66)
24 17 4 53 42 44 46 (40, 52)
25 12 0 42 32 33 33 (26, 40)
26 16 4 51 42 43 50 (44, 55)
27 18 6 55 47 49 56 (50, 61)
28 18 6 55 47 49 52 (46, 57)
29 19 16 57 71 70 64 (57, 71)
30 19 16 57 71 70 69 (62, 75)
31 24 22 67 85 86 75 (68, 81)
32 18 16 55 71 70 67 (61, 73)
33 14 8 46 51 51 54 (46, 61)
34 8 2 34 37 34 36 (30, 42)
35 14 10 46 56 55 62 (58, 67)
36 8 4 34 42 39 38 (32, 43)
37 18 12 55 61 61 70 (64, 75)
38 8 2 34 37 34 34 (28, 41)
39 19 10 57 56 58 68 (62, 74)
40 9 0 36 32 31 27 (22, 34)
41 12 0 42 32 33 29 (23, 36)
42 12 6 42 47 45 45 (38, 51)
43 12 0 42 32 33 34 (28, 40)
44 12 6 42 47 45 47 (41, 53)
45 15 0 48 32 35 33 (27, 40)
46 15 8 48 51 51 47 (41, 53)
47 18 2 55 37 41 40 (33, 47)
48 18 12 55 61 61 51 (44, 58)

4.2 EXP 2b: Inference

4.2.1 DATA

4.2.1.1 Read in data

df.exp2b = read_csv("../../data/experiment2b/experiment2b-trials.csv")

# filtering out instruction trials
df.exp2b.long = df.exp2b %>% 
    filter(!is.na(trial)) %>% 
    mutate(p_right = as.numeric(p_right),
           map1 = case_when(map_order == 0 ~ left_map,
                            map_order == 1 ~ right_map),
           map2 = case_when(map_order == 0 ~ right_map,
                            map_order == 1 ~ left_map),
           # taking counterbalancing into account
           p_right = case_when(map_order == 0 ~ p_right/100,
                               map_order == 1 ~ 1 - (p_right/100))) %>%
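    # extract the map number from each image file name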
    mutate(map1 = as.numeric(str_extract(map1, "\\d+(?=.png?)")),
           map2 = as.numeric(str_extract(map2, "\\d+(?=.png?)")),
           praise_val = case_when(praise == "low"  ~ 0,
                                  praise == "medium" ~ .5,
                                  praise == "high" ~ 1)) %>% 
    rename(participant = workerid) %>% 
    select(-c(proliferate.condition, coffee_getter, recipient, 
              error, left_map, right_map, map_order)) %>% 
    relocate(participant, trial, map1, map2, praise, praise_val, p_right) %>% 
    arrange(participant, trial)

4.2.1.2 Demographics

df.exp2b.demographics = read_csv("../../data/experiment2b/experiment2b-participants.csv")

df.exp2b.demographics %>%
    count(race) %>%
    fun.print_table()
race                n
African             1
Asian               2
Black               9
Black African       1
Black african       1
Caribbean           1
Caucasian           4
Coloured            1
Dutch               1
European caucasic   1
Irish               1
Mixed               1
West Slavic         1
White              11
White - European    1
White British       1
belgian             1
black               1
caucasian           1
mixed               1
white               4
white/caucasian     1
NA                  3

df.exp2b.demographics %>%
    summarise(age_mean = mean(age),
              age_sd = sd(age),
              time_mean = mean(time/(1000*60)), # converting from ms to minutes
              time_sd = sd(time/(1000*60)),
              n = n()) %>%
    fun.print_table(digits = 0)
age_mean  age_sd  time_mean  time_sd   n
      28      10         18       10  50

4.2.2 STATS

4.2.2.1 Model predictions

df.exp2b.model = df.exp2.models %>% 
    # add rescaled mean judgments from experiment 2a 
    left_join(df.exp2a.means %>% 
                  select(trial, empirical = praise),
              by = c("map" = "trial")) %>% 
    select(map, action_cost, additional_action_cost,
           contains("rescaled"), empirical) %>% 
    # model predictions for full model based on predictor weights in experiment 2a
    mutate(full = fitted(object = fit.exp2a.full,
                         newdata = .,
                         re_formula = NA)[,1]) %>% 
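    # rescale predictions to [0, 1], the scale of the praise_val categories used in experiment 2b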
    mutate(across(.cols = c(action_cost, additional_action_cost,
                            empirical, full),
                  .fns = ~ rescale(., to = c(0, 1)),
                  .names = "pred_{.col}"))

# long data frame with scaled predictions for each map 
df.exp2b.model.long = df.exp2b.long %>% 
    left_join(df.exp2b.model %>% 
                  select(map1 = map,
                         action_cost1 = pred_action_cost,
                         additional_action_cost1 = pred_additional_action_cost,
                         full1 = pred_full,
                         empirical1 = pred_empirical),
              by = "map1") %>% 
    left_join(df.exp2b.model %>% 
                  select(map2 = map,
                         action_cost2 = pred_action_cost,
                         additional_action_cost2 = pred_additional_action_cost,
                         full2 = pred_full,
                         empirical2 = pred_empirical),
              by = "map2") %>% 
    pivot_longer(cols = c(contains("1"), contains("2"), -map1, -map2),
                 names_sep = -1,
                 names_to = c("model", "path")) %>%
    mutate(path = str_c("path", path)) %>% 
    pivot_wider(names_from = path,
                values_from = value)

# compute likelihood and posterior
fun.exp2b.fit_sd_inference = function(sd, dd, models){
    dd %>% 
        rowwise() %>% 
        filter(model %in% models) %>% 
        mutate(likelihood1 = map2_dbl(.x = path1,
                                      .y = praise_val,
                                      .f = ~ dnorm(.x,
                                                   mean = praise_val,
                                                   sd = sd)),
               likelihood2 = map2_dbl(.x = path2,
                                      .y = praise_val,
                                      .f = ~ dnorm(.x,
                                                   mean = praise_val,
                                                   sd = sd))) %>% 
        ungroup() %>% 
        mutate(posterior = likelihood2/(likelihood1 + likelihood2),
               loss = (posterior - p_right)^2) %>% 
        summarize(loss = mean(loss)) %>% 
        pull(loss)
}
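
In words: each model's rescaled prediction for a map is scored with a Gaussian likelihood centered on the observed praise level (0, 0.5, or 1), and posterior = dnorm(path2, praise_val, sd) / (dnorm(path1, praise_val, sd) + dnorm(path2, praise_val, sd)) is the probability that the agent took map2 (the map that p_right refers to after counterbalancing), with the two maps weighted equally a priori. The squared difference between this posterior and participants' p_right judgments is the loss that the SD parameter is fitted to minimize below.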

# find best-fitting parameter for the SD in the Gaussians 
# param.exp2b.sd = suppressWarnings(optim(par = 1,
#                                         fn = fun.exp2b.fit_sd_inference,
#                                         dd = df.exp2b.model.long,
#                                         models = c("action_cost",
#                                                    "additional_action_cost",
#                                                    "full",
#                                                    "empirical")))
# 
# param_sd = param.exp2b.sd$par

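# presumably the value of param.exp2b.sd$par from the (commented-out) optimization above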
param_sd = 0.3984375

df.exp2b.model.long = df.exp2b.model.long %>% 
    rowwise() %>% 
    mutate(likelihood1 = map2_dbl(.x = path1,
                                  .y = praise_val,
                                  .f = ~ dnorm(.x,
                                               mean = praise_val,
                                               sd = param_sd)),
           likelihood2 = map2_dbl(.x = path2,
                                  .y = praise_val,
                                  .f = ~ dnorm(.x,
                                               mean = praise_val,
                                               sd = param_sd))) %>% 
    ungroup() %>% 
    mutate(posterior = likelihood2/(likelihood1 + likelihood2),
           loss = (posterior - p_right)^2,
           model = factor(model, levels = c("action_cost",
                                            "additional_action_cost",
                                            "full",
                                            "empirical")))

4.2.2.2 Individual participant model fits

models = c("action_cost",
           "additional_action_cost",
           "full")

df.exp2b.model.long %>%
    filter(model %in% models)  %>% 
    group_by(participant, model) %>% 
    summarize(loss = mean(loss)) %>% 
    filter(loss == min(loss)) %>% 
    ungroup() %>% 
    count(model) %>% 
    fun.print_table()
model                    n
action_cost             25
additional_action_cost   9
full                    16

4.2.3 PLOTS

4.2.3.1 Histograms

# plotting example trials
trials = c(7, 6,
           21, 13, 
           25, 32)

fun.load_image = function(trial){
    readPNG(str_c("../../figures/stimuli/experiment2b/images/experiment2b_trial_",
                  trial, ".png"))
}

df.plot = df.exp2b.long %>% 
    filter(trial %in% trials) %>% 
    mutate(p_right = 100 * p_right,
           praise = factor(praise,
                           levels = c("low", "medium", "high"),
                           labels = c("low praise",
                                      "medium praise",
                                      "high praise")))

df.mean = df.plot %>% 
    group_by(trial, map1, map2, praise) %>% 
    summarize(mean = mean(p_right)) %>% 
    ungroup() %>% 
    mutate(label = 1:6)

df.model = df.exp2b.model.long %>% 
    filter(trial %in% trials) %>% 
    group_by(trial, map1, map2, praise, model) %>% 
    summarize(prediction = mean(posterior)*100) %>% 
    mutate(praise = factor(praise,
                           levels = c("low", "medium", "high"),
                           labels = c("low praise",
                                      "medium praise",
                                      "high praise")),
           model = ifelse(model == "praise", "empirical", model))

df.images = df.plot %>%
    distinct(trial) %>%
    mutate(grob = map(.x = trial, .f = ~ fun.load_image(trial = .x)))

# plotting
ggplot(data = df.plot,
       mapping = aes(x = p_right)) +
    geom_histogram(aes(fill = praise),
                   color = "black",
                   binwidth = 10,
                   center = 5) +
    geom_custom(data = df.images,
                mapping = aes(data = grob,
                              x = -Inf,
                              y = Inf),
                grob_fun = function(x) rasterGrob(x,
                                                  interpolate = T,
                                                  vjust = -0.1,
                                                  hjust = 0,
                                                  width = unit(8.7, "cm"))) +
    geom_point(data = df.model,
               mapping = aes(y = 40,
                             x = prediction,
                             color = model),
               alpha = 0.5,
               shape = 16,
               size = 5) +
    geom_vline(data = df.mean,
               mapping = aes(xintercept = mean),
               linetype = 2,
               show.legend = F) + 
    geom_text(data = df.mean,
              mapping = aes(label = label,
                            x = 50,
                            y = 87),
              size = 10) + 
    facet_wrap(~ trial,
               scales = "free_x",
               nrow = 1) +
    coord_cartesian(clip = "off",
                    ylim = c(0, 40)) +
    labs(y = "# participants") + 
    scale_x_continuous(breaks = c(5, 50, 95),
                       labels = c("definitely\nleft", "unsure", "definitely\nright"),
                       limits = c(0, 100)) +
    scale_color_manual(labels = c("action cost model",
                                  "additional action cost model",
                                  "full model",
                                  "empirical model"),
                       values = c(df.exp2.model_index$action_cost,
                                  df.exp2.model_index$additional_action_cost,
                                  df.exp2.model_index$full,
                                  df.exp2.model_index$empirical)) +
    scale_fill_manual(values = c("low praise" = "gray30",
                                 "medium praise" = "gray50",
                                 "high praise" = "gray70")) +
    theme(legend.position = "bottom",
          legend.title = element_blank(),
          legend.text = element_text(size = 20),
          panel.grid.major.y = element_line(),
          axis.text.x = element_text(size = 16),
          axis.title.x = element_blank(),
          strip.background = element_blank(),
          strip.text = element_blank(),
          panel.spacing.x = unit(1.5, "cm"),
          # panel.spacing.y = unit(6, "cm"),
          plot.margin = margin(t = 5.8, l = 0.2, r = 0.5, b = 0.1, unit = "cm"),
          legend.box = "vertical")
    
ggsave("../../figures/paper/figure6.pdf",
       width = 24,
       height = 6)

4.2.3.2 Scatter plots

set.seed(1)

df.model = df.exp2b.model.long %>% 
    select(trial, model, posterior) %>% 
    distinct() %>% 
    mutate(posterior = 100 * posterior)

df.data = df.exp2b.long %>% 
    mutate(p_right = 100 * p_right) %>% 
    group_by(trial) %>% 
    summarize(mean_cl_boot(p_right)) %>% 
    rename(mean = y,
           low = ymin,
           high = ymax)

df.plot = df.data %>% 
    left_join(df.model,
              by = "trial")

df.text = df.plot %>% 
    group_by(model) %>% 
    summarize(r = cor(mean, posterior),
              rmse = sqrt(mean((mean - posterior)^2))) %>% 
    mutate(across(-model, ~ round(., 2)),
           r = as.character(r),
           r = str_c("r = ", substring(r, 2)),
           rmse = str_c("rmse = ", rmse)) %>% 
    pivot_longer(-model) %>% 
    mutate(x = 0,
           y = rep(c(100, 93), 4))

l.plot = list(data = df.plot,
              text = df.text)

fun.exp2b.scatterplot = function(l.plot, model_name){
    l.plot$data = l.plot$data %>% 
        filter(model == model_name)
    l.plot$text = l.plot$text %>% 
        filter(model == model_name)
    
    p = ggplot(data = l.plot$data,
           mapping = aes(x = posterior,
                         y = mean)) + 
        geom_abline(intercept = 0,
                    slope = 1,
                    linetype = 2,
                    linewidth = 0.3) + 
        geom_smooth(method = "lm",
                    mapping = aes(color = model,
                                  fill = model),
                    alpha = 0.1,
                    show.legend = F) +
        geom_linerange(mapping = aes(ymin = low,
                                     ymax = high),
                       color = "gray80") + 
        geom_point(mapping = aes(fill = model),
                   shape = 21,
                   show.legend = F,
                   size = 2) + 
        geom_text(data = l.plot$text,
                  mapping = aes(x = x,
                                y = y,
                                label = value),
                  size = 5,
                  hjust = 0) + 
        labs(x = str_c(str_replace_all(model_name, "_", " "), " model"),
             y = "mean judgments") + 
        scale_x_continuous(breaks = seq(0, 100, 25),
                           limits = c(0, 100)) + 
        scale_y_continuous(breaks = seq(0, 100, 25)) +
        coord_cartesian(ylim = c(0, 100)) + 
        scale_color_manual(values = c(
            "action_cost" = df.exp2.model_index$action_cost,
            "additional_action_cost" = df.exp2.model_index$additional_action_cost,
            "full" = df.exp2.model_index$full,
            "empirical" = df.exp2.model_index$empirical)) + 
        scale_fill_manual(values = c(
            "action_cost" = df.exp2.model_index$action_cost,
            "additional_action_cost" = df.exp2.model_index$additional_action_cost,
            "full" = df.exp2.model_index$full,
            "empirical" = df.exp2.model_index$empirical)) + 
        theme(text = element_text(size = 18),
              axis.title.x = element_markdown(color = df.exp2.model_index[[model_name]])) 
    
    if(model_name != "action_cost"){
        p = p + theme(axis.title.y = element_blank())
    }
    return(p)
}

p1 = fun.exp2b.scatterplot(l.plot, model_name = "action_cost")
p2 = fun.exp2b.scatterplot(l.plot, model_name = "additional_action_cost")
p3 = fun.exp2b.scatterplot(l.plot, model_name = "full")
p4 = fun.exp2b.scatterplot(l.plot, model_name = "empirical")

p1 + p2 + p3 + p4 + 
    plot_layout(nrow = 1) + 
    plot_annotation(tag_levels = "A") &
    theme(plot.tag = element_text(face = "bold"))

ggsave("../../figures/paper/figure7.pdf",
       width = 16,
       height = 4)

4.2.4 TABLES

4.2.4.1 Trial information

df.exp2b.model.long %>% 
    reframe(value = smean.cl.boot(p_right),
            name = c("mean", "low", "high"),
            .by = trial) %>% 
    pivot_wider() %>% 
    mutate(across(c(mean, low, high), ~ format(round(., 2),
                                               nsmall = 2,
                                               trim = T)),
           judgment = str_c(mean, " (", low, ", ", high, ")")) %>% 
    select(trial, judgment) %>% 
    left_join(df.exp2b.model.long %>% 
                  select(trial, map1, map2, praise, model, posterior) %>% 
                  distinct() %>% 
                  pivot_wider(names_from = model,
                              values_from = posterior),
              by = "trial") %>% 
    relocate(judgment, .after = last_col()) %>% 
    mutate(across(c(trial, map1, map2), ~ as.character(.))) %>% 
    # fun.print_table(format = "latex",
    #                 digits = 2)
    fun.print_table()
trial map1 map2 praise action_cost additional_action_cost full empirical judgment
1 15 16 low 0.67 0.47 0.49 0.50 0.62 (0.57, 0.67)
2 23 12 low 0.90 0.66 0.71 0.82 0.87 (0.84, 0.90)
3 45 2 low 0.60 0.50 0.50 0.52 0.69 (0.64, 0.73)
4 47 25 low 0.72 0.51 0.52 0.55 0.75 (0.71, 0.79)
5 33 34 low 0.63 0.60 0.60 0.71 0.82 (0.78, 0.85)
6 45 46 low 0.50 0.40 0.40 0.37 0.46 (0.42, 0.51)
7 5 6 low 0.28 0.28 0.28 0.28 0.14 (0.11, 0.17)
8 19 20 low 0.35 0.44 0.43 0.39 0.18 (0.14, 0.22)
9 35 36 low 0.63 0.63 0.63 0.82 0.85 (0.81, 0.88)
10 37 38 low 0.77 0.71 0.72 0.92 0.85 (0.81, 0.88)
11 39 40 low 0.80 0.66 0.68 0.91 0.82 (0.78, 0.86)
12 21 22 low 0.94 0.83 0.86 0.95 0.85 (0.82, 0.88)
13 9 10 medium 0.41 0.65 0.58 0.62 0.56 (0.50, 0.60)
14 19 20 medium 0.59 0.65 0.66 0.68 0.63 (0.58, 0.68)
15 41 42 medium 0.50 0.65 0.62 0.64 0.55 (0.50, 0.59)
16 5 6 medium 0.51 0.69 0.66 0.61 0.57 (0.52, 0.62)
17 27 28 medium 0.50 0.50 0.50 0.51 0.54 (0.51, 0.57)
18 47 48 medium 0.50 0.63 0.58 0.53 0.54 (0.49, 0.58)
19 24 26 medium 0.50 0.50 0.50 0.51 0.53 (0.48, 0.58)
20 42 46 medium 0.53 0.53 0.53 0.51 0.62 (0.57, 0.66)
21 13 14 medium 0.50 0.33 0.43 0.52 0.37 (0.32, 0.42)
22 21 20 medium 0.69 0.50 0.53 0.67 0.60 (0.54, 0.65)
23 7 8 medium 0.50 0.42 0.40 0.52 0.45 (0.40, 0.50)
24 39 28 medium 0.51 0.46 0.48 0.60 0.65 (0.60, 0.69)
25 11 12 high 0.50 0.10 0.13 0.06 0.22 (0.18, 0.27)
26 17 18 high 0.95 0.91 0.93 0.93 0.89 (0.86, 0.92)
27 21 22 high 0.22 0.13 0.13 0.13 0.18 (0.14, 0.22)
28 37 38 high 0.11 0.12 0.11 0.11 0.16 (0.12, 0.20)
29 3 4 high 0.50 0.27 0.30 0.22 0.23 (0.20, 0.27)
30 23 24 high 0.37 0.24 0.24 0.31 0.19 (0.16, 0.22)
31 39 28 high 0.47 0.33 0.35 0.35 0.34 (0.29, 0.39)
32 13 14 high 0.90 0.13 0.31 0.33 0.60 (0.55, 0.65)
33 29 30 high 0.50 0.50 0.50 0.53 0.37 (0.33, 0.40)
34 31 32 high 0.40 0.44 0.43 0.48 0.20 (0.17, 0.24)
35 11 13 high 0.32 0.42 0.39 0.47 0.50 (0.45, 0.56)
36 35 1 high 0.50 0.50 0.50 0.47 0.51 (0.46, 0.56)

5 EXP 3: Fishermen

5.1 EXP 3a: Choices

5.1.1 DATA

5.1.1.1 Read in data

# choice data 
df.exp3a = read_csv("../../data/experiment3a/experiment3a-data.csv") %>%
    clean_names() %>%
    mutate(participant = as.numeric(as.factor(user))) %>%
    # remove 3 judgments by participant 21 that have been recorded incorrectly
    filter(judgment != -1) %>%
    relocate(participant) %>%
    arrange(participant)

df.exp3a.model = read_csv(
    "../../data/experiment3a/experiment3a-model.csv",
    show_col_types = FALSE) %>%
    clean_names() %>% 
    select(trial, contains("pred_")) %>% 
    distinct() %>% 
    rename_with(.fn = ~ str_remove(.x, "pred_"),
                .cols = contains("pred_"))

df.exp3a %>% 
    distinct(participant) %>% 
    summarize(n_participants = n())
# A tibble: 1 × 1
  n_participants
           <int>
1             50

5.1.1.2 Demographics

# number of participants
df.exp3a %>% 
    distinct(participant) %>% 
    nrow()
[1] 50

5.1.2 STATS

5.1.2.1 Fit optimal heuristic model to the data

df.data = df.exp3a %>% 
    left_join(df.exp3a.model %>% 
                  select(trial, optimal_heuristic),
              by = "trial")

fit.exp3a.optimal_heuristic = brm(
    formula = judgment ~ 1 + optimal_heuristic + (1 + optimal_heuristic | participant),
    data = df.data,
    seed = 1,
    cores = 4,
    file = "cache/fit.exp3a.optimal_heuristic")

df.exp3a.model = df.exp3a.model %>% 
    mutate(optimal_heuristic_fitted = fit.exp3a.optimal_heuristic %>% 
               fitted(newdata = df.exp3a.model,
                      re_formula = NA) %>% 
               as_tibble() %>% 
               clean_names() %>% 
               pull(estimate))

5.1.3 PLOTS

5.1.3.1 Selection of trials

set.seed(1)

trial_selection = c("t1_a3b1c2", "t1_a2b1c1",
                    "t2_a3b1c1", "t3_a3b3c3", 
                    "t2_a2b2c3", "t3_a3b1c2",
                    "t2_a1b1c3", "t1_a1b2c2")

df.plot = df.exp3a %>% 
    filter(trial %in% trial_selection) %>% 
    mutate(trial = factor(trial, levels = trial_selection))

df.model = df.exp3a.model %>% 
    select(-optimal_heuristic_fitted) %>%
    filter(trial %in% trial_selection) %>%
    pivot_longer(cols = -trial,
                 names_to = "model",
                 values_to = "prediction")

df.text = df.plot %>% 
    distinct(trial) %>% 
    arrange(trial) %>% 
    mutate(label = 1:n())

df.index = df.plot %>% 
    distinct(trial) %>% 
    separate(trial, into = c("tree", "action"),
             sep = "_",
             remove = F) %>% 
    separate(action, into = c("a", "b", "c"), sep = c(2, 4)) %>% 
    mutate(across(.cols = -trial,
                  .fns = ~ str_extract(.x, "[0-9]")),
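           # build a string of tree icons (one per tree) and a label of the a/b/c values with a in bold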
           tree_label = map(.x = tree,
                            .f = ~ rep(fontawesome("fa-tree"), .x) %>% 
                                str_flatten()),
           player_label = str_c("**", a, "**", " ", b, " ", c),
           player = NA)

ggplot(data = df.plot,
       mapping = aes(x = 1,
                     y = judgment)) + 
    stat_summary(fun = "mean",
                 geom = "bar",
                 fill = "gray80",
                 color = "black") + 
    stat_summary(fun.data = "mean_cl_boot",
                 geom = "linerange",
                 linewidth = 1.5) + 
    geom_hline(yintercept = 0.5,
               linetype = "dashed") +
    geom_point(alpha = 0.1,
               position = position_jitter(width = 0.3, height = 0)) + 
    geom_point(data = df.model,
               mapping = aes(y = prediction,
                             fill = model,
                             group = model),
               shape = 21,
               size = 5,
               alpha = 0.5,
               position = position_dodge(width = 0.75)) +
    geom_text(data = df.text,
              mapping = aes(x = 1,
                            y = 1.3,
                            label = label),
              size = 10,
              hjust = 0.5) +
    geom_text(data = df.index, 
              mapping = aes(y = 1.2,
                            x = 1,
                            label = tree_label),
              family = 'fontawesome-webfont',
              size = 6,
              color = "darkgreen") +
    geom_richtext(data = df.index,
              mapping = aes(y = 1.1,
                            x = 1,
                            label = player_label),
              fill = NA,
              label.color = NA,
              size = 6) +
    coord_cartesian(clip = "off",
                    ylim = c(0, 1)) +
    facet_wrap(~ factor(trial, levels = trial_selection),
               ncol = 8) + 
    scale_y_continuous(breaks = seq(0, 1, .25),
                       labels = seq(0, 100, 25)) +
    scale_fill_manual(values = c("k_level" = "blue",
                                 "optimal_heuristic" = "orange"),
                      labels = c("recursive reasoning model", "heuristic choice model")) +
    labs(y = "action judgment<br><span style = 'font-size:16pt'>(0 = fish, 100 = trees)</span>",
         fill = "model") + 
    theme(legend.position = "bottom",
          legend.title = element_blank(),
          axis.text.x = element_blank(),
          axis.title.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.title.y = element_textbox_simple(halign = 0.5,
                                                orientation = "left-rotated"),
          strip.background = element_blank(),
          strip.text = element_blank(),
          plot.margin = margin(t = 2.5, l = 0.1, r = 0.3, b = 0.1, unit = "cm")) + 
    guides(fill = guide_legend(override.aes = list(size = 5)))

ggsave("../../figures/paper/figure9.pdf",
       width = 14,
       height = 5)

5.1.3.2 Scatter plots

set.seed(1)

df.plot = df.exp3a %>% 
    reframe(value = smean.cl.boot(judgment),
            name = c("mean", "low", "high"), 
            .by = trial) %>% 
    pivot_wider() %>% 
    left_join(df.exp3a.model,
              by = "trial")

fun.exp3a.scatter = function(data, model, color, modelname, ytitle){
    p = ggplot(data = data,
               mapping = aes(x = .data[[model]],
                             y = mean)) +
        geom_smooth(method = "lm",
                    fill = color,
                    color = color,
                    alpha = 0.1,
                    show.legend = F) +
        geom_linerange(mapping = aes(ymin = low,
                                     ymax = high),
                       color = "gray80") + 
        geom_point(fill = color,
                   shape = 21,
                   show.legend = F,
                   size = 3) +
        annotate(geom = "text",
                 label = df.plot %>%
                     summarize(r = cor(mean, .data[[model]]),
                               rmse = fun.rmse(mean, .data[[model]])) %>%
                     mutate(across(.fns = ~ round(., 2))) %>%
                     unlist() %>%
                     str_c(names(.), " = ", .),
                 x = c(0, 0),
                 y = c(1, 0.92),
                 size = 7,
                 hjust = 0) +
        scale_x_continuous(breaks = seq(0, 1, 0.25),
                           labels = seq(0, 100, 25)) +
        scale_y_continuous(breaks = seq(0, 1, 0.25),
                           labels = seq(0, 100, 25)) +
        coord_cartesian(xlim = c(0, 1),
                        ylim = c(0, 1)) +
        labs(x = modelname,
             y = "action judgment<br><span style = 'font-size:16pt'>(0 = fish, 100 = trees)</span>") + 
        theme(legend.position = "none",
              axis.title.x = element_markdown(color = color),
              axis.title.y = element_textbox_simple(halign = 0.5,
                                                    orientation = "left-rotated"))
    
    if(!ytitle){
        p = p + theme(axis.title.y = element_blank())
    }
    return(p)
}

p1 = fun.exp3a.scatter(data = df.plot,
                             model = "k_level",
                             color = "blue",
                             modelname = "recursive reasoning model",
                             ytitle = T)

p2 = fun.exp3a.scatter(data = df.plot,
                             model = "optimal_heuristic_fitted",
                             color = "orange",
                             modelname = "heuristic choice model",
                             ytitle = F)

p1 + p2 + 
    plot_annotation(tag_levels = "A") & 
    theme(plot.tag = element_text(face = "bold"))

ggsave("../../figures/paper/figure10.pdf",
       width = 16,
       height = 6)

5.1.4 TABLES

5.1.4.1 Trial information

df.exp3a %>% 
    distinct(trial) %>% 
    mutate(index = 1:n()) %>% 
    mutate(trees = str_sub(trial, start = 2, end = 2),
           a = str_sub(trial, start = 5, end = 5),
           b = str_sub(trial, start = 7, end = 7),
           c = str_sub(trial, start = 9, end = 9)) %>% 
    mutate(across(-trial, ~ as.numeric(.))) %>% 
    left_join(df.exp3a.model,
              by = "trial") %>% 
    left_join(df.exp3a %>% 
                  reframe(value = smean.cl.boot(judgment),
                          name = c("mean", "low", "high"),
                          .by = trial) %>% 
                  pivot_wider(),
              by = "trial") %>% 
    mutate(across(.cols = where(is.numeric),
                  .fns = ~ round(., 2))) %>%
    mutate(judgment = str_c(mean, " (", low, ", ", high, ")")) %>% 
    mutate(across(.cols = index:c, .fns = as.character)) %>%
    select(-c(trial, mean, low, high, optimal_heuristic)) %>% 
    fun.print_table()
index trees a b c k_level optimal_heuristic_fitted judgment
1 1 1 2 2 0.95 0.95 0.94 (0.87, 0.99)
2 1 2 2 2 0.35 0.43 0.42 (0.28, 0.56)
3 1 3 1 2 0.00 0.17 0.02 (0.01, 0.03)
4 1 2 3 3 1.00 0.95 0.87 (0.77, 0.96)
5 1 2 1 1 0.08 0.17 0.08 (0.02, 0.17)
6 1 1 3 3 1.00 0.95 0.91 (0.81, 0.98)
7 1 3 3 3 0.26 0.43 0.36 (0.23, 0.49)
8 1 1 1 2 0.67 0.56 0.78 (0.66, 0.89)
9 1 2 1 3 0.04 0.17 0.12 (0.04, 0.24)
10 2 1 2 2 0.15 0.17 0.2 (0.07, 0.35)
11 2 2 2 2 0.35 0.43 0.56 (0.42, 0.69)
12 2 3 1 2 0.05 0.17 0.06 (0.02, 0.13)
13 2 2 3 3 1.00 0.95 0.84 (0.73, 0.95)
14 2 2 1 1 0.69 0.56 0.79 (0.65, 0.91)
15 2 1 3 3 0.09 0.17 0.16 (0.06, 0.27)
16 2 3 3 3 0.26 0.43 0.4 (0.27, 0.54)
17 2 1 1 2 0.42 0.56 0.34 (0.2, 0.49)
18 2 2 1 3 0.97 0.95 0.89 (0.8, 0.96)
19 3 1 2 2 0.71 0.95 0.78 (0.64, 0.91)
20 3 2 2 2 0.61 0.69 0.59 (0.46, 0.7)
21 3 3 1 2 0.86 0.56 0.82 (0.69, 0.92)
22 3 2 3 3 0.01 0.17 0.15 (0.05, 0.28)
23 3 2 1 1 0.61 0.95 0.9 (0.81, 0.97)
24 3 1 3 3 0.09 0.17 0.13 (0.03, 0.24)
25 3 3 3 3 0.26 0.43 0.56 (0.39, 0.72)
26 3 1 1 2 0.50 0.56 0.68 (0.55, 0.81)
27 3 2 1 3 0.19 0.56 0.25 (0.14, 0.38)
28 1 2 1 2 0.07 0.17 0.13 (0.06, 0.21)
29 1 3 2 2 0.01 0.17 0.07 (0.02, 0.16)
30 1 1 1 3 0.90 0.56 0.82 (0.71, 0.92)
31 1 1 2 3 1.00 0.95 0.86 (0.75, 0.97)
32 1 3 2 3 0.00 0.17 0.06 (0.01, 0.14)
33 1 1 1 1 0.44 0.43 0.58 (0.46, 0.71)
34 1 3 1 3 0.00 0.17 0.06 (0.01, 0.13)
35 1 2 2 3 0.75 0.56 0.47 (0.33, 0.61)
36 1 3 1 1 0.01 0.17 0.06 (0.01, 0.13)
37 2 2 1 2 0.59 0.56 0.66 (0.52, 0.79)
38 2 3 2 2 0.01 0.17 0.06 (0.01, 0.14)
39 2 1 1 3 0.59 0.95 0.92 (0.84, 0.97)
40 2 1 2 3 0.06 0.17 0.21 (0.09, 0.34)
41 2 3 2 3 0.00 0.17 0.13 (0.05, 0.23)
42 2 1 1 1 0.55 0.69 0.66 (0.53, 0.77)
43 2 3 1 3 0.60 0.56 0.48 (0.34, 0.61)
44 2 2 2 3 0.75 0.56 0.75 (0.63, 0.86)
45 2 3 1 1 0.50 0.17 0.26 (0.12, 0.42)
46 3 2 1 2 0.53 0.56 0.72 (0.6, 0.84)
47 3 3 2 2 0.98 0.95 0.89 (0.78, 0.97)
48 3 1 1 3 0.26 0.17 0.22 (0.09, 0.37)
49 3 1 2 3 0.43 0.56 0.43 (0.28, 0.6)
50 3 3 2 3 0.80 0.56 0.76 (0.64, 0.86)
51 3 1 1 1 0.50 0.56 0.82 (0.7, 0.93)
52 3 3 1 3 0.60 0.56 0.77 (0.67, 0.86)
53 3 2 2 3 0.05 0.17 0.27 (0.16, 0.42)
54 3 3 1 1 0.83 0.95 0.89 (0.78, 0.97)
    # fun.print_table(format = "latex",
    #                 digits = 2)

5.2 EXP 3b: Blame

5.2.1 DATA

5.2.1.1 Read in data

df.exp3b = read_csv("../../data/experiment3b/experiment3b-data.csv") %>%
  clean_names() %>%
  separate(col = trial,
           into = c("trees", "strength", "actions", "player"),
           sep = "_",
           remove = F) %>%
  mutate(actions = str_replace_all(actions, "[^[:alnum:]]", "")) %>%
  separate(col = actions,
           into = c("action_a", "action_b", "action_c"),
           sep = c(1, 2)) %>%
  separate(col = strength,
           into = c("strength_a", "strength_b", "strength_c"),
           sep = c(2, 4)) %>%
  mutate(across(trees:strength_c, ~ as.numeric(str_extract(., "\\-*\\d+\\d*")))) %>%
  mutate(across(action_a:action_c, ~ factor(.,
                                            levels = 0:1,
                                            labels = c("fish", "trees")))) %>%
  mutate(player = factor(player,
                         levels = 0:2,
                         labels = letters[1:3])) %>%
  rename(participant = user) %>%
  arrange(participant)


df.exp3b.selection = read_csv("../../data/experiment3b/experiment3b-data2.csv") %>% 
    clean_names() %>%
    separate(col = trial,
             into = c("trees", "strength", "actions", "player"),
             sep = "_",
             remove = F) %>%
    mutate(actions = str_replace_all(actions, "[^[:alnum:]]", "")) %>%
    separate(col = actions,
             into = c("action_a", "action_b", "action_c"),
             sep = c(1, 2)) %>%
    separate(col = strength,
             into = c("strength_a", "strength_b", "strength_c"),
             sep = c(2, 4)) %>%
    mutate(across(trees:strength_c, ~ as.numeric(str_extract(., "\\-*\\d+\\d*")))) %>%
    mutate(across(action_a:action_c, ~ factor(.,
                                              levels = 0:1, 
                                              labels = c("fish", "trees")))) %>%
    mutate(player = factor(player,
                           levels = 0:2,
                           labels = letters[1:3]))

5.2.1.2 Demographics

# number of participants
df.exp3b %>% 
    distinct(participant) %>% 
    nrow()
[1] 59
# number of judgments per trial 
df.exp3b %>% 
    count(trial) %>% 
    summarize(min = min(n),
              max = max(n))
# A tibble: 1 × 2
    min   max
  <int> <int>
1    16    74
# number of judgments per participant 
df.exp3b %>% 
    count(participant) %>% 
    summarize(min = min(n/3),
              max = max(n/3))
# A tibble: 1 × 2
    min   max
  <dbl> <dbl>
1    20    23

5.2.2 STATS

5.2.2.1 Model predictions

df.exp3b.model = read_csv(
    "../../data/experiment3b/experiment3b-model.csv",
    show_col_types = F) %>%
    clean_names() %>%
    rename(blame = `blame_17`,
           blame_zscored = `blame_18`) %>% 
    select(-c(x1, blame_zscored))

df.exp3b.model.pivotality = df.exp3b.model %>% 
    filter(model == "pivotality") %>% 
    distinct(trial, blame) %>% 
    rename(pivotality = blame)

# keep predictions from the best-fitting recursive reasoning model from experiment 3a
df.exp3b.model.rationality = df.exp3b.model %>% 
    filter(model == "rationality",
           k == 3,
           rationality_beta == 1.5) %>% 
    distinct(trial, blame) %>% 
    select(trial, rationality = blame)


df.exp3b = df.exp3b %>% 
    left_join(df.exp3b.model.pivotality,
              by = "trial") %>% 
    left_join(df.exp3b.model.rationality,
              by = "trial")

5.2.2.2 Model fits

5.2.2.2.1 Overall

  • note: it is unclear whether random intercepts are needed here, since the judgments were z-scored; they are unlikely to do harm, though (a quick check is sketched at the end of this subsection)

fit.exp3b.baseline = brm(
    formula = judgment ~ 1 + (1 | participant),
    data = df.exp3b,
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.baseline")

fit.exp3b.pivotality = brm(
    formula = judgment ~ 1 + pivotality + (1 + pivotality | participant),
    data = df.exp3b,
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.pivotality")

fit.exp3b.rationality = brm(
    formula = judgment ~ 1 + rationality + (1 + rationality | participant),
    data = df.exp3b,
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.rationality")

fit.exp3b.full = brm(
    formula = judgment ~ 1 + rationality + pivotality + 
        (1 + rationality + pivotality | participant),
    data = df.exp3b,
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.full")

# add crossvalidation  
fit.exp3b.baseline = add_criterion(fit.exp3b.baseline,
                                   criterion = "loo",
                                   reloo = T,
                                   file = "cache/fit.exp3b.baseline")

fit.exp3b.pivotality = add_criterion(fit.exp3b.pivotality,
                                      criterion = "loo",
                                      reloo = T,
                                      file = "cache/fit.exp3b.pivotality")

fit.exp3b.rationality = add_criterion(fit.exp3b.rationality,
                                      criterion = "loo",
                                      reloo = T,
                                      file = "cache/fit.exp3b.rationality")

fit.exp3b.full = add_criterion(fit.exp3b.full,
                                      criterion = "loo",
                                      reloo = T,
                                      file = "cache/fit.exp3b.full")


# getting the difference between models using LOOCV
df.exp3b.elpd = loo_compare(fit.exp3b.baseline,
                            fit.exp3b.pivotality,
                            fit.exp3b.rationality,
                            fit.exp3b.full) %>%
    as_tibble(rownames = "model") %>%
    rownames_to_column() %>%
    mutate(model = factor(model,
                          levels = str_c("fit.exp3b.", c("baseline",
                                                         "pivotality",
                                                         "rationality",
                                                         "full")),
                          labels = c("baseline",
                                     "pivotality",
                                     "rationality",
                                     "full"))) %>% 
    remove_rownames()
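
  • quick check of the note above (an optional sketch, not part of the original analysis): if the judgments were z-scored within participant, the participant-level intercept SD of the baseline model should be close to zero.

# optional check: group-level standard deviations of the baseline model
VarCorr(fit.exp3b.baseline)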

5.2.2.2.2 Individual

# fit each model to one participant for compiling

fit.exp3b.baseline.individual = brm(
    formula = judgment ~ 1,
    data = df.exp3b  %>% 
        filter(participant == "10_9qveRxa"),
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.baseline.individual")

fit.exp3b.pivotality.individual = brm(
    formula = judgment ~ 1 + pivotality,
    data = df.exp3b  %>% 
        filter(participant == "10_9qveRxa"),
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.pivotality.individual")

fit.exp3b.rationality.individual = brm(
    formula = judgment ~ 1 + rationality,
    data = df.exp3b  %>% 
        filter(participant == "10_9qveRxa"),
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.rationality.individual")

fit.exp3b.full.individual = brm(
    formula = judgment ~ 1 + rationality + pivotality,
    data = df.exp3b  %>% 
        filter(participant == "10_9qveRxa"),
    cores = 4,
    seed = 1,
    file = "cache/fit.exp3b.full.individual")

# fit models to individual participants
df.exp3b.individual_fits = df.exp3b %>%
    group_by(participant) %>%
    nest() %>%
    ungroup() %>%
    # fit model for each participant
    mutate(baseline = map(.x = data,
                          .f = ~ update(fit.exp3b.baseline.individual,
                                        newdata = .x,
                                        seed = 1)),
           pivotality = map(.x = data,
                            .f = ~ update(fit.exp3b.pivotality.individual,
                                          newdata = .x,
                                          seed = 1)),
           rationality = map(.x = data,
                             .f = ~ update(fit.exp3b.rationality.individual,
                                           newdata = .x,
                                           seed = 1)),
           full = map(.x = data,
                         .f = ~ update(fit.exp3b.full.individual,
                                       newdata = .x,
                                       seed = 1))) %>%
    mutate(baseline = map(.x = baseline,
                          ~ add_criterion(.x,
                                          criterion = c("loo"))),
           pivotality = map(.x = pivotality,
                            ~ add_criterion(.x,
                                            criterion = c("loo"))),
           rationality = map(.x = rationality,
                             ~ add_criterion(.x,
                                             criterion = c("loo"))),
           full = map(.x = full,
                         ~ add_criterion(.x,
                                         criterion = c("loo"))))

df.exp3b.individual_fits = df.exp3b.individual_fits %>% 
    mutate(model_comparison = pmap(.l = list(baseline = baseline,
                                             pivotality = pivotality,
                                             rationality = rationality,
                                             full = full),
                                   .f = ~ loo_compare(..1, ..2, ..3, ..4)),
           best_model = map_chr(.x = model_comparison,
                                .f = ~ rownames(.) %>%
                                    .[1]),
           best_model = factor(best_model,
                               levels = c("..1", "..2", "..3", "..4"),
                               labels = c("baseline",
                                          "pivotality",
                                          "rationality",
                                          "full")))

# getting best-fitting model
df.exp3b.individual_fits %>%
  count(best_model)
 
save(df.exp3b.individual_fits, file = "cache/df.exp3b.individual_fits.RData")
load(file = "cache/df.exp3b.individual_fits.RData")

5.2.2.3 Fit model predictions to means

df.exp3b.regression = df.exp3b %>% 
    group_by(trial, pivotality, rationality) %>%
    summarize(judgment = mean(judgment)) %>% 
    ungroup() %>% 
    bind_cols(fitted(object = fit.exp3b.pivotality,
                     newdata = .,
                     re_formula = NA) %>%
                  as_tibble() %>% 
                  rename_with(.cols = everything(),
                              .fn = ~ str_c("pivotality_", .))) %>% 
    bind_cols(fitted(object = fit.exp3b.rationality,
                     newdata = .,
                     re_formula = NA) %>%
                  as_tibble() %>% 
                  rename_with(.cols = everything(),
                              .fn = ~ str_c("rationality_", .))) %>% 
    bind_cols(fitted(object = fit.exp3b.full,
                     newdata = .,
                     re_formula = NA) %>%
                  as_tibble() %>% 
                  rename_with(.cols = everything(),
                              .fn = ~ str_c("full_", .))) %>% 
    clean_names()

5.2.3 PLOTS

5.2.3.1 Selection of trials

set.seed(1)

player_colors = c("#e46f27", "#7f659f", "#1c4a7a")

selection = c("T:1 S:313 A:FFF",
              "T:2 S:221 A:TTF",
              "T:3 S:331 A:TTF",
              "T:3 S:133 A:FFF",
              "T:3 S:213 A:FFF",
              "T:3 S:213 A:FTF",
              "T:3 S:112 A:FFF",
              "T:3 S:112 A:FTF",
              "T:3 S:322 A:FTT",
              "T:3 S:223 A:FFF")

df.plot = df.exp3b.selection %>% 
    mutate(across(contains("action"),
                  ~ factor(., levels = c("fish", "trees"),
                           labels = c("F", "T"))),
           label = str_c("T:", trees, 
                         " S:", strength_a, strength_b, strength_c,
                         " A:", action_a, action_b, action_c)) %>% 
    mutate(label = factor(label,
                          levels = selection,
                          labels = selection)) %>% 
    filter(label %in% selection)

df.text = df.plot %>% 
    select(-c(trial, judgment, player)) %>% 
    distinct() %>% 
    pivot_longer(cols = -c(trees, label),
                 names_to = c("type", "player"),
                 values_transform = list(value = as.character),
                 names_sep = "_",
                 values_to = "value") %>% 
    pivot_wider(names_from = type,
                values_from = value)

df.index = df.text %>% 
    distinct(label) %>% 
    arrange(label) %>% 
    mutate(index = 1:n(),
           player = NA)

df.trees = df.plot %>% 
    select(label, trees) %>% 
    distinct() %>% 
    mutate(tree = map(.x = trees,
                      .f = ~ rep(fontawesome("fa-tree"), .x) %>% 
                          str_flatten()),
           player = NA)

df.model = df.plot %>% 
    group_by(trial, label) %>% 
    summarize(judgment = mean(judgment)) %>% 
    ungroup() %>% 
    left_join(df.exp3b.model.pivotality,
              by = c("trial")) %>% 
    left_join(df.exp3b.model.rationality,
              by = c("trial")) %>% 
    mutate(model_pivotality = lm(formula = judgment ~ pivotality,
                                 data = .)$fitted.values,
           model_rationality = lm(formula = judgment ~ rationality,
                                 data = .)$fitted.values,
           model_full = lm(formula = judgment ~ pivotality + rationality,
                                 data = .)$fitted.values) %>% 
    select(-c(judgment, pivotality, rationality)) %>%
    pivot_longer(cols = -c(trial, label),
                 names_to = "model",
                 values_to = "judgment") %>% 
    mutate(player = str_sub(trial, -1),
           player = factor(player,
                           levels = 0:2,
                           labels = letters[1:3])) %>% 
    filter(model == "model_full")

ggplot(data = df.plot,
       mapping = aes(x = player,
                     y = judgment,
                     fill = player)) + 
    stat_summary(fun.data = "mean_cl_boot",
                 geom = "pointrange",
                 shape = 21,
                 size = 0.75,
                 show.legend = F) + 
    geom_point(mapping = aes(color = player),
               position = position_jitter(width = 0.1, height = 0),
               alpha = 0.1,
               show.legend = F) + 
    geom_point(data = df.model,
               alpha = 1,
               shape = 1,
               color = "black",
               size = 3,
               show.legend = F) + 
    geom_text(data = df.text, 
              mapping = aes(y = 1.15,
                            x = player,
                            label = strength),
              size = 6) +
    geom_text(data = df.text, 
              mapping = aes(y = 1.05,
                            x = player,
                            label = action),
              size = 6) +
    geom_text(data = df.trees, 
              mapping = aes(y = 1.26,
                            x = 2,
                            label = tree),
              family = 'fontawesome-webfont',
              size = 6,
              color = "darkgreen") +
    geom_text(data = df.index, 
              mapping = aes(y = 1.4,
                            x = 2,
                            label = index),
              size = 10) +
    facet_grid(~ label) +
    scale_y_continuous(breaks = seq(0, 1, 0.25),
                       labels = seq(0, 100, 25)) +
    scale_fill_manual(values = player_colors) +
    scale_color_manual(values = player_colors) +
    # scale_shape_manual(values = 22:24) +
    scale_x_discrete(labels = c("A", "B", "C")) +
    coord_cartesian(ylim = c(0, 1),
                    clip = "off") + 
    labs(x = "fisherman",
         y = "blame judgment") +
    theme(strip.text = element_blank(),
          strip.background = element_blank(),
          panel.grid.major.y = element_line(),
          panel.spacing = unit(0.75, "cm"),
          plot.margin = margin(t = 3, b = 0.5, l = 0.5, r = 0.5, "cm"))

ggsave("../../figures/paper/figure11.pdf",
       width = 20,
       height = 5)

5.2.3.2 Scatter plots

set.seed(1)

df.plot = df.exp3b %>% 
    reframe(judgment = smean.cl.boot(judgment),
            name = c("mean", "low", "high"), 
            .by = trial) %>% 
    pivot_wider(names_from = name,
                values_from = judgment) %>% 
    left_join(df.exp3b.regression %>% 
                  select(-judgment),
              by = c("trial"))

df.text = df.plot %>% 
    reframe(across(.cols = c(pivotality_estimate, rationality_estimate, full_estimate),
                   .fns = ~ list(r = cor(mean, .),
                                 rmse = fun.rmse(mean, .)))) %>% 
    mutate(name = c("r", "rmse")) %>%
    pivot_longer(cols = -name,
                 names_to = "model",
                 values_to = "value") %>% 
    unnest(value) %>% 
    mutate(value = round(value, digits = 2),
           value = ifelse(name == "r", str_sub(value, start = 2), value),
           label = str_c(name, " = ", value),
           x = -1.5,
           y = ifelse(name == "r", 1.5, 1.25))

fun.exp3b.scatter = function(data, text, modelname, color, ytitle = T){
    p = ggplot(data = data,
               mapping = aes(x = .data[[str_c(modelname, "_estimate")]],
                             y = mean)) + 
        geom_abline(intercept = 0,
                    slope = 1,
                    linetype = 2,
                    linewidth = 0.3) + 
        geom_smooth(mapping = aes(y = .data[[str_c(modelname, "_estimate")]],
                                  ymin = .data[[str_c(modelname, "_q2_5")]],
                                  ymax = .data[[str_c(modelname, "_q97_5")]]),
                    stat = "identity",
                    color = color,
                    fill = color,
                    alpha = 0.1) +
        geom_linerange(alpha = 0.1,
                       mapping = aes(ymin = low,
                                     ymax = high)) +
        geom_point(fill = color,
                   shape = 21) + 
        geom_text(data = text %>% 
                      filter(model == str_c(modelname, "_estimate")),
                  mapping = aes(label = label,
                                x = x,
                                y = y),
                  hjust = 0,
                  size = 6) +
        labs(x = str_c(modelname, " model"),
             y = "blame judgment") +
        scale_y_continuous(breaks = seq(-1.5, 1.5, 0.5),
                           labels = seq(-1.5, 1.5, 0.5)) +
        scale_x_continuous(breaks = seq(-1.5, 1.5, 0.5),
                           labels = seq(-1.5, 1.5, 0.5)) +
        coord_cartesian(xlim = c(-1.5, 1.5),
                        ylim = c(-1.5, 1.5)) + 
        theme(axis.title.x = element_markdown(color = color))
    
    if(ytitle == F){
        p = p + 
            theme(axis.title.y = element_blank())
    }
    p
}

p1 = fun.exp3b.scatter(data = df.plot, 
                            text = df.text,
                            modelname = "rationality",
                            color = "blue")

p2 = fun.exp3b.scatter(data = df.plot, 
                            text = df.text,
                            modelname = "pivotality",
                            color = "red",
                            ytitle = F)

p3 = fun.exp3b.scatter(data = df.plot, 
                            text = df.text,
                            modelname = "full",
                            color = "purple",
                            ytitle = F)

p1 + p2 + p3 & 
    plot_annotation(tag_levels = "A") & 
    theme(plot.tag = element_text(face = "bold"))

ggsave("../../figures/paper/figure12.pdf",
       width = 15,
       height = 5)

5.2.4 TABLES

5.2.4.1 Model fits

# print the correlation between model predictions and judgments
df.exp3b.means = df.exp3b %>% 
    group_by(trial, pivotality, rationality) %>%
    summarize(judgment = mean(judgment)) %>% 
    ungroup()

fun.r_rmse = function(fit, dv){
  df = fit %>%
    fitted(newdata = df.exp3b.means,
           re_formula = NA) %>%
    as_tibble() %>%
    clean_names()  %>%
    bind_cols(df.exp3b.means)

  if(sd(df$estimate) > 0){
    df = df %>%
      summarise(r = cor(.data[[dv]], estimate),
                rmse = fun.rmse(.data[[dv]], estimate))
  }else{
    df = df %>%
      summarise(rmse = fun.rmse(.data[[dv]], estimate))
  }
  return(df)
}

df.table1 = df.exp3b.elpd %>%
    select(model,
           elpd = elpd_diff,
           elpd_se = se_diff) %>%
    left_join(tibble(model = c("baseline",
                               "pivotality",
                               "rationality",
                               "full"),
                     r = c(0,
                           fun.r_rmse(fit = fit.exp3b.pivotality,
                                      dv = "judgment")$r,
                           fun.r_rmse(fit = fit.exp3b.rationality,
                                      dv = "judgment")$r,
                           fun.r_rmse(fit = fit.exp3b.full,
                                      dv = "judgment")$r),
                     rmse = c(fun.r_rmse(fit = fit.exp3b.baseline,
                                         dv = "judgment")$rmse,
                              fun.r_rmse(fit = fit.exp3b.pivotality,
                                         dv = "judgment")$rmse,
                              fun.r_rmse(fit = fit.exp3b.rationality,
                                         dv = "judgment")$rmse,
                              fun.r_rmse(fit = fit.exp3b.full,
                                         dv = "judgment")$rmse)),
              by = "model") %>%
    mutate(across(where(is.numeric), ~ round(., 2))) %>%
    mutate(elpd = str_c(elpd, " (", elpd_se,")")) %>%
    select(-elpd_se) %>%
    left_join(df.exp3b.individual_fits %>%
                  count(best_model) %>%
                  rename(`# best fit` = n,
                         model = best_model),
              by = "model") %>%
    select(model, r, rmse, elpd, `# best fit`)

df.table2 =
    fit.exp3b.baseline %>%
    tidy(effects = "fixed",
         fix.intercept = F,
         conf.method = "HPDinterval") %>%
    mutate(model = "baseline") %>%
    bind_rows(fit.exp3b.pivotality %>%
                  tidy(effects = "fixed",
                       fix.intercept = F,
                       conf.method = "HPDinterval") %>%
                  mutate(model = "pivotality")) %>%
    bind_rows(fit.exp3b.rationality %>%
                  tidy(effects = "fixed",
                       fix.intercept = F,
                       conf.method = "HPDinterval") %>%
                  mutate(model = "rationality")) %>%
    bind_rows(fit.exp3b.full %>%
                  tidy(effects = "fixed",
                       fix.intercept = F,
                       conf.method = "HPDinterval") %>%
                  mutate(model = "full")) %>%
    mutate(across(where(is.numeric), ~ round(., 2)),
           label = str_c(estimate, " [", conf.low, ", ", conf.high, "]")) %>%
    select(model, term, label) %>%
    pivot_wider(names_from = term,
                values_from = label) %>%
    rename(intercept = Intercept) %>%
    left_join(df.table1,
              by = "model") %>%
    mutate(model = str_replace_all(model, pattern = "_", " "))

df.table2 %>%
    # fun.print_table("latex")
    fun.print_table()
model intercept pivotality rationality r rmse elpd # best fit
baseline 0 [-0.03, 0.04] NA NA 0.00 0.70 -915.41 (36.43) 3
pivotality -0.68 [-0.76, -0.61] 1.31 [1.17, 1.45] NA 0.73 0.48 -304.01 (23.18) 10
rationality -0.85 [-0.95, -0.75] NA 1.72 [1.53, 1.94] 0.77 0.45 -139.83 (16.5) 10
full -0.95 [-1.06, -0.84] 0.69 [0.55, 0.82] 1.19 [0.96, 1.38] 0.83 0.40 0 (0) NA

5.2.4.2 Trial information

df.exp3b %>% 
    group_by(trial, trees, strength_a, strength_b, strength_c, action_a, action_b, action_c, player) %>% 
    reframe(value = smean.cl.boot(judgment),
            name = c("mean", "low", "high")) %>% 
    pivot_wider(names_from = name,
                values_from = value) %>% 
    mutate(across(c(mean, low, high), ~ format(round(., 2),
                                               nsmall = 2,
                                               trim = T)),
           judgment = str_c(mean, " (", low, ", ", high, ")")) %>% 
    left_join(df.exp3b.regression %>% 
                  select(trial, contains("estimate")),
              by = "trial") %>% 
    rename_with(.fn = ~ str_remove(., "_estimate")) %>%
    select(-c(mean, low,  high, trial)) %>% 
    pivot_wider(names_from = player,
                values_from = c(judgment, pivotality, rationality, full)) %>% 
    relocate(judgment_a, judgment_b, judgment_c, .after = last_col()) %>% 
    mutate(index = 1:n(), .before = 1,
           across(contains("action"), .fns = ~ ifelse(. == "fish", "F", "T")),
           across(c(trees, contains("strength")), .fns = ~ as.character(.))) %>%
    select(-contains("pivotality"), -contains("rationality")) %>%
    fun.print_table(digits = 2)
index trees strength_a strength_b strength_c action_a action_b action_c full_a full_b full_c judgment_a judgment_b judgment_c
1 1 1 1 1 F F F 0.26 0.26 0.26 0.88 (0.70, 1.08) 0.72 (0.50, 0.93) 0.65 (0.35, 0.92)
2 1 1 1 1 T T F 0.41 0.41 -0.20 -0.19 (-0.51, 0.11) -0.07 (-0.39, 0.25) -0.96 (-1.24, -0.64)
3 1 1 1 3 F F T 0.46 0.46 0.57 0.79 (0.50, 1.05) 0.58 (0.21, 0.89) 0.49 (0.04, 0.91)
4 1 1 2 3 F F F 0.92 -0.90 -0.94 1.14 (1.01, 1.27) -0.63 (-0.96, -0.26) -0.70 (-1.01, -0.34)
5 1 1 2 3 T F T -0.94 -0.90 0.92 -0.78 (-1.12, -0.30) -1.10 (-1.38, -0.77) 0.87 (0.52, 1.15)
6 1 1 2 3 T T F -0.94 0.88 -0.94 -1.03 (-1.23, -0.80) 0.85 (0.51, 1.11) -1.11 (-1.35, -0.85)
7 1 1 3 3 F F F 0.92 -0.95 -0.95 0.83 (0.45, 1.18) -0.21 (-0.64, 0.24) -0.07 (-0.45, 0.35)
8 1 1 3 3 F T F 0.58 0.58 -0.95 0.81 (0.61, 0.98) 0.32 (0.05, 0.59) -0.94 (-1.09, -0.78)
9 1 1 3 3 T T F -0.95 0.92 -0.95 -0.90 (-1.14, -0.57) 0.95 (0.53, 1.30) -0.81 (-1.16, -0.41)
10 1 3 1 2 F F F -0.94 0.92 -0.90 -0.67 (-0.99, -0.29) 1.21 (1.04, 1.35) -0.56 (-0.93, -0.16)
11 1 3 1 2 T T F 0.92 -0.94 -0.90 0.58 (0.04, 1.02) -0.75 (-1.10, -0.35) -0.87 (-1.04, -0.69)
12 1 3 1 3 F F F -0.95 0.92 -0.95 -0.95 (-1.18, -0.64) 1.22 (1.08, 1.36) -1.00 (-1.16, -0.84)
13 1 3 1 3 F T T -0.95 -0.95 0.92 -1.17 (-1.33, -1.02) -1.10 (-1.33, -0.80) 1.11 (0.74, 1.39)
14 1 3 3 3 F F F 0.05 0.05 0.05 0.53 (0.19, 0.81) 0.56 (0.22, 0.83) 0.60 (0.32, 0.85)
15 2 1 1 1 F F F 0.05 0.05 0.05 0.68 (0.48, 0.87) 0.69 (0.48, 0.89) 0.58 (0.34, 0.79)
16 2 1 1 1 F T F 0.39 -0.19 0.39 0.47 (0.16, 0.73) -0.99 (-1.31, -0.61) 0.35 (0.06, 0.62)
17 2 1 1 1 T T T 0.27 0.27 0.27 -0.09 (-0.38, 0.22) -0.02 (-0.33, 0.28) 0.01 (-0.28, 0.29)
18 2 1 1 2 F F F -0.10 -0.10 0.56 -0.10 (-0.48, 0.27) -0.12 (-0.49, 0.23) 0.65 (0.23, 1.02)
19 2 1 1 2 F T F 0.24 0.08 0.21 0.04 (-0.39, 0.45) -0.55 (-0.88, -0.17) 0.52 (0.09, 0.92)
20 2 1 1 2 T F T 0.43 -0.10 -0.24 0.52 (0.17, 0.84) -0.44 (-0.80, -0.03) -0.79 (-1.09, -0.43)
21 2 1 1 3 F F F 0.10 0.10 -0.35 0.77 (0.60, 0.93) 0.81 (0.65, 0.95) -0.55 (-0.90, -0.20)
22 2 1 1 3 F F T -0.02 -0.02 -0.13 0.81 (0.49, 1.06) 0.84 (0.52, 1.08) 0.32 (-0.10, 0.70)
23 2 1 1 3 T F F -0.47 0.44 -0.35 -1.01 (-1.26, -0.76) 0.89 (0.56, 1.14) -0.65 (-1.00, -0.26)
24 2 1 1 3 T F T -0.47 0.10 -0.02 -0.65 (-0.82, -0.46) 0.54 (0.33, 0.77) 0.47 (0.23, 0.72)
25 2 1 2 2 T F T 0.74 -0.02 -0.24 0.43 (0.04, 0.79) -0.46 (-0.79, -0.10) -0.42 (-0.77, -0.05)
26 2 1 2 3 F F F -0.87 0.89 -0.89 -0.50 (-0.82, -0.16) 1.10 (0.91, 1.29) -0.45 (-0.77, -0.04)
27 2 1 2 3 T F F 0.51 0.54 -0.89 -0.16 (-0.69, 0.33) 1.02 (0.85, 1.16) -0.84 (-1.08, -0.56)
28 2 1 2 3 T T F 0.85 -0.91 -0.89 0.41 (-0.03, 0.82) -0.59 (-0.95, -0.17) -1.02 (-1.23, -0.77)
29 2 2 2 1 T T F 0.22 0.22 -0.77 -0.10 (-0.41, 0.22) -0.05 (-0.38, 0.26) -1.01 (-1.21, -0.79)
30 2 2 2 2 F F F 0.15 0.15 0.15 0.56 (0.36, 0.76) 0.47 (0.23, 0.68) 0.50 (0.27, 0.70)
31 2 2 2 2 T T F 0.51 0.51 -0.31 -0.20 (-0.49, 0.09) 0.07 (-0.27, 0.43) -0.50 (-0.80, -0.17)
32 2 2 2 2 T T T 0.17 0.17 0.17 -0.12 (-0.38, 0.15) -0.07 (-0.32, 0.16) -0.08 (-0.37, 0.22)
33 2 2 2 3 F F F 0.63 0.63 -0.93 0.49 (0.18, 0.76) 0.53 (0.26, 0.75) -0.68 (-1.02, -0.27)
34 2 3 1 2 F F F -0.89 -0.87 0.89 -0.63 (-0.93, -0.29) -0.88 (-1.18, -0.53) 1.15 (1.03, 1.26)
35 2 3 1 2 T F T 0.86 -0.87 -0.91 0.66 (0.19, 1.08) -0.81 (-1.14, -0.45) -0.71 (-1.04, -0.33)
36 3 1 1 2 F F F -0.01 -0.01 0.12 0.29 (0.06, 0.51) 0.25 (0.00, 0.49) 0.76 (0.46, 1.04)
37 3 1 1 2 F F T 0.33 0.33 -0.49 0.57 (0.35, 0.78) 0.56 (0.37, 0.77) -1.07 (-1.33, -0.72)
38 3 1 1 2 F T F -0.13 -0.13 0.47 -0.25 (-0.54, 0.03) -0.92 (-1.16, -0.60) 1.04 (0.84, 1.22)
39 3 1 3 3 F F F -0.84 0.45 0.45 -0.93 (-1.12, -0.69) 0.87 (0.57, 1.13) 0.88 (0.57, 1.10)
40 3 1 3 3 T T F 0.82 -0.24 -0.01 0.75 (0.44, 1.06) -0.87 (-1.18, -0.47) -0.78 (-1.06, -0.48)
41 3 2 1 2 F F F 0.03 0.24 0.03 0.78 (0.59, 0.95) 0.80 (0.63, 0.98) 0.66 (0.40, 0.88)
42 3 2 1 2 F F T -0.09 0.58 -0.17 -0.40 (-0.63, -0.18) 0.70 (0.31, 1.07) -1.07 (-1.23, -0.90)
43 3 2 1 2 F T F 0.37 -0.60 0.37 0.51 (0.22, 0.79) -0.88 (-1.21, -0.53) 0.65 (0.37, 0.85)
44 3 2 1 2 T F T -0.05 0.24 -0.05 -0.55 (-0.80, -0.26) 0.83 (0.49, 1.12) -0.25 (-0.56, 0.05)
45 3 2 1 3 F F F -0.38 -0.10 0.76 -0.62 (-0.89, -0.31) -0.55 (-0.86, -0.15) 1.02 (0.66, 1.34)
46 3 2 1 3 F T F -0.04 0.07 0.42 -0.17 (-0.63, 0.30) -0.03 (-0.56, 0.48) 0.89 (0.47, 1.18)
47 3 2 1 3 T F F 0.36 0.25 0.42 -0.04 (-0.41, 0.33) -0.31 (-0.72, 0.12) 0.67 (0.22, 1.06)
48 3 2 2 2 F F F 0.12 0.12 0.12 0.61 (0.47, 0.75) 0.67 (0.54, 0.80) 0.47 (0.26, 0.66)
49 3 2 2 2 F F T 0.46 0.46 -0.25 0.56 (0.39, 0.73) 0.44 (0.16, 0.71) -0.92 (-1.18, -0.59)
50 3 2 2 3 F F F -0.88 -0.88 0.90 -0.73 (-1.00, -0.43) -0.79 (-1.08, -0.47) 1.18 (0.82, 1.42)
51 3 2 2 3 F T T -0.88 0.86 -0.92 -0.74 (-1.11, -0.34) 0.73 (0.32, 1.12) -0.86 (-1.20, -0.37)
52 3 2 3 3 T T F 0.91 -0.48 0.22 0.50 (0.10, 0.86) -0.61 (-0.99, -0.19) -0.41 (-0.81, 0.01)
53 3 3 1 2 T F T -0.44 -0.10 0.70 -0.92 (-1.07, -0.75) -0.82 (-0.99, -0.63) 0.69 (0.50, 0.87)
54 3 3 1 2 T T F -0.44 0.42 -0.38 -0.88 (-1.24, -0.37) 0.64 (0.30, 0.92) -1.10 (-1.29, -0.87)
55 3 3 2 2 F F F 0.90 -0.88 -0.88 1.08 (0.74, 1.32) -0.68 (-0.94, -0.38) -0.66 (-0.92, -0.38)
56 3 3 2 2 F T T 0.44 0.40 0.40 0.97 (0.66, 1.21) 0.28 (-0.12, 0.64) 0.30 (-0.10, 0.69)
57 3 3 2 3 F F F 0.68 -0.93 0.68 0.39 (0.00, 0.71) -0.82 (-1.08, -0.52) 0.55 (0.29, 0.81)
58 3 3 3 1 T T F 0.22 0.22 -0.84 -0.18 (-0.56, 0.18) 0.12 (-0.28, 0.50) -0.93 (-1.20, -0.59)
59 3 3 3 3 T T F 0.62 0.62 -0.41 -0.14 (-0.40, 0.09) 0.13 (-0.07, 0.34) -0.90 (-1.22, -0.55)
60 3 3 3 3 T T T 0.27 0.27 0.27 0.36 (0.04, 0.68) 0.35 (0.00, 0.70) 0.40 (0.11, 0.67)
    # fun.print_table(digits = 2,
    #                 format = "latex")

5.3 EXP 3c: Inference

5.3.1 DATA

5.3.1.1 Read in data

con = dbConnect(SQLite(), dbname = "../../data/experiment3c/experiment3c-data.db")
df.exp3c = dbReadTable(con,"inference_from_blame")
dbDisconnect(con)

df.exp3c = df.exp3c %>%
  filter(status %in% c(4, 5))

5.3.1.2 Read in model predictions

df.exp3c.model = read_csv(
    "../../data/experiment3c/experiment3c-model.csv") %>% 
    select(-c(`...1`, rationality_beta, k)) %>% 
    rename(a_strength = strength_a,
           b_strength = strength_b, 
           c_strength = strength_c,
           a_action = action_a,
           b_action = action_b, 
           c_action = action_c) %>% 
    rename_with(.fn = ~ str_replace(., "action", "choice")) %>% 
    mutate(across(contains("choice"), ~ ifelse(. == 0, "fish", "trees")))

5.3.1.3 Demographic info

# demographics
df.exp3c.demographics = df.exp3c$datastring %>%
    as.tbl_json() %>%
    enter_object("questiondata") %>%
    gather_object() %>%
    append_values_string() %>%
    as_tibble() %>%
    rename(participant = document.id) %>%
    pivot_wider(names_from = name,
                values_from = string) %>%
    mutate(begin = df.exp3c$beginhit,
           end =  df.exp3c$endhit,
           time = as.duration(interval(ymd_hms(df.exp3c$beginexp), 
                                       ymd_hms(df.exp3c$endhit)))) %>%
    select(-c(begin, end))

datatable(df.exp3c.demographics)
df.exp3c.demographics %>%
    mutate(age = as.numeric(age)) %>%
    summarise(age_mean = mean(age),
              age_sd = sd(age),
              n_female = sum(sex == "female"),
              time_mean = mean(time) / 60,
              time_sd = sd(time) / 60) %>%
    fun.print_table(digits = 1)
age_mean age_sd n_female time_mean time_sd
40.6 10.7 37 10.2 4.9

5.3.1.4 Trial info

# trial information
df.exp3c.trialinfo = df.exp3c$datastring %>%
  as.tbl_json() %>%
  enter_object("data") %>%
  gather_array() %>%
  enter_object("trialdata") %>%
  gather_object("name") %>%
  filter(document.id == 1,
         name == "situation") %>%
  gather_object("index") %>%
  append_values_string() %>%
  as_tibble() %>%
  filter(index != "unknowns") %>%
  pivot_wider(names_from = index,
              values_from = string) %>%
  select(trial = id,
         situation = base_image) %>%
  mutate(situation = str_remove(situation, ".png")) %>%
  filter(!trial %in% c("att1", "att2")) %>%
  separate(situation,
           into = c("trees", "a", "b", "c")) %>%
  mutate(trees = as.numeric(str_remove(trees, "t")),
         trial = as.numeric(trial) + 1,
         across(a:c,
                ~ str_extract(., "\\-*\\d+\\.*\\d*"),
                .names = "{.col}_strength"),
         across(a:c,
                ~ str_extract(., "fish|None|trees"),
                .names = "{.col}_choice"),
         across(a:c,
                ~ str_extract(., "low|medium|high"),
                .names = "{.col}_blame"),
         ) %>%
  select(-c(a:c)) %>%
  arrange(trial)

# change 0 to NA 
df.exp3c.trialinfo = df.exp3c.trialinfo %>% 
    mutate(across(.cols = contains("strength"),
                  .fns = ~ as.numeric(.)),
           across(.cols = where(is.numeric),
                  .fns = ~ na_if(., 0)),
           across(.cols = where(is.character),
                  .fns = ~ na_if(., 'None')))

5.3.1.5 Possibilities

df.exp3c.possibilities = tibble() 

for (i in 1:nrow(df.exp3c.trialinfo)) {
df.tmp = df.exp3c.trialinfo %>% 
    filter(trial == i) %>% 
    select(where(~ !any(is.na(.)))) %>% 
    left_join(df.exp3c.model)

df.exp3c.possibilities = bind_rows(df.exp3c.possibilities,
                                   df.tmp)
}

df.exp3c.possibilities = df.exp3c.possibilities %>% 
    mutate(across(.cols = contains("_blame"),
                  .fns = ~ factor(.,
                                  levels = c("low", "medium", "high"),
                                  labels = c(0, 0.5, 1)) %>% 
                      as.character() %>% 
                      as.numeric())) %>% 
    relocate(trees, .after = trial) %>%
    rename_with(.fn = ~ str_remove(., "blame_")) %>% 
    pivot_longer(cols = c(contains("blame"),
                          contains("rationality"),
                          contains("pivotality")),
                 names_to = c("player", "index"),
                 values_to = "value",
                 names_sep = "_") %>% 
    pivot_wider(names_from = index,
                values_from = value) %>% 
    group_by(trial) %>% 
    mutate(situation = rep(1:(n()/3), each = 3)) %>%
    ungroup() %>% 
    relocate(situation, .after = trial)

# add predictions from the full model based on how much weight participants
# put on each factor in experiment 3b 
mixture_weight = fit.exp3b.full %>% 
    tidy() %>% 
    filter(term == "rationality" | term == "pivotality") %>% 
    select(term, estimate) %>% 
    pivot_wider(names_from = term,
                values_from = estimate) %>%
    mutate(weight = rationality / (rationality + pivotality)) %>% 
    pull(weight)
    
df.exp3c.possibilities = df.exp3c.possibilities %>% 
    mutate(full = mixture_weight * rationality + (1 - mixture_weight) * pivotality)
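
  • in equation form, the two chunks above compute the following, where the two coefficients are the population-level (fixed-effect) estimates from fit.exp3b.full:

$$
w = \frac{\hat{\beta}_{\text{rationality}}}{\hat{\beta}_{\text{rationality}} + \hat{\beta}_{\text{pivotality}}},
\qquad
\text{full} = w \cdot \text{rationality} + (1 - w) \cdot \text{pivotality}
$$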

5.3.1.6 Main info

# main data
df.exp3c.long = df.exp3c$datastring %>%
  as.tbl_json() %>%
  enter_object("data") %>%
  gather_array() %>%
  enter_object("trialdata") %>%
  gather_object() %>%
  append_values_string() %>%
  as_tibble() %>%
  rename(participant = document.id,
         trial = array.index) %>%
  mutate(test = string == "TESTTRIAL") %>%
  group_by(participant, trial) %>%
  filter(any(test)) %>%
  ungroup() %>%
  select(participant, trial, name, string) %>%
  pivot_wider(names_from = name,
              values_from = string) %>%
  clean_names() %>%
  select(participant, -trial, trial = trial_ix, order = trial_number, a_strength:fish) %>%
  mutate(across(a_strength:fish, ~ na_if(., "???")))

# filter out participants who didn't pass both of the attention checks
df.exp3c.long = df.exp3c.long %>%
  mutate(include = trial == "att1" & fish == "4",
         include = ifelse(trial == "att2" & fish == "0", T, include)) %>%
  group_by(participant) %>%
  filter(sum(include) == 2) %>%
  ungroup() %>%
  filter(!trial %in% c("att1", "att2"))
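
  • optional check (a sketch, not part of the original analysis): number of participants remaining after the attention-check exclusion.

# optional: participants left after excluding attention-check failures
df.exp3c.long %>% 
  distinct(participant) %>% 
  nrow()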

# some more restructuring
df.exp3c.long = df.exp3c.long %>%
  mutate(trial = as.numeric(trial) + 1,
         across(.cols = contains("strength"),
                .fns = ~ factor(., levels = 1:3)),
         across(.cols = contains("choice"),
                .fns = ~ factor(., levels = c("fish", "trees"))),
         trees = str_extract(trees, "\\-*\\d+\\.*\\d*"),
         trees = factor(trees, levels = 1:3)) %>%
  select(-c(fish, order, include)) %>%
  pivot_longer(cols = -c(participant, trial),
               names_to = "question",
               values_to = "choice") %>%
  na.omit() %>%
  arrange(participant, trial)

5.3.2 STATS

5.3.2.1 Percentage of choices

# human aggregate data 
df.exp3c.aggregate = df.exp3c.long %>%
    count(trial, question, choice) %>%
    group_by(trial, question) %>%
    mutate(people = n/sum(n),
           choice = as.character(choice)) %>%
    ungroup()
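
  • sanity check (an optional sketch, not part of the original analysis): within each trial and question, the choice proportions should sum to 1.

# optional check: proportions should sum to 1 within trial and question
df.exp3c.aggregate %>%
    summarize(total = sum(people),
              .by = c(trial, question)) %>%
    summarize(all_one = all(near(total, 1)))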

5.3.2.2 Bootstrapped confidence intervals

set.seed(1)

# percentages with bootstrapped confidence intervals
df.exp3c.confidence = df.exp3c.long %>%
  mutate(index = str_c(trial, ".", question)) %>%
  group_by(index) %>%
  nest() %>%
  mutate(bootstraps = map(.x = data,
                          .f = ~ bootstrap(.x, n = 1000))) %>%
  unnest(bootstraps) %>%
  mutate(counts = map(.x = strap,
                      .f = ~ .x %>%
                        as_tibble() %>%
                        count(choice))) %>%
  select(index, .id, counts) %>%
  unnest(counts) %>%
  group_by(index, .id) %>%
  mutate(p = n / sum(n)) %>%
  group_by(index, choice) %>%
  summarise(low = quantile(p, probs = 0.025),
            high = quantile(p, probs = 0.975)) %>%
  ungroup() %>%
  separate(index,
           into = c("trial", "question"),
           sep = "\\.") %>%
  mutate(trial = as.numeric(trial))

# save(df.exp3c.confidence,
#      file = "cache/df.exp3c.confidence.RData")
load(file = "cache/df.exp3c.confidence.RData")

5.3.2.3 Compute predicted posterior (and fit parameters)

fun.fit.exp3c = function(p){
    df.exp3c.posteriors = df.exp3c.possibilities %>%
        mutate(likelihood_rationality = map2_dbl(.x = rationality, 
                                                 .y = blame, 
                                                 .f = ~ dnorm(.x,
                                                              mean = .y,
                                                              sd = p[1])),
               likelihood_pivotality = map2_dbl(.x = pivotality, 
                                                .y = blame, 
                                                .f = ~ dnorm(.x,
                                                             mean = .y,
                                                             sd = p[1])),
               likelihood_full = map2_dbl(.x = full, 
                                             .y = blame, 
                                             .f = ~ dnorm(.x,
                                                          mean = .y,
                                                          sd = p[1]))) %>% 
        group_by(trial, situation) %>% 
        summarize(posterior_rationality = prod(likelihood_rationality),
                  posterior_pivotality = prod(likelihood_pivotality),
                  posterior_full = prod(likelihood_full)) %>% 
        group_by(trial) %>% 
        mutate(posterior_rationality = posterior_rationality / sum(posterior_rationality),
               posterior_pivotality = posterior_pivotality / sum(posterior_pivotality),
               posterior_full = posterior_full / sum(posterior_full)) %>% 
        ungroup()
    
    # integrate with human data
    df.exp3c.aggregate = df.exp3c.aggregate %>%
        left_join(df.exp3c.possibilities %>% 
                      select(trial, situation, trees,
                             contains("strength"), contains("choice")) %>% 
                      distinct() %>%
                      left_join(df.exp3c.posteriors,
                                by = c("trial", "situation")) %>% 
                      pivot_longer(cols = c(trees, 
                                            contains("strength"), 
                                            contains("choice")),
                                   names_to = "question",
                                   values_to = "choice",
                                   values_transform = list(choice = as.character)) %>% 
                      relocate(question, choice, .after = situation),
                  by = join_by(trial, question, choice)) %>% 
        group_by(trial, question, choice, n, people) %>% 
        summarize(across(contains("posterior"), .fns = ~ sum(.))) %>% 
        ungroup()
    
    # add softmax
    df.exp3c.aggregate = df.exp3c.aggregate %>%
        group_by(trial, question) %>%
        mutate(posterior_rationality_softmax = fun.softmax(posterior_rationality,
                                                       beta = p[2]),
               posterior_pivotality_softmax = fun.softmax(posterior_pivotality,
                                                      beta = p[2]),
               posterior_full_softmax = fun.softmax(posterior_full,
                                                   beta = p[2])) %>%
        ungroup()
    
    # compare to human data 
    loss = df.exp3c.aggregate %>% 
        mutate(rationality_sse = (people - posterior_rationality_softmax)^2,
               pivotality_sse = (people - posterior_pivotality_softmax)^2,
               full_sse = (people - posterior_full_softmax)^2,
               total_sse = rationality_sse + pivotality_sse + full_sse) %>% 
        pull(total_sse) %>% 
        sum(.)
    
    return(loss)
}
 
params = optim(par = c(0.1, 3), 
               fn = fun.fit.exp3c)

# best fitting values 
# p.sd = 0.3365993 
# p.beta = 1.2038573
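
  • one way to write down what fun.fit.exp3c computes (a summary of the function above, not an addition to the analysis): with m_i(s) a model's predicted blame for player i in candidate situation s, b_i the blame cue shown (0, 0.5, or 1), sigma = p[1] the likelihood SD, and beta = p[2] the softmax temperature,

$$
P(s \mid b) \propto \prod_{i \in \{a, b, c\}} \mathcal{N}\big(m_i(s) \mid b_i, \sigma\big),
\qquad
P(\text{answer}) = \mathrm{softmax}_{\beta}\Big(\sum_{s \in \text{answer}} P(s \mid b)\Big)
$$

  • the softmax is taken over the possible answers to each question; optim() then minimizes the summed squared error between these predicted answer probabilities and the observed human proportions, jointly across the rationality, pivotality, and full models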

5.3.2.4 Model predictions by trial

# best fitting values 
# p.sd = 0.3365993 
# p.beta = 1.2038573
p.sd = params$par[1]
p.beta = params$par[2]

df.exp3c.posteriors = df.exp3c.possibilities %>%
    mutate(likelihood_rationality = map2_dbl(.x = rationality, 
                                             .y = blame, 
                                             .f = ~ dnorm(.x,
                                                          mean = .y,
                                                          sd = p.sd)),
           likelihood_pivotality = map2_dbl(.x = pivotality, 
                                            .y = blame, 
                                            .f = ~ dnorm(.x,
                                                         mean = .y,
                                                         sd = p.sd)),
           likelihood_full = map2_dbl(.x = full, 
                                         .y = blame, 
                                         .f = ~ dnorm(.x,
                                                      mean = .y,
                                                      sd = p.sd))) %>% 
    group_by(trial, situation) %>% 
    summarize(posterior_rationality = prod(likelihood_rationality),
              posterior_pivotality = prod(likelihood_pivotality),
              posterior_full = prod(likelihood_full)) %>% 
    group_by(trial) %>% 
    mutate(posterior_rationality = posterior_rationality / sum(posterior_rationality),
           posterior_pivotality = posterior_pivotality / sum(posterior_pivotality),
           posterior_full = posterior_full / sum(posterior_full)) %>% 
    ungroup()

# integrate with human data
df.exp3c.aggregate = df.exp3c.aggregate %>%
    left_join(df.exp3c.possibilities %>% 
                  select(trial, situation, trees,
                         contains("strength"), contains("choice")) %>% 
                  distinct() %>%
                  left_join(df.exp3c.posteriors,
                            by = c("trial", "situation")) %>% 
                  pivot_longer(cols = c(trees, contains("strength"), contains("choice")),
                               names_to = "question",
                               values_to = "choice",
                               values_transform = list(choice = as.character)) %>% 
                  relocate(question, choice, .after = situation),
              by = join_by(trial, question, choice)) %>% 
    group_by(trial, question, choice, n, people) %>% 
    summarize(across(contains("posterior"), .fns = ~ sum(.))) %>% 
    ungroup()

# add softmax
df.exp3c.aggregate = df.exp3c.aggregate %>%
    group_by(trial, question) %>%
    mutate(posterior_rationality_softmax = fun.softmax(posterior_rationality,
                                                       beta = p.beta),
           posterior_pivotality_softmax = fun.softmax(posterior_pivotality,
                                                      beta = p.beta),
           posterior_full_softmax = fun.softmax(posterior_full,
                                                beta = p.beta)) %>%
    ungroup()

5.3.2.5 Correlation matrix

df.exp3c.aggregate %>%
    select(people, contains("softmax")) %>%
    rename_with(~ str_remove(., "posterior_")) %>%
    rename_with(~ str_remove(., "_softmax")) %>%
    correlate() %>%
    shave() %>%
    fashion() %>%
    fun.print_table()
term people rationality pivotality full
people
rationality .78
pivotality .64 .60
full .82 .92 .83

5.3.3 PLOTS

5.3.3.1 Selection of trials

trials = c(1, 3, 25, 9, 11, 34, 19, 18)
labels = 1:8

df.plot = df.exp3c.aggregate %>%
    left_join(df.exp3c.confidence) %>% 
    relocate(low, high, .after = people) %>% 
    mutate(choice = str_replace(choice, "trees", "T"),
           choice = str_replace(choice, "fish", "F"),
           item = str_c(question, choice),
           item = str_replace(item, "trees", "t"),
           item = str_remove(item, "_strength"),
           item = str_remove(item, "_choice"),
           question_type = str_extract(question, "trees|strength|choice")) %>%
    filter(trial %in% trials) %>%
    mutate(trial = factor(trial,
                          levels = trials,
                          labels = str_c("trial ", labels))) %>% 
    select(trial:high, item, question_type,
           rationality = posterior_rationality_softmax,
           pivotality = posterior_pivotality_softmax,
           full = posterior_full_softmax) %>% 
    pivot_longer(cols = c(people, rationality, pivotality, full),
                 names_to = "index",
                 values_to = "value")
Joining with `by = join_by(trial, question, choice)`
fun.load_image = function(situation){
  readPNG(str_c("../../figures/stimuli/experiment3c/images/experiment3c_trial_",
                situation, ".png"))
}

# linking images and clips
df.trials = df.plot %>%
  distinct(trial) %>%
  arrange(trial) %>%
  mutate(number = trials,
         grob = map(.x = number, .f = ~ fun.load_image(situation = .x)),
         label = str_c("trial ", number))

df.text = df.trials %>%
    distinct(trial) %>% 
    mutate(index = 1:8)

# plotting
ggplot(data = df.plot,
       mapping = aes(x = item,
                     y = value)) +
    geom_col(data = df.plot %>%
                 filter(index == "people"),
             mapping = aes(fill = question_type,
                           linetype = question_type),
             alpha = 0.5,
             color = "black") +
    geom_linerange(mapping = aes(ymin = low,
                                 ymax = high),
                   size = 1.5) +
    geom_point(data = df.plot %>%
                   filter(index %in% c("rationality",
                                       "pivotality",
                                       "full")) %>%
                   mutate(index = factor(index, levels = c("rationality",
                                                           "pivotality",
                                                           "full"))),
               mapping = aes(shape = index,
                             fill = index),
               position = position_dodge(width = 0.8),
               color = "black",
               stroke = 1,
               size = 4) +
    geom_custom(data = df.trials,
                mapping = aes(data = grob,
                              x = -Inf,
                              y = Inf),
                grob_fun = function(x) rasterGrob(x,
                                                  interpolate = T,
                                                  vjust = 0,
                                                  hjust = 0)) +
    geom_text(data = df.text,
              mapping = aes(x = -Inf,
                            y = Inf,
                            label = index),
              size = 14,
              hjust = -0.5,
              vjust = -0.25,
              color = "white") +
    facet_wrap(~trial,
               labeller = labeller(label),
               scales = "free_x",
               nrow = 2) +
    labs(y = "% selected",
         linetype = "question",
         shape = "model") +
    coord_cartesian(clip = "off",
                    ylim = c(0, 1)) +
    scale_y_continuous(breaks = seq(0, 1, 0.25),
                       labels = str_c(seq(0, 100, 25), "%"),
                       limits = c(0, 1),
                       expand = c(0, 0)) +
    scale_shape_manual(values = c(21, 23, 24)) +
    scale_linetype_manual(values = rep(1, 3)) +
    scale_fill_manual(breaks = c("choice", "strength", "trees",
                                 "rationality", "pivotality"),
                      values = c(choice = "#80b4ee",
                                 strength = "gray80",
                                 trees = "darkgreen",
                                 rationality = "blue",
                                 pivotality = "red",
                                 full = "purple")) +
    theme(legend.position = "bottom",
          legend.title = element_text(size = 30),
          legend.text = element_text(size = 25),
          panel.grid.major.y = element_line(),
          axis.text.y = element_text(size = 25),
          axis.title.y = element_text(size = 30),
          axis.text.x = element_text(size = 25),
          axis.title.x = element_blank(),
          strip.background = element_blank(),
          panel.background = element_blank(),
          strip.text = element_blank(),
          panel.spacing.x = unit(1, "cm"),
          panel.spacing.y = unit(7, "cm"),
          plot.margin = margin(t = 6.5, l = 0.2, r = 0.2, b = 0.1, unit = "cm")
    ) +
    guides(fill = "none",
           linetype = guide_legend(override.aes = list(fill = c("#80b4ee",
                                                                "gray80",
                                                                "darkgreen"))),
           shape = guide_legend(override.aes = list(fill = c("blue",
                                                             "red",
                                                             "purple"),
                                                    size = 8)))

ggsave("../../figures/paper/figure13.pdf",
       width = 20,
       height = 13)

5.3.3.2 Scatter plots

df.plot = df.exp3c.aggregate %>%
    left_join(df.exp3c.confidence) %>% 
    relocate(low, high, .after = people) %>% 
    select(trial, question, choice, people, low, high,
           rationality = posterior_rationality_softmax,
           pivotality = posterior_pivotality_softmax,
           full = posterior_full_softmax) %>%
    mutate(question_type = str_extract(question, "trees|strength|choice"))

df.models = tibble(name = c("rationality", "pivotality", "full"),
                   color = c("blue", "red", "purple"))

fun.scatter = function(df_plot, df_models, index){
    p = ggplot(data = df_plot,
               mapping = aes(x = .data[[df_models$name[index]]],
                             y = people)) +
        geom_abline(intercept = 0,
                    slope = 1,
                    color = "black",
                    linetype = "dashed") +
        geom_smooth(method = "lm",
                    mapping = aes(group = 1),
                    fill = df_models$color[index],
                    color = df_models$color[index],
                    alpha = 0.1,
                    show.legend = F) +
        geom_linerange(mapping = aes(ymin = low,
                                     ymax = high),
                       alpha = 0.2,
                       color = "black",
                       linewidth = 1) +
        geom_point(mapping = aes(fill = question_type),
                   size = 2.5,
                   shape = 21) +
        annotate(geom = "text",
                 label = df_plot %>%
                     summarise(r = cor(people, .data[[df_models$name[index]]]),
                               rmse = fun.rmse(people, .data[[df_models$name[index]]])) %>%
                     mutate(across(.cols = everything(), .fns = ~ round(., 2))) %>%
                     unlist() %>%
                     str_c(names(.), " = ", .),
                 x = c(0, 0),
                 y = c(1, 0.92),
                 size = 7,
                 hjust = 0) +
        scale_x_continuous(breaks = seq(0, 1, 0.25),
                           labels = scales::label_percent(),
                           limits = c(0, 1)) +
        scale_y_continuous(breaks = seq(0, 1, 0.25),
                           labels = scales::label_percent(),
                           limits = c(0, 1)) +
        scale_fill_manual(values = c("#80b4ee", "gray80", "darkgreen")) +
        labs(x = str_c(df_models$name[index], " model"),
             y = "% percent selected",
             fill = "question") +
        theme(legend.position = "none",
              axis.title.x = element_markdown(color = df_models$color[index]))
    
    # add legend to first plot
    if (index == 1){
        p = p +
            theme(legend.position = c(1, 0),
                  legend.justification = c(1, -0.1)) +
            guides(fill = guide_legend(override.aes = list(size = 6)))
    }
    else{
        p = p + 
            theme(axis.title.y = element_blank())
    }
    return(p)
}

l.plots = map(.x = 1:3, ~ fun.scatter(df.plot, df.models, .x))

wrap_plots(l.plots,
           ncol = 3) +
  plot_annotation(tag_levels = "A") & 
    theme(plot.tag = element_text(face = "bold"))

ggsave("../../figures/paper/figure14.pdf",
       width = 21,
       height = 6)
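
The fit statistics annotated in each panel can also be collected into a single table; a minimal sketch that reuses df.plot, df.models, fun.rmse(), and fun.print_table() defined earlier:

# correlation and RMSE between each model's predictions and participants' selections
df.models$name %>%
    set_names() %>%
    map_dfr(.f = ~ tibble(r = cor(df.plot$people, df.plot[[.x]]),
                          rmse = fun.rmse(df.plot$people, df.plot[[.x]])),
            .id = "model") %>%
    fun.print_table()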

5.3.4 TABLES

5.3.4.1 Trial information

df.exp3c.trialinfo %>% 
    mutate(across(everything(), ~ as.character(.)),
           across(contains("choice"), ~ ifelse(. == "fish", "F", "T")),
           across(everything(), ~ replace_na(., "--"))) %>%
    fun.print_table()
trial trees a_strength b_strength c_strength a_choice b_choice c_choice a_blame b_blame c_blame
1 1 1 3 F F F high high low
2 3 1 1 F F F low medium medium
3 1 3 1 F F F low high low
4 1 1 3 T T T medium medium high
5 3 1 1 T T T high low low
6 1 3 1 T T T high low high
7 2 2 3 T T F high low low
8 2 3 2 F T T low medium medium
9 2 3 3 T F T medium medium high
10 2 3 3 T T F high low low
11 2 3 3 F T T low high high
12 1 1 3 2 T F low high low
13 1 1 2 3 F T high low high
14 3 3 2 2 F F high medium low
15 3 2 3 2 F F low high low
16 3 3 1 2 F T medium medium medium
17 3 2 3 1 T F medium medium high
18 1 2 1 F high low high
19 3 3 T T T medium medium low
20 3 1 1 F F low high low
21 3 2 1 F F F low low high
22 3 1 1 2 F low low high
23 1 2 2 T T T low medium medium
24 2 1 2 T T T medium medium medium
25 2 2 1 T T T high high low
26 1 2 3 1 T T high high low
27 1 2 1 1 F F high medium medium
28 1 1 2 1 F F medium low medium
29 3 1 2 3 F high medium medium
30 3 3 1 2 F high medium medium
31 2 1 1 2 F high medium medium
32 2 2 1 1 F high medium medium
33 3 1 3 T F F medium medium low
34 3 1 3 T F F medium high medium
35 3 3 1 F T F medium medium medium
36 3 3 3 1 T F high high low

6 Session info

R version 4.4.2 (2024-10-31)
Platform: aarch64-apple-darwin20
Running under: macOS Sequoia 15.2

Matrix products: default
BLAS:   /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/lib/libRblas.0.dylib 
LAPACK: /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/lib/libRlapack.dylib;  LAPACK version 3.12.0

locale:
[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8

time zone: America/Los_Angeles
tzcode source: internal

attached base packages:
[1] grid      stats     graphics  grDevices utils     datasets  methods  
[8] base     

other attached packages:
 [1] forcats_1.0.0       stringr_1.5.1       dplyr_1.1.4        
 [4] purrr_1.0.2         readr_2.1.5         tidyr_1.3.1        
 [7] tibble_3.2.1        tidyverse_2.0.0     boot_1.3-31        
[10] emojifont_0.5.5     scales_1.3.0        ggeffects_2.0.0    
[13] emmeans_1.10.6      tidybayes_3.0.7     Hmisc_5.2-1        
[16] broom.mixed_0.2.9.6 lme4_1.1-35.5       Matrix_1.7-1       
[19] rsample_1.2.1       modelr_0.1.11       patchwork_1.3.0    
[22] egg_0.4.5           ggplot2_3.5.1       gridExtra_2.3      
[25] png_0.1-8           xtable_1.8-4        DT_0.33            
[28] tidyjson_0.3.2      RSQLite_2.3.7       brms_2.22.0        
[31] Rcpp_1.0.13         lubridate_1.9.3     kableExtra_1.4.0   
[34] ggtext_0.1.2        corrr_0.4.4         janitor_2.2.1      
[37] knitr_1.49         

loaded via a namespace (and not attached):
  [1] RColorBrewer_1.1-3   tensorA_0.36.2.1     rstudioapi_0.16.0   
  [4] jsonlite_1.8.8       magrittr_2.0.3       estimability_1.5.1  
  [7] farver_2.1.2         nloptr_2.1.1         rmarkdown_2.29      
 [10] ragg_1.3.2           vctrs_0.6.5          memoise_2.0.1       
 [13] minqa_1.2.7          base64enc_0.1-3      htmltools_0.5.8.1   
 [16] curl_5.2.1           distributional_0.4.0 broom_1.0.7         
 [19] Formula_1.2-5        StanHeaders_2.32.9   sass_0.4.9          
 [22] parallelly_1.37.1    bslib_0.7.0          htmlwidgets_1.6.4   
 [25] plyr_1.8.9           cachem_1.1.0         commonmark_1.9.1    
 [28] lifecycle_1.0.4      pkgconfig_2.0.3      R6_2.5.1            
 [31] fastmap_1.2.0        future_1.33.2        snakecase_0.11.1    
 [34] digest_0.6.36        showtext_0.9-7       colorspace_2.1-0    
 [37] furrr_0.3.1          crosstalk_1.2.1      textshaping_0.4.0   
 [40] labeling_0.4.3       fansi_1.0.6          timechange_0.3.0    
 [43] mgcv_1.9-1           abind_1.4-5          compiler_4.4.2      
 [46] bit64_4.0.5          withr_3.0.2          inline_0.3.19       
 [49] htmlTable_2.4.2      backports_1.5.0      DBI_1.2.3           
 [52] QuickJSR_1.3.0       pkgbuild_1.4.4       MASS_7.3-64         
 [55] loo_2.8.0            tools_4.4.2          foreign_0.8-87      
 [58] nnet_7.3-19          glue_1.8.0           nlme_3.1-166        
 [61] gridtext_0.1.5       checkmate_2.3.1      reshape2_1.4.4      
 [64] cluster_2.1.6        generics_0.1.3       gtable_0.3.5        
 [67] tzdb_0.4.0           hms_1.1.3            data.table_1.15.4   
 [70] xml2_1.3.6           utf8_1.2.4           markdown_1.13       
 [73] pillar_1.9.0         ggdist_3.3.2         vroom_1.6.5         
 [76] posterior_1.6.0      splines_4.4.2        lattice_0.22-6      
 [79] showtextdb_3.0       bit_4.0.5            tidyselect_1.2.1    
 [82] arrayhelpers_1.1-0   V8_5.0.0             bookdown_0.42       
 [85] svglite_2.1.3        stats4_4.4.2         xfun_0.49           
 [88] bridgesampling_1.1-2 matrixStats_1.3.0    rstan_2.32.6        
 [91] proto_1.0.0          stringi_1.8.4        yaml_2.3.10         
 [94] evaluate_0.24.0      codetools_0.2-20     cli_3.6.3           
 [97] RcppParallel_5.1.8   rpart_4.1.23         systemfonts_1.1.0   
[100] munsell_0.5.1        jquerylib_0.1.4      globals_0.16.3      
[103] coda_0.19-4.1        svUnit_1.0.6         parallel_4.4.2      
[106] rstantools_2.4.0     assertthat_0.2.1     blob_1.2.4          
[109] bayesplot_1.11.1     Brobdingnag_1.2-9    listenv_0.9.1       
[112] viridisLite_0.4.2    mvtnorm_1.2-5        sysfonts_0.8.9      
[115] crayon_1.5.3         insight_1.0.0        rlang_1.1.4