COVID-19 Message Framing Project
Overview, descriptives & correlations
# load packages
if (!require(tidyverse)) {
  install.packages('tidyverse')
  library(tidyverse)
}
if (!require(knitr)) {
  install.packages('knitr')
  library(knitr)
}
if (!require(kableExtra)) {
  install.packages('kableExtra')
  library(kableExtra)
}
if (!require(DT)) {
  install.packages('DT')
  library(DT)
}
if (!require(brms)) {
  install.packages('brms')
  library(brms)
}
if (!require(tidybayes)) {
  install.packages('tidybayes')
  library(tidybayes)
}
# define palettes
palette = wesanderson::wes_palette("Zissou1", n = 3, type = "continuous")
# load and tidy data
data = read.csv("../covid19_study1/covid19_study1_clean_long.csv", stringsAsFactors = FALSE) %>%
bind_rows(read.csv("../covid19_study1_pilot2/covid19_study1_pilot2_clean_long.csv", stringsAsFactors = FALSE)) %>%
bind_rows(read.csv("../covid19_study1_pilot/covid19_study1_pilot_clean_long.csv", stringsAsFactors = FALSE)) %>%
mutate(group = ifelse(is.na(group), condition, group),
value = ifelse(item == "covid_studies" & as.numeric(value) > 100, NA, value)) %>% # remove 8 extreme outliers
filter(!grepl("time", survey_name)) %>%
mutate(SID = sprintf("%s_%s", study, SID)) %>%
filter(!grepl("dehumanization", item))
# demographics
demo = data %>%
filter(grepl("gender|race|hispanic_latino|ses_degree|income_household", item)) %>%
ungroup() %>%
select(study, SID, item, value) %>%
mutate(value = ifelse(grepl("hispanic_latino", item), recode(value, "1" = "yes", "0" = "no"), value),
value = ifelse(grepl("gender", item), recode(value, "1" = "male", "2" = "female", "3" = "other", "4" = "would rather not say"), value),
value = ifelse(grepl("race", item), recode(value, "1" = "White", "2" = "Black or African American",
"3" = "Asian", "4" = "American Indian or Alaskan Native",
"5" = "Native Hawaiian or Other Pacific Islander", "6" = "Other"), value),
value = ifelse(grepl("degree", item), recode(value, "1" = "Less than high school", "2" = "High school graduate (diploma)",
"3" = "High school graduate (GED)", "4" = "Some college (1-4 years, no degree)",
"5" = "Associate's degree (including occupational or academic degrees)",
"6" = "Bachelor's degree (BA, BS, etc)", "7" = "Master's degree (MA, MS, MENG, MSW, etc)",
"8" = "Professional school degree (MD, DDC, JD, etc)",
"9" = "Doctorate degree (PhD, EdD, etc)"), value),
value = ifelse(grepl("household", item), recode(value, "1" = "Less than $5,000", "2" = "$5,000 through $11,999",
"3" = "$12,000 through $15,999", "4" = "$16,000 through $24,999",
"5" = "$25,000 through $34,999", "6" = "$35,000 through $49,999",
"7" = "$50,000 through $74,999", "8" = "$75,000 through $99,999",
"9" = "$100,000 and greater", "10" = "NA", "11" = "NA"), value),
item = gsub("hispanic_latino", "Hispanic / Latinx", item),
item = gsub("ses_income_household", "household income", item),
item = gsub("ses_degree", "highest degree completed", item)) %>%
group_by(study, item, value) %>%
summarize(n = n()) %>%
ungroup() %>%
rename(" " = item)
# states
states = data %>%
filter(grepl("state", item)) %>%
select(study, SID, item, value) %>%
unique() %>%
spread(item, value) %>%
group_by(study, state) %>%
summarize(n = n())
# load survey item info and merge with data
items = read.csv("item_text.csv", stringsAsFactors = FALSE)
merged = data %>%
left_join(., items, by = "item") %>%
mutate(value = as.numeric(value),
text = ifelse(survey_name == "msg_familiarity", filter(items, item == "msg_familiarity")$text,
ifelse(survey_name == "msg_rel_self", filter(items, item == "msg_rel_self")$text,
ifelse(survey_name == "msg_rel_social", filter(items, item == "msg_rel_social")$text,
ifelse(survey_name == "msg_motiv_self", filter(items, item == "msg_motiv_self")$text,
ifelse(survey_name == "msg_motiv_other", filter(items, item == "msg_motiv_other")$text,
ifelse(survey_name == "msg_positive", filter(items, item == "msg_positive")$text,
ifelse(survey_name == "msg_negative", filter(items, item == "msg_negative")$text,
ifelse(survey_name == "msg_share", filter(items, item == "msg_share")$text, text)))))))),
measure = ifelse(survey_name == "msg_familiarity", filter(items, item == "msg_familiarity")$measure,
ifelse(survey_name == "msg_rel_self", filter(items, item == "msg_rel_self")$measure,
ifelse(survey_name == "msg_rel_social", filter(items, item == "msg_rel_social")$measure,
ifelse(survey_name == "msg_motiv_self", filter(items, item == "msg_motiv_self")$measure,
ifelse(survey_name == "msg_motiv_other", filter(items, item == "msg_motiv_other")$measure,
ifelse(survey_name == "msg_positive", filter(items, item == "msg_positive")$measure,
ifelse(survey_name == "msg_negative", filter(items, item == "msg_negative")$measure,
ifelse(survey_name == "msg_share", filter(items, item == "msg_share")$measure, measure))))))))) %>%
filter(!is.na(text))
# subset studies
pilot1 = merged %>%
filter(study == "study1_pilot1")
pilot2 = merged %>%
filter(study == "study1_pilot2")
study1 = merged %>%
filter(study == "study1")
# subset and tidy message data
messages = data %>%
mutate(value = as.numeric(value)) %>%
filter(grepl("msg", survey_name)) %>%
filter(!grepl("time", survey_name)) %>%
extract(item, "item", "msg_.*_(.*)") %>%
spread(survey_name, value) %>%
mutate(msg_favorability = msg_positive - msg_negative) %>%
select(-msg_negative, -msg_positive) %>%
gather(survey_name, value, contains("msg")) %>%
mutate(item = sprintf("%s_%s", survey_name, item))
# tidy data for person-level analyses
data_rev = data %>%
filter(!item %in% c("purpose_8", "purpose_9", "beliefs_infect_now", "beliefs_infect_month",
"beliefs_1", "beliefs_2", "beliefs_3", "beliefs_4", "beliefs_5")) %>%
filter(grepl("intentions1|norms.*1|beliefs|agency|trust|age|autonomy|purpose|victimhood|selfconstrual|politics_party|politics_conserv|empathy|cognition|usefulness|covid_studies", survey_name)) %>%
mutate(value = as.numeric(value),
value = ifelse(item == "agency_2", abs(value - 8), value), # reverse-code agency item
value = ifelse(item %in% paste0("purpose_", c(2, 4, 5, 6)), abs(value - 7), value), # reverse-code purpose items
value = ifelse(item %in% paste0("cognitiion_", c(2, 4, 5)), abs(value - 6), value), # reverse-code need for cognition items
value = ifelse(item %in% paste0("empathy_", c(2, 4, 5, 10, 12)), abs(value - 6), value), # reverse-code empathy items
value = ifelse(item %in% c(paste0("beliefs_mental", c(2, 3)),
paste0("beliefs_norms", c(1))), abs(value - 8), value), # reverse-code beliefs items
value = ifelse(item %in% paste0("intentions1_", c(7, 8, 9)), abs(value - 8), value), # reverse-code intentions items
value = ifelse(item %in% c(paste0("norms_close1_", c(7, 8, 9)),
paste0("norms_town1_", c(7, 8, 9))), abs(value - 100), value), # reverse-code norm items
survey_name = ifelse(item %in% paste0("autonomy_", 1:5),
"IAF_autonomous",
ifelse(item %in% paste0("autonomy_", 6:10),
"IAF_controlling", survey_name)),
survey_name = ifelse(item %in% paste0("selfconstrual_", 1:4),
"SC_independence",
ifelse(item %in% paste0("selfconstrual_", 5:8),
"SC_interdependence", survey_name)),
survey_name = ifelse(item %in% paste0("empathy_", 1:7),
"IRI_empathic_concern",
ifelse(item %in% paste0("empathy_", 8:14),
"IRI_personal_distress", survey_name)),
survey_name = ifelse(item %in% paste0("intentions1_", c(2:6, 10)),
"intentions_social_dist",
ifelse(item %in% paste0("intentions1_", c(1, 7, 8, 9)),
"intentions_hygiene", survey_name)),
survey_name = ifelse(item %in% paste0("norms_close1_", c(2:6, 10)),
"norms_close_social_dist",
ifelse(item %in% paste0("norms_close1_", c(1, 7, 8, 9)),
"norms_close_hygiene", survey_name)),
survey_name = ifelse(item %in% paste0("norms_town1_", c(2:6, 10)),
"norms_town_social_dist",
ifelse(item %in% paste0("norms_town1_", c(1, 7, 8, 9)),
"norms_town_hygiene", survey_name)),
value = ifelse(survey_name == "covid_studies", log(value), value),
value = ifelse(survey_name == "covid_studies" & value == -Inf, NA, value)) %>%
filter(!is.na(value)) %>%
bind_rows(., messages)
# raw data for correlations
data_corr = data_rev %>%
group_by(study, SID, survey_name) %>%
summarize(value = mean(value, na.rm = TRUE))
# winsorized data for correlations
data_win = data_rev %>%
group_by(study, survey_name) %>%
mutate(mean = mean(value, na.rm = TRUE),
sd = sd(value, na.rm = TRUE),
sd3 = 3*sd(value, na.rm = TRUE),
value = ifelse(!grepl("familiarity", item) & value > mean + sd3, mean + sd3, value), # winsorize outliers
value = ifelse(!grepl("familiarity", item) & value < mean - sd3, mean - sd3, value)) %>%
group_by(study, SID, survey_name) %>%
summarize(value = mean(value, na.rm = TRUE))
# tidy data for brm models
data_pred = data_corr %>%
group_by(survey_name) %>%
mutate(value_scaled = scale(value, center = TRUE, scale = TRUE))
# create key for brm plots
key = data_pred %>%
select(survey_name) %>%
rename("condition" = survey_name) %>%
unique() %>%
mutate(`measure type` = ifelse(grepl("^age$|cognition|IAF|IRI|purpose|SC|usefulness|trust|victimhood|covid_studies|politics", condition), "individual difference",
ifelse(grepl("msg", condition), "message-level", "person-level")))
Our initial goal for this project was to determine the degree to which several message framing interventions might enhance message effectiveness, as well as intentions, norms, and beliefs related to social distancing, and the degree to which specific individual differences might be associated with these outcomes. We also measured additional outcomes, including support for public policies related to COVID-19, as well as various individual difference measures. This document provides an overview of the studies and basic descriptives for the primary variables of interest common across studies. All data are collapsed across message framing conditions, as condition-level results will appear in separate reports that will be posted on OSF and linked from our lab’s GitHub page. Please see our preregistrations for more information about our planned analyses of the message framing interventions and of the individual differences related to key outcomes of interest.
In each study, participants were randomly assigned to one of several message framing intervention groups (autonomy-supportive language, encouraging or mocking humor, or descriptive norms), a fact-based message control group, or a group that saw no messages. Each participant in the intervention and fact-based message control groups saw a series of 5 messages about social distancing related to COVID-19, randomly sampled from a pool of 15 messages previously normed for argument strength (M = 4.16, SD = 0.18, possible range = 1-5). For more information about message norming, please view the report on argument strength norming.
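Assignment and message sampling were handled during data collection; the sketch below is purely illustrative of the logic described above, and the condition labels and message IDs are placeholders rather than the actual study values.
# illustrative sketch only: random assignment and sampling of 5 of the 15 normed messages
# (condition labels and message IDs are placeholders, not the actual study values)
set.seed(1)
conditions = c("autonomy", "humor", "descriptive_norms", "message_control", "no_message")
assign_participant = function() {
  condition = sample(conditions, 1)
  messages = if (condition == "no_message") integer(0) else sample(1:15, 5)
  list(condition = condition, messages = messages)
}
assign_participant()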
Each message was created to look like an Instagram post that included a visual message about COVID-19 accompanied by a “post” about the message. For each message, the post began with the same stem (e.g., “Staying home protects our community by stopping the spread of #covid19.”). The message control condition contained this stem only, whereas the experimental conditions contained additional text framing the messages. Participants then completed various outcome and individual differences measures.
Below are example messages; all message stimuli used in this project can be viewed by clicking on the links under each study (below the “message framing conditions” sub-heading).
Although the pilot studies differed slightly from full study 1 in how English language comprehension was assessed, exclusions across studies were made based on the criteria listed in the standard operating procedures for this project.
Pilot 1: before exclusions = 99, after exclusions = 80
geo = states %>%
filter(study == "study1_pilot1")
geo %>%
usmap::plot_usmap(data = ., values = "n", color = "grey50") +
scale_fill_gradient2(low = palette[1], mid = palette[2], midpoint = max(geo$n) / 2, high = palette[3],
name = "", na.value = NA, limits = c(min(geo$n), max(geo$n)), breaks = seq(0, max(geo$n), 3)) +
theme(legend.position = "right")
study = "study1_pilot1"
data %>%
filter(study == !!(study) & item == "age") %>%
summarize(`age range` = sprintf("%s - %s", min(as.numeric(value), na.rm = TRUE), max(as.numeric(value), na.rm = TRUE)),
`mean age` = mean(as.numeric(value), na.rm = TRUE),
`sd age` = sd(as.numeric(value), na.rm = TRUE)) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
age range | mean age | sd age |
---|---|---|
22 - 73 | 38 | 11.3 |
demo %>%
filter(study == !!(study) & ` ` == "gender") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(-n, -total) %>%
spread(value, percent) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
  | female | male |
---|---|---|
gender | 41.2% | 58.8% |
demo %>%
filter(study == !!(study) & ` ` == "Hispanic / Latinx") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(-n, -total) %>%
spread(value, percent) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
  | no | yes |
---|---|---|
Hispanic / Latinx | 95.0% | 5.0% |
demo %>%
filter(study == !!(study) & ` ` == "race") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(value, percent) %>%
rename("race" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
race | percent |
---|---|
Asian | 5.0% |
Black or African American | 6.2% |
Other | 2.5% |
White | 86.2% |
demo %>%
filter(study == !!(study) & ` ` == "highest degree completed") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
mutate(value = factor(value, levels = c("Less than high school", "High school graduate (diploma)", "High school graduate (GED)",
"Some college (1-4 years, no degree)", "Associate's degree (including occupational or academic degrees)",
"Bachelor's degree (BA, BS, etc)", "Master's degree (MA, MS, MENG, MSW, etc)",
"Professional school degree (MD, DDC, JD, etc)", "Doctorate degree (PhD, EdD, etc)"))) %>%
arrange(value) %>%
select(value, percent) %>%
rename("highest degree completed" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
highest degree completed | percent |
---|---|
High school graduate (diploma) | 6.2% |
High school graduate (GED) | 5.0% |
Some college (1-4 years, no degree) | 12.5% |
Associate’s degree (including occupational or academic degrees) | 8.8% |
Bachelor’s degree (BA, BS, etc) | 52.5% |
Master’s degree (MA, MS, MENG, MSW, etc) | 13.8% |
Professional school degree (MD, DDC, JD, etc) | 1.2% |
demo %>%
filter(study == !!(study) & ` ` == "household income") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
mutate(value = ifelse(is.na(value), "not reported", value),
value = factor(value, levels = c("Less than $5,000", "$5,000 through $11,999", "$12,000 through $15,999", "$16,000 through $24,999",
"$25,000 through $34,999", "$35,000 through $49,999", "$50,000 through $74,999", "$75,000 through $99,999",
"$100,000 and greater", "not reported"))) %>%
arrange(value) %>%
select(value, percent) %>%
rename("household income" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
household income | percent |
---|---|
Less than $5,000 | 1.2% |
$5,000 through $11,999 | 1.2% |
$12,000 through $15,999 | 1.2% |
$16,000 through $24,999 | 5.0% |
$25,000 through $34,999 | 8.8% |
$35,000 through $49,999 | 17.5% |
$50,000 through $74,999 | 27.5% |
$75,000 through $99,999 | 16.2% |
$100,000 and greater | 21.2% |
Pilot 2: before exclusions = 240, after exclusions = 179
geo = states %>%
filter(study == "study1_pilot2")
geo %>%
usmap::plot_usmap(data = ., values = "n", color = "grey50") +
scale_fill_gradient2(low = palette[1], mid = palette[2], midpoint = max(geo$n) / 2, high = palette[3],
name = "", na.value = NA, limits = c(min(geo$n), max(geo$n)), breaks = seq(0, max(geo$n), 5)) +
theme(legend.position = "right")
study = "study1_pilot2"
data %>%
filter(study == !!(study) & item == "age") %>%
summarize(`age range` = sprintf("%s - %s", min(as.numeric(value), na.rm = TRUE), max(as.numeric(value), na.rm = TRUE)),
`mean age` = mean(as.numeric(value), na.rm = TRUE),
`sd age` = sd(as.numeric(value), na.rm = TRUE)) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
age range | mean age | sd age |
---|---|---|
20 - 70 | 37.7 | 12.1 |
demo %>%
filter(study == !!(study) & ` ` == "gender") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(-n, -total) %>%
spread(value, percent) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
  | female | male | other | would rather not say |
---|---|---|---|---|
gender | 44.7% | 53.6% | 1.1% | 0.6% |
demo %>%
filter(study == !!(study) & ` ` == "Hispanic / Latinx") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(-n, -total) %>%
spread(value, percent) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
  | no | yes |
---|---|---|
Hispanic / Latinx | 87.7% | 12.3% |
demo %>%
filter(study == !!(study) & ` ` == "race") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(value, percent) %>%
rename("race" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
race | percent |
---|---|
American Indian or Alaskan Native | 0.6% |
Asian | 10.1% |
Black or African American | 12.8% |
Other | 2.2% |
White | 74.3% |
demo %>%
filter(study == !!(study) & ` ` == "highest degree completed") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
mutate(value = factor(value, levels = c("Less than high school", "High school graduate (diploma)", "High school graduate (GED)",
"Some college (1-4 years, no degree)", "Associate's degree (including occupational or academic degrees)",
"Bachelor's degree (BA, BS, etc)", "Master's degree (MA, MS, MENG, MSW, etc)",
"Professional school degree (MD, DDC, JD, etc)", "Doctorate degree (PhD, EdD, etc)"))) %>%
arrange(value) %>%
select(value, percent) %>%
rename("highest degree completed" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
highest degree completed | percent |
---|---|
High school graduate (diploma) | 5.6% |
High school graduate (GED) | 5.0% |
Some college (1-4 years, no degree) | 17.9% |
Associate’s degree (including occupational or academic degrees) | 10.6% |
Bachelor’s degree (BA, BS, etc) | 43.6% |
Master’s degree (MA, MS, MENG, MSW, etc) | 14.5% |
Professional school degree (MD, DDC, JD, etc) | 1.1% |
Doctorate degree (PhD, EdD, etc) | 1.7% |
demo %>%
filter(study == !!(study) & ` ` == "household income") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
mutate(value = ifelse(is.na(value), "not reported", value),
value = factor(value, levels = c("Less than $5,000", "$5,000 through $11,999", "$12,000 through $15,999", "$16,000 through $24,999",
"$25,000 through $34,999", "$35,000 through $49,999", "$50,000 through $74,999", "$75,000 through $99,999",
"$100,000 and greater", "not reported"))) %>%
arrange(value) %>%
select(value, percent) %>%
rename("household income" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
household income | percent |
---|---|
$5,000 through $11,999 | 4.5% |
$12,000 through $15,999 | 2.8% |
$16,000 through $24,999 | 6.7% |
$25,000 through $34,999 | 12.8% |
$35,000 through $49,999 | 13.4% |
$50,000 through $74,999 | 24.0% |
$75,000 through $99,999 | 15.6% |
$100,000 and greater | 18.4% |
not reported | 1.7% |
Full study 1: before exclusions = 1103, after exclusions = 865
geo = states %>%
filter(study == "study1")
geo %>%
usmap::plot_usmap(data = ., values = "n", color = "grey50") +
scale_fill_gradient2(low = palette[1], mid = palette[2], midpoint = max(geo$n) / 2, high = palette[3],
name = "", na.value = NA, limits = c(min(geo$n), max(geo$n)), breaks = seq(0, max(geo$n), 20)) +
theme(legend.position = "right")
study = "study1"
data %>%
filter(study == !!(study) & item == "age") %>%
summarize(`age range` = sprintf("%s - %s", min(as.numeric(value), na.rm = TRUE), max(as.numeric(value), na.rm = TRUE)),
`mean age` = mean(as.numeric(value), na.rm = TRUE),
`sd age` = sd(as.numeric(value), na.rm = TRUE)) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
age range | mean age | sd age |
---|---|---|
19 - 74 | 37.7 | 11.4 |
demo %>%
filter(study == !!(study) & ` ` == "gender") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(-n, -total) %>%
spread(value, percent) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
  | female | male | other | would rather not say |
---|---|---|---|---|
gender | 44.6% | 54.8% | 0.3% | 0.2% |
demo %>%
filter(study == !!(study) & ` ` == "Hispanic / Latinx") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(-n, -total) %>%
spread(value, percent) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
  | no | yes |
---|---|---|
Hispanic / Latinx | 87.5% | 12.5% |
demo %>%
filter(study == !!(study) & ` ` == "race") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
select(value, percent) %>%
rename("race" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
race | percent |
---|---|
American Indian or Alaskan Native | 1.0% |
Asian | 7.6% |
Black or African American | 15.6% |
Native Hawaiian or Other Pacific Islander | 0.1% |
Other | 2.7% |
White | 72.9% |
demo %>%
filter(study == !!(study) & ` ` == "highest degree completed") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
mutate(value = factor(value, levels = c("Less than high school", "High school graduate (diploma)", "High school graduate (GED)",
"Some college (1-4 years, no degree)", "Associate's degree (including occupational or academic degrees)",
"Bachelor's degree (BA, BS, etc)", "Master's degree (MA, MS, MENG, MSW, etc)",
"Professional school degree (MD, DDC, JD, etc)", "Doctorate degree (PhD, EdD, etc)"))) %>%
arrange(value) %>%
select(value, percent) %>%
rename("highest degree completed" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
highest degree completed | percent |
---|---|
Less than high school | 0.5% |
High school graduate (diploma) | 5.9% |
High school graduate (GED) | 3.1% |
Some college (1-4 years, no degree) | 17.0% |
Associate’s degree (including occupational or academic degrees) | 11.1% |
Bachelor’s degree (BA, BS, etc) | 45.2% |
Master’s degree (MA, MS, MENG, MSW, etc) | 13.9% |
Professional school degree (MD, DDC, JD, etc) | 1.8% |
Doctorate degree (PhD, EdD, etc) | 1.5% |
demo %>%
filter(study == !!(study) & ` ` == "household income") %>%
ungroup() %>%
select(-study) %>%
mutate(total = sum(n),
percent = sprintf("%.1f%s", (n / total) * 100, "%")) %>%
mutate(value = ifelse(is.na(value), "not reported", value),
value = factor(value, levels = c("Less than $5,000", "$5,000 through $11,999", "$12,000 through $15,999", "$16,000 through $24,999",
"$25,000 through $34,999", "$35,000 through $49,999", "$50,000 through $74,999", "$75,000 through $99,999",
"$100,000 and greater", "not reported"))) %>%
arrange(value) %>%
select(value, percent) %>%
rename("household income" = value) %>%
kable(digits = 1) %>%
kable_styling(bootstrap_options = "striped", full_width = F, position = "left")
household income | percent |
---|---|
Less than $5,000 | 0.8% |
$5,000 through $11,999 | 2.1% |
$12,000 through $15,999 | 2.0% |
$16,000 through $24,999 | 6.2% |
$25,000 through $34,999 | 10.2% |
$35,000 through $49,999 | 14.9% |
$50,000 through $74,999 | 25.4% |
$75,000 through $99,999 | 17.7% |
$100,000 and greater | 18.5% |
not reported | 2.2% |
Below is a list of measures that were included across studies. For a comprehensive list of all items that were assessed in each study, please email lab.manager@falklab.org to request more information.
merged %>%
mutate(survey_name = ifelse(item %in% paste0("autonomy_", 1:5),
"IAF_autonomous",
ifelse(item %in% paste0("autonomy_", 6:10),
"IAF_controlling", survey_name)),
survey_name = ifelse(item %in% paste0("selfconstrual_", 1:4),
"SC_independence",
ifelse(item %in% paste0("selfconstrual_", 5:8),
"SC_interdependence", survey_name)),
survey_name = ifelse(item %in% paste0("empathy_", 1:7),
"IRI_empathic_concern",
ifelse(item %in% paste0("empathy_", 8:14),
"IRI_personal_distress", survey_name)),
survey_name = ifelse(item %in% paste0("intentions1_", c(2:6, 10)),
"intentions_social_dist",
ifelse(item %in% paste0("intentions1_", c(1, 7, 8, 9)),
"intentions_hygiene", survey_name)),
survey_name = ifelse(item %in% paste0("norms_close1_", c(2:6, 10)),
"norms_close_social_dist",
ifelse(item %in% paste0("norms_close1_", c(1, 7, 8, 9)),
"norms_close_hygiene", survey_name)),
survey_name = ifelse(item %in% paste0("norms_town1_", c(2:6, 10)),
"norms_town_social_dist",
ifelse(item %in% paste0("norms_town1_", c(1, 7, 8, 9)),
"norms_town_hygiene", survey_name))) %>%
filter(survey_name %in% unique(data_corr$survey_name)) %>%
select(survey_name, measure, citation) %>%
bind_rows(data.frame(survey_name = "dehumanization", measure = "Ascent of (Hu)Man Scale", citation = "Kteily et al., 2015")) %>%
unique() %>%
arrange(survey_name) %>%
mutate(citation = ifelse(is.na(citation), "", citation)) %>%
kable(format = "pandoc", caption = "measure key")
survey_name | measure | citation |
---|---|---|
age | Age | |
agency | Agency in mitigating the spread of COVID-19 | |
beliefs_environment | Beliefs: the environment | |
beliefs_mental | Beliefs: mental health | |
beliefs_norms | Beliefs: social norms | |
beliefs_others_home | Beliefs: importance of social distancing if others do it | |
beliefs_safe_others | Beliefs: protecting others | |
beliefs_safe_self | Beliefs: protecting oneself | |
cognition | Need for Cognition | Adapted from Cacioppo & Petty, 1982 |
covid_studies | Number of COVID-19 studies | |
dehumanization | Ascent of (Hu)Man Scale | Kteily et al., 2015 |
IAF_autonomous | Index of Autonomous Functioning: self-congruence/authorship | Weinstein, Przybylski, & Ryan, 2012 |
IAF_controlling | Index of Autonomous Functioning: susceptibility to control | Weinstein, Przybylski, & Ryan, 2012 |
intentions_hygiene | Intentions: hygiene | |
intentions_social_dist | Intentions: social distancing | |
IRI_empathic_concern | Interpersonal Reactivity Index: empathic concern | Davis, 1980 |
IRI_personal_distress | Interpersonal Reactivity Index: personal distress | Davis, 1980 |
msg_familiarity | Message familiarity | |
msg_motiv_other | Message others motivation to social distance | |
msg_motiv_self | Message self motivation to social distance | |
msg_rel_self | Message self-relevance | |
msg_rel_social | Message social relevance | |
msg_share | Message sharing intention | |
norms_close_hygiene | Norms close others: hygiene | |
norms_close_social_dist | Norms close others: social distancing | |
norms_town_hygiene | Norms community: hygiene | |
norms_town_social_dist | Norms community: social distancing | |
politics_conserv | Political ideology | |
politics_party | Political party | |
purpose | Purpose in life | Adapted from Ryff, 1989 |
SC_independence | Self-Construal Scale: independence | Adapted from Singelis, 1994 |
SC_interdependence | Self-Construal Scale: interdependence | Adapted from Singelis, 1994 |
trust | Trust for COVID-19 information sources | |
usefulness | Usefulness to family/community/society | |
victimhood | Tendency for Interpersonal Victimhood | Gabay et al., 2020 |
Below are descriptive statistics and response distributions for the primary measures of interest in this project.
FIGURE LEGEND
For each survey item, individual responses, the density distribution, and a box and whisker plot are shown.
Each dot represents the response for one individual person.
The box and whisker plots depicted below represent the following statistics: the vertical line in the middle of the box is the median, the box spans the interquartile range (25th to 75th percentile), and the whiskers extend to the most extreme values within 1.5 times the interquartile range of the box hinges. Points beyond the whiskers are potential outliers.
These plots are generated across studies, as well as for each study individually.
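For reference, the statistics summarized by each box and whisker plot can be computed directly; below is a minimal sketch, assuming x is a numeric vector of responses for a single item (the plots themselves are produced by geom_boxplot()).
# minimal sketch of the box and whisker statistics (assumes x is a numeric response vector)
box_stats = function(x) {
  q = quantile(x, probs = c(.25, .5, .75), na.rm = TRUE)
  iqr = q[3] - q[1]
  fences = c(q[1] - 1.5 * iqr, q[3] + 1.5 * iqr)
  list(median = unname(q[2]),
       hinges = unname(q[c(1, 3)]),
       # whiskers end at the most extreme observations inside the fences
       whiskers = range(x[!is.na(x) & x >= fences[1] & x <= fences[2]]))
}
box_stats(rnorm(100))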
plot_desc = function(data, survey, item = TRUE, message = FALSE,
                     # default mirrors the global palette defined above; avoid the
                     # self-referential default (palette = palette), which R cannot evaluate
                     palette = wesanderson::wes_palette("Zissou1", n = 3, type = "continuous"),
                     text_size = 2.5, alpha = .1) {
source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")
if (item == FALSE){
if (message == TRUE) {
labels = data %>%
filter(grepl(!!(survey), survey_name)) %>%
group_by(survey_name, text) %>%
summarize(y = (min(value, na.rm = TRUE) + max(value, na.rm = TRUE)) / 2)
data %>%
filter(grepl(!!(survey), survey_name)) %>%
ggplot(aes(survey_name, value)) +
geom_flat_violin(position = position_nudge(x = .1, y = 0), color = FALSE, fill = palette[1]) +
geom_point(position = position_jitter(width = .05), size = .5, alpha = alpha, color = palette[1]) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0) +
geom_text(data = labels, aes(x = survey_name, y = y, label = text), nudge_x = .55, size = text_size) +
coord_flip() +
scale_x_discrete(expand = expansion(add = c(0, .75))) +
labs(x = "", y = "rating") +
theme_minimal() +
theme(axis.text.y = element_blank(),
legend.position = "top")
} else {
labels = data %>%
filter(grepl(!!(survey), survey_name)) %>%
group_by(survey_name) %>%
summarize(y = (min(value, na.rm = TRUE) + max(value, na.rm = TRUE)) / 2)
data %>%
filter(grepl(!!(survey), survey_name)) %>%
ggplot(aes(survey_name, value)) +
geom_flat_violin(position = position_nudge(x = .1, y = 0), color = FALSE, fill = palette[1]) +
geom_point(position = position_jitter(width = .05), size = .5, alpha = alpha, color = palette[1]) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0) +
geom_text(data = labels, aes(x = survey_name, y = y, label = survey_name), nudge_x = .55, size = text_size) +
coord_flip() +
scale_x_discrete(expand = expansion(add = c(0, .75))) +
labs(x = "", y = "rating") +
theme_minimal() +
theme(axis.text.y = element_blank(),
legend.position = "top")
}
} else {
labels = data %>%
filter(grepl(!!(survey), survey_name)) %>%
group_by(survey_name, item, text) %>%
summarize(y = (min(value, na.rm = TRUE) + max(value, na.rm = TRUE)) / 2)
data %>%
filter(grepl(!!(survey), survey_name)) %>%
ggplot(aes(item, value)) +
geom_flat_violin(position = position_nudge(x = .1, y = 0), color = FALSE, fill = palette[1]) +
geom_point(position = position_jitter(width = .05), size = .5, alpha = alpha, color = palette[1]) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0) +
geom_text(data = labels, aes(x = item, y = y, label = text), nudge_x = .55, size = text_size) +
coord_flip() +
scale_x_discrete(expand = expansion(add = c(0, .75))) +
labs(x = "", y = "rating") +
theme_minimal() +
theme(axis.text.y = element_blank(),
legend.position = "top")
}
}
table_desc = function(data, survey, item=TRUE, message=FALSE) {
if (item == FALSE) {
if (message == TRUE) {
data %>%
filter(grepl(!!(survey), survey_name)) %>%
filter(!is.na(value)) %>%
group_by(survey_name, text) %>%
summarize(n = n(),
min = min(value, na.rm = TRUE),
max = max(value, na.rm = TRUE),
mean = mean(value, na.rm = TRUE),
median = median(value, na.rm = TRUE),
sd = sd(value, na.rm = TRUE)) %>%
mutate_if(is.numeric, round, 2) %>%
DT::datatable(rownames = FALSE, extensions = 'FixedColumns',
options = list(scrollX = TRUE,
scrollY = TRUE,
fixedColumns = list(leftColumns = 2)))
} else {
data %>%
filter(grepl(!!(survey), survey_name)) %>%
filter(!is.na(value)) %>%
group_by(survey_name) %>%
summarize(n = n(),
min = min(value, na.rm = TRUE),
max = max(value, na.rm = TRUE),
mean = mean(value, na.rm = TRUE),
median = median(value, na.rm = TRUE),
sd = sd(value, na.rm = TRUE)) %>%
mutate_if(is.numeric, round, 2) %>%
DT::datatable(rownames = FALSE, extensions = 'FixedColumns',
options = list(scrollX = TRUE,
scrollY = TRUE,
fixedColumns = list(leftColumns = 2)))
}
} else {
data %>%
filter(grepl(!!(survey), survey_name)) %>%
filter(!is.na(value)) %>%
group_by(text) %>%
summarize(n = n(),
min = min(value, na.rm = TRUE),
max = max(value, na.rm = TRUE),
mean = mean(value, na.rm = TRUE),
median = median(value, na.rm = TRUE),
sd = sd(value, na.rm = TRUE)) %>%
mutate_if(is.numeric, round, 2) %>%
DT::datatable(rownames = FALSE, extensions = 'FixedColumns',
options = list(scrollX = TRUE,
scrollY = TRUE,
fixedColumns = list(leftColumns = 2)))
}
}
measure
Message familiarity
scale
1 = yes, 0 = no
measures
Intentions: hygiene
Intentions: social distancing
question text
Please rate how much you intend to do the following things over the next two weeks.
scale
1 = definitely will not, 7 = definitely will
measures
Intentions: hygiene
Intentions: social distancing
Intentions: sharing
question text
Please estimate how many times you intend to do the following things over the next two weeks. Please select 15 to indicate more than 14 times.
scale
0-14+
measures
Norms close others: hygiene
Norms close others: social distancing
question text
For these questions, think about your closest friends and family. What proportion of them will do the following things in the next two weeks?
scale
0-100%
measures
Norms close others: hygiene
Norms close others: social distancing
Norms close others: sharing
measures
Norms community: hygiene
Norms community: social distancing
question text
For these questions, think about the people in your city or town. What proportion of them will do the following things in the next two weeks?
scale
0-100%
measures
Norms community: hygiene
Norms community: social distancing
Norms community: sharing
measure
Beliefs: protecting oneself
question text
If I stay home every day for the next two weeks, and avoid social contact, even if I’m not sick:
scale
1 = strongly disagree, 7 = strongly agree
measure
Beliefs: protecting others
question text
If I stay home every day for the next two weeks, and avoid social contact, even if I’m not sick:
scale
1 = strongly disagree, 7 = strongly agree
measure
Beliefs: mental health
question text
If I stay home every day for the next two weeks, and avoid social contact, even if I’m not sick:
scale
1 = strongly disagree, 7 = strongly agree
measure
Beliefs: the environment
question text
If I stay home every day for the next two weeks, and avoid social contact, even if I’m not sick:
scale
1 = strongly disagree, 7 = strongly agree
measure
Agency in mitigating the spread of COVID-19
question text
To what extent do you agree or disagree with the following statements?
scale
1 = strongly disagree, 7 = strongly agree
measure
Public policy support
question text
To what extent do you support the following policies in response to the COVID-19 pandemic?
scale
1 = do not support at all, 7 = support very much
measure
Index of Autonomous Functioning
question text
Please indicate how true each statement is of your experiences on the whole. Remember that there are no right or wrong answers. Please answer according to what really reflects your experience rather than what you think your experience should be.
scale
1 = not at all true, 5 = completely true
measure
Purpose in life
question text
Please indicate the extent to which you agree with the following statements.
scale
1 = strongly disagree, 6 = strongly agree
measure
Interpersonal Reactivity Index
question text
The following statements inquire about your thoughts and feelings in a variety of situations.
scale
1 = does not describe me well, 5 = describes me very well
measure
Self-Construal Scale
question text
This is a questionnaire that measures a variety of feelings and behaviors in various situations. Listed below are a number of statements. Read each one as if it referred to you.
scale
1 = strongly disagree, 7 = strongly agree
measure
Tendency for Interpersonal Victimhood
question text
The following items relate to the manner in which you experience situations in which you felt that you were hurt, victimized, or treated unjustly, as well as additional thoughts and feelings related to your relations with other people. Please indicate the extent to which you agree with each of the following statements.
scale
1 = not at all, 7 = very much
This measure was only included in full study 1
measure
Usefulness to family/community/society
question text
To what extent do you agree or disagree with the following statements?
scale
1 = strongly disagree, 7 = strongly agree
measure
Trust for COVID-19 information sources
question text
To what extent do you trust information about the COVID-19 pandemic from the following sources?
scale
1 = do not trust at all, 7 = trust very much
measure
Political party
question text
In politics today, would you consider yourself a:
scale
1 = Strong Democrat, 2 = Weak Democrat, 3 = Independent leaning Democrat, 4 = Independent, 5 = Independent leaning Republican, 6 = Weak Republican, 7 = Strong Republican
measure
Political ideology
question text
We hear a lot of talk these days about liberals and conservatives. Here is a seven-point scale on which the political views that people might hold are arranged from extremely liberal to extremely conservative. Where would you place yourself on this scale?
scale
1 = extremely liberal, 7 = extremely conservative
measure
Age
question text
How old are you?
measure
Number of COVID-19 studies
question text
How many COVID-related studies have you participated in prior to this one? Please enter a number.
Note: 8 extreme outliers who reported 200 or more studies were removed.
Below are correlation matrices for the primary measures of interest in this project.
FIGURE LEGEND
Correlations below are Pearson correlations using pair-wise removal of missing data. These correlations are generated across studies, as well as for each study individually. For comparison, we also provide correlations in which outliers (values beyond +/- 3 SD from the mean) are winsorized to +/- 3 SD.
plot_corr = function(data, text_size=3) {
data %>%
spread(survey_name, value) %>%
select(-c(SID, study)) %>%
GGally::ggcorr(., hjust = 1, size = text_size,
label = TRUE, label_size = 3, label_round = 2) +
scale_x_discrete(expand = expansion(add = c(3, 0))) +
theme(legend.position = "none",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}
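GGally::ggcorr computes these pairwise Pearson correlations internally; purely for transparency, here is a minimal sketch of the equivalent computation and of the +/- 3 SD winsorization described above. The object name wide is a hypothetical intermediate built from data_corr.
# minimal sketch: pairwise Pearson correlations on the participant-by-measure data
wide = data_corr %>%
  spread(survey_name, value) %>%
  ungroup() %>%
  select(-SID, -study)
corr_mat = cor(wide, use = "pairwise.complete.obs", method = "pearson")
# winsorize a vector to +/- 3 SD from the mean (data_win above applies this
# within study and survey, excluding familiarity items)
winsorize = function(x, n_sd = 3) {
  m = mean(x, na.rm = TRUE)
  s = sd(x, na.rm = TRUE)
  pmin(pmax(x, m - n_sd * s), m + n_sd * s)
}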
social norms
measure
Beliefs: social norms
question text
If I stay home every day for the next two weeks, and avoid social contact, even if I’m not sick:
scale
1 = strongly disagree, 7 = strongly agree
Correlation tables and figures are presented for all studies combined, as well as for pilot 2 and full study 1 individually.