diff --git a/.gitignore b/.gitignore
index c591ceb1f473580b415ba4f659edb0992b4daf67..e983046b25ed63c2c6b23959cf9155e22cef80f9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,18 +1,18 @@
-.Rproj.user
-.Rhistory
-*.RData
-.Ruserdata
-*.rds
-*.xlsx
-*.png
-Thumbs.db
-/Estimation_results/
-*.pptx
-/Data
-/Tables
-*.pdf
-
-# Presentation quarto output
-project_start.html
-/project_start_cache
+.Rproj.user
+.Rhistory
+*.RData
+.Ruserdata
+*.rds
+*.xlsx
+*.png
+Thumbs.db
+/Estimation_results/
+*.pptx
+/Data
+/Tables
+*.pdf
+
+# Presentation quarto output
+project_start.html
+/project_start_cache
 /project_start_files
\ No newline at end of file
diff --git a/Scripts/MAKE_FILE.R b/Scripts/MAKE_FILE.R
index 84e3ec01a3b603c8ad6a5be5dbf5f0fdb6f3205e..08efb1fba10654a2628dc70a24b15d03a87c059a 100644
--- a/Scripts/MAKE_FILE.R
+++ b/Scripts/MAKE_FILE.R
@@ -1,99 +1,99 @@
-rm(list=ls())
-library(tidyverse)
-library(tidylog)
-library(apollo)
-library(reshape2)
-library(xtable)
-library(stargazer)
-library(texreg)
-#test
-# Set values for estimation in Apollo
-n_draws <- 2000
-n_cores <- min(parallel::detectCores()-1, 25)
-
-# Load data
-load("Data/database_full.RData")
-load("Data/database.RData")
-
-# Data preparation
-source("Scripts/data_prep.R")
-source("Scripts/treatment.R")
-
-####### Estimate models ######
-
-### Logit
-source("Scripts/logit/chr_vol_treat.R")
-source("Scripts/logit/protesters.R")
-
-### OLS
-source("Scripts/ols/ols_time_spent.R")
-source("Scripts/ols/ols_quiz.R")
-source("Scripts/ols/ols_opt_out.R")
-source("Scripts/ols/ols_nr.R")
-source("Scripts/ols/ols_consequentiality.R")
-
-##### Conditional Logits #####
-#source("Scripts/clogit.R")
-#source("Scripts/clogit_wtp.R")
-
-##### Mixed Logit Models ######
-
-#source("Scripts/mxl/mxl_wtp_space.R")
-#source("Scripts/mxl/mxl_wtp_space_4d.R")
-#source("Scripts/mxl/mxl_wtp_space_interact_all.R")
-#source("Scripts/mxl/mxl_socio_int.R")
-#source("Scripts/mxl/mxl_treatment_time.R")
-#source("Scripts/mxl/mxl_treatment_time_interaction.R")
-#source("Scripts/mxl/mxl_treatment_time_Dummies.R")
-#source("Scripts/mxl/mxl_wtp_space_interact_everything.R")
-#source("Scripts/mxl/mxl_wtp_space_4d_interact_everything.R")
-#############################
-
-##### Load models ############
-
-mxl_wtp <- apollo_loadModel("Estimation_results/mxl/MXL_wtp")
-mxl_wtp_4d <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_4d")
-mxl_wtp_all_int <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_interact_all")
-mxl_wtp_socio <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_socio_int")
-mxl_wtp_tt <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_treatment_time")
-mxl_wtp_tt_interaction <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_treatment_time_interaction")
-mxl_wtp_time_groups <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_time_groups")
-mxl_wtp_everything <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_interact_everything")
-mxl_wtp_case_a <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case A")
-mxl_wtp_case_a_NR <- apollo_loadModel("Estimation_results/mxl/MXL_wtp NR A")
-mxl_wtp_case_b <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case B")
-mxl_wtp_case_b_NR <- apollo_loadModel("Estimation_results/mxl/MXL_wtp NR B")
-mxl_wtp_case_c <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_Case_C")
-mxl_wtp_case_c_NR <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_NR_Case_C")
-
-# rent interactions models
-mxl_wtp_case_a_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case A Rent Int")
-mxl_wtp_case_b_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case B Rent Int")
-mxl_wtp_case_c_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_Case_C Rent INT")
-
-# rent interactions models NR
-mxl_wtp_NR_case_a_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp NR A Rent INT")
-mxl_wtp_NR_case_c_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_NR_Case_C RENT INT X")
-
-# Alternative case
-case_d <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case D Rent Int")
-
-##############################
-
-# Model analysis
-source("Scripts/visualize_models.R")
-
-source("Scripts/compare_split_samples.R")
-
-source("Scripts/create_tables.R")
-
-source("Scripts/interaction_plots_presi.R")
-
-
-### Old models ###
-
-
-# # without protesters
-# mxl_wtp_case_a_prot <- apollo_loadModel("Estimation_results/mxl/without_protesters/MXL_wtp Case A prot")
-# mxl_wtp_case_b_prot <- apollo_loadModel("Estimation_results/mxl/without_protesters/MXL_wtp Case B prot")
+rm(list=ls())
+library(tidyverse)
+library(tidylog)
+library(apollo)
+library(reshape2)
+library(xtable)
+library(stargazer)
+library(texreg)
+#test
+# Set values for estimation in Apollo
+n_draws <- 2000
+n_cores <- min(parallel::detectCores()-1, 25)
+
+# Load data
+load("Data/database_full.RData")
+load("Data/database.RData")
+
+# Data preparation
+source("Scripts/data_prep.R")
+source("Scripts/treatment.R")
+
+####### Estimate models ######
+
+### Logit
+source("Scripts/logit/chr_vol_treat.R")
+source("Scripts/logit/protesters.R")
+
+### OLS
+source("Scripts/ols/ols_time_spent.R")
+source("Scripts/ols/ols_quiz.R")
+source("Scripts/ols/ols_opt_out.R")
+source("Scripts/ols/ols_nr.R")
+source("Scripts/ols/ols_consequentiality.R")
+
+##### Conditional Logits #####
+#source("Scripts/clogit.R")
+#source("Scripts/clogit_wtp.R")
+
+##### Mixed Logit Models ######
+
+#source("Scripts/mxl/mxl_wtp_space.R")
+#source("Scripts/mxl/mxl_wtp_space_4d.R")
+#source("Scripts/mxl/mxl_wtp_space_interact_all.R")
+#source("Scripts/mxl/mxl_socio_int.R")
+#source("Scripts/mxl/mxl_treatment_time.R")
+#source("Scripts/mxl/mxl_treatment_time_interaction.R")
+#source("Scripts/mxl/mxl_treatment_time_Dummies.R")
+#source("Scripts/mxl/mxl_wtp_space_interact_everything.R")
+#source("Scripts/mxl/mxl_wtp_space_4d_interact_everything.R")
+#############################
+
+##### Load models ############
+
+mxl_wtp <- apollo_loadModel("Estimation_results/mxl/MXL_wtp")
+mxl_wtp_4d <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_4d")
+mxl_wtp_all_int <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_interact_all")
+mxl_wtp_socio <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_socio_int")
+mxl_wtp_tt <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_treatment_time")
+mxl_wtp_tt_interaction <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_treatment_time_interaction")
+mxl_wtp_time_groups <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_time_groups")
+mxl_wtp_everything <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_interact_everything")
+mxl_wtp_case_a <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case A")
+mxl_wtp_case_a_NR <- apollo_loadModel("Estimation_results/mxl/MXL_wtp NR A")
+mxl_wtp_case_b <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case B")
+mxl_wtp_case_b_NR <- apollo_loadModel("Estimation_results/mxl/MXL_wtp NR B")
+mxl_wtp_case_c <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_Case_C")
+mxl_wtp_case_c_NR <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_NR_Case_C")
+
+# rent interactions models
+mxl_wtp_case_a_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case A Rent Int") +mxl_wtp_case_b_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case B Rent Int") +mxl_wtp_case_c_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_Case_C Rent INT") + +# rent interactions models NR +mxl_wtp_NR_case_a_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp NR A Rent INT") +mxl_wtp_NR_case_c_rentINT <- apollo_loadModel("Estimation_results/mxl/MXL_wtp_NR_Case_C RENT INT X") + +# Alternative case +case_d <- apollo_loadModel("Estimation_results/mxl/MXL_wtp Case D Rent Int") + +############################## + +# Model analysis +source("Scripts/visualize_models.R") + +source("Scripts/compare_split_samples.R") + +source("Scripts/create_tables.R") + +source("Scripts/interaction_plots_presi.R") + + +### Old models ### + + +# # without protesters +# mxl_wtp_case_a_prot <- apollo_loadModel("Estimation_results/mxl/without_protesters/MXL_wtp Case A prot") +# mxl_wtp_case_b_prot <- apollo_loadModel("Estimation_results/mxl/without_protesters/MXL_wtp Case B prot") # mxl_wtp_case_c_prot <- apollo_loadModel("Estimation_results/mxl/without_protesters/MXL_wtp_Case_C prot") \ No newline at end of file diff --git a/Scripts/compare_split_samples.R b/Scripts/compare_split_samples.R index 9fbb6064d2547d33c12581360ba840e111a2fbaf..ab93b95f547579cd013578fbe609128faf50e4ab 100644 --- a/Scripts/compare_split_samples.R +++ b/Scripts/compare_split_samples.R @@ -1,203 +1,203 @@ -#### Load WTP models for different treatments ##### - -# Case A - -mxl_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Treated A") -mxl_vol_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Vol_Treated A") -mxl_not_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Not_Treated A") - - -mxl_info_compare <- as.data.frame(mxl_tr$estimate) -mxl_info_compare[2] <- as.data.frame(mxl_vol_tr$estimate) -mxl_info_compare[3] <- as.data.frame(mxl_not_tr$estimate) - -alpha = 0.1 -mxl_info_compare$margin_of_error1 <- qnorm(1-alpha)*mxl_tr$robse -mxl_info_compare$margin_of_error2 <- qnorm(1-alpha)*mxl_vol_tr$robse -mxl_info_compare$margin_of_error3 <- qnorm(1-alpha)*mxl_not_tr$robse - -mxl_info_compare <- rownames_to_column(mxl_info_compare, "Coefficent") -colnames(mxl_info_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", - "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") - - -mxl_melt_info <- melt(mxl_info_compare[1:4], id = "Coefficent") -mxl_melt_info$ME <- mxl_info_compare$Margin_of_error_TR -mxl_melt_info$ME[9:16] <- mxl_info_compare$Margin_of_error_VOL_TR -mxl_melt_info$ME[17:24] <- mxl_info_compare$Margin_of_error_NOT_TR - - -# Figure paper compare info treatments # -ggplot(data=mxl_melt_info, aes(x=Coefficent, y=abs(value), fill=variable)) + - geom_bar(stat="identity", position='dodge', width = 0.9) + - geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) + - ylab("Absolute Value") + - xlab("Coefficient") + - scale_x_discrete(guide = guide_axis(angle = 45)) + - scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") + - theme(legend.position = c(0.85, 0.8)) - -ggsave("Figures/compare_split_samplesA.png", dpi = "print", width = 7, height = 5) - -# Case A log rent - -mxl_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Treated A log") -mxl_vol_tr <- 
apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Vol_Treated A log") -mxl_not_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Not_Treated A log") - - -mxl_info_compare <- as.data.frame(mxl_tr$estimate) -mxl_info_compare[2] <- as.data.frame(mxl_vol_tr$estimate) -mxl_info_compare[3] <- as.data.frame(mxl_not_tr$estimate) - -alpha = 0.05 -mxl_info_compare$margin_of_error1 <- qnorm(1-alpha)*mxl_tr$robse -mxl_info_compare$margin_of_error2 <- qnorm(1-alpha)*mxl_vol_tr$robse -mxl_info_compare$margin_of_error3 <- qnorm(1-alpha)*mxl_not_tr$robse - -mxl_info_compare <- rownames_to_column(mxl_info_compare, "Coefficent") -colnames(mxl_info_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", - "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") - - -mxl_melt_info_log <- melt(mxl_info_compare[1:4], id = "Coefficent") -mxl_melt_info_log$ME <- mxl_info_compare$Margin_of_error_TR -mxl_melt_info_log$ME[8:14] <- mxl_info_compare$Margin_of_error_VOL_TR -mxl_melt_info_log$ME[15:21] <- mxl_info_compare$Margin_of_error_NOT_TR - -mxl_melt_info_log <- mxl_melt_info_log %>% mutate(ME = case_when(abs(value) < 1 ~ ME*100, TRUE~ME), - value = case_when(abs(value) < 1 ~ value*100, TRUE~value)) - -# Figure paper compare info treatments # -ggplot(data=mxl_melt_info_log, aes(x=Coefficent, y=abs(value), fill=variable)) + - geom_bar(stat="identity", position='dodge', width = 0.9) + - geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) + - ylab("Absolute Value") + - xlab("Coefficient") + - scale_x_discrete(guide = guide_axis(angle = 45)) + - scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") + - theme(legend.position = c(0.85, 0.8)) - - - -### Compare clogit models prefspace - -clogit_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A TR") -clogit_vol_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A VOL TR") -clogit_not_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A NOT TR") - - -clogit_compare <- as.data.frame(clogit_tr$estimate) -clogit_compare[2] <- as.data.frame(clogit_vol_tr$estimate) -clogit_compare[3] <- as.data.frame(clogit_not_tr$estimate) - -alpha = 0.1 -clogit_compare$margin_of_error1 <- qnorm(1-alpha)*clogit_tr$robse -clogit_compare$margin_of_error2 <- qnorm(1-alpha)*clogit_vol_tr$robse -clogit_compare$margin_of_error3 <- qnorm(1-alpha)*clogit_not_tr$robse - -clogit_compare <- rownames_to_column(clogit_compare, "Coefficent") -colnames(clogit_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", - "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") - - -clogit_melt <- melt(clogit_compare[1:4], id = "Coefficent") -clogit_melt$ME <-clogit_compare$Margin_of_error_TR -clogit_melt$ME[5:8] <- clogit_compare$Margin_of_error_VOL_TR -clogit_melt$ME[9:12] <- clogit_compare$Margin_of_error_NOT_TR - -# Scale up rent and wd coefficients for visibility -clogit_melt <- clogit_melt %>% mutate(ME = case_when(value <= 0.1 ~ ME*10, TRUE ~ ME), - value = case_when(value <= 0.1 ~ value*10, TRUE ~ value)) - -# Figure clogit prefspace -ggplot(data=clogit_melt, aes(x=Coefficent, y=abs(value), fill=variable)) + - geom_bar(stat="identity", position='dodge', width = 0.9) + - geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) + - ylab("Absolute Value") + - xlab("Coefficient") + - 
scale_x_discrete(guide = guide_axis(angle = 45)) + - scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") + - theme(legend.position = c(0.85, 0.8)) - - -## Clogit WTP space - -clogit_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A TR wtp") -clogit_vol_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A VOL TR wtp") -clogit_not_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A NOT TR wtp") - - -clogit_compare <- as.data.frame(clogit_tr$estimate) -clogit_compare[2] <- as.data.frame(clogit_vol_tr$estimate) -clogit_compare[3] <- as.data.frame(clogit_not_tr$estimate) - -alpha = 0.1 -clogit_compare$margin_of_error1 <- qnorm(1-alpha)*clogit_tr$robse -clogit_compare$margin_of_error2 <- qnorm(1-alpha)*clogit_vol_tr$robse -clogit_compare$margin_of_error3 <- qnorm(1-alpha)*clogit_not_tr$robse - -clogit_compare <- rownames_to_column(clogit_compare, "Coefficent") -colnames(clogit_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", - "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") - - -clogit_melt <- melt(clogit_compare[1:4], id = "Coefficent") -clogit_melt$ME <-clogit_compare$Margin_of_error_TR -clogit_melt$ME[5:8] <- clogit_compare$Margin_of_error_VOL_TR -clogit_melt$ME[9:12] <- clogit_compare$Margin_of_error_NOT_TR - -# Scale up rent and wd coefficients for visibility -clogit_melt <- clogit_melt %>% mutate(ME = case_when(abs(value) <= 0.5 ~ ME*1000, abs(value) <= 2 ~ ME*10, TRUE ~ ME), - value = case_when(abs(value) <= 0.5 ~ abs(value)*1000, value <= 2 ~ value*10, TRUE ~ value)) - -# Figure clogit prefspace -ggplot(data=clogit_melt, aes(x=Coefficent, y=abs(value), fill=variable)) + - geom_bar(stat="identity", position='dodge', width = 0.9) + - geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) + - ylab("Absolute Value") + - xlab("Coefficient") + - scale_x_discrete(guide = guide_axis(angle = 45)) + - scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") + - theme(legend.position = c(0.85, 0.8)) - - -## Clogit WTP space log rent - -clogit_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A TR wtp log") -clogit_vol_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A VOL TR wtp log") -clogit_not_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A NOT TR wtp log") - - -clogit_compare <- as.data.frame(clogit_tr$estimate) -clogit_compare[2] <- as.data.frame(clogit_vol_tr$estimate) -clogit_compare[3] <- as.data.frame(clogit_not_tr$estimate) - -alpha = 0.1 -clogit_compare$margin_of_error1 <- qnorm(1-alpha)*clogit_tr$robse -clogit_compare$margin_of_error2 <- qnorm(1-alpha)*clogit_vol_tr$robse -clogit_compare$margin_of_error3 <- qnorm(1-alpha)*clogit_not_tr$robse - -clogit_compare <- rownames_to_column(clogit_compare, "Coefficent") -colnames(clogit_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", - "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") - - -clogit_melt <- melt(clogit_compare[1:4], id = "Coefficent") -clogit_melt$ME <-clogit_compare$Margin_of_error_TR -clogit_melt$ME[5:8] <- clogit_compare$Margin_of_error_VOL_TR -clogit_melt$ME[9:12] <- clogit_compare$Margin_of_error_NOT_TR - -# Scale up rent and wd coefficients for visibility -clogit_melt <- clogit_melt %>% mutate(ME = case_when(Coefficent != "mu_rent" ~ ME*1000, TRUE ~ ME), - 
value = case_when(Coefficent != "mu_rent" ~ value*1000, TRUE ~ value)) - -# Figure clogit prefspace -ggplot(data=clogit_melt, aes(x=Coefficent, y=abs(value), fill=variable)) + - geom_bar(stat="identity", position='dodge', width = 0.9) + - geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) + - ylab("Absolute Value") + - xlab("Coefficient") + - scale_x_discrete(guide = guide_axis(angle = 45)) + - scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") + - theme(legend.position = c(0.85, 0.8)) +#### Load WTP models for different treatments ##### + +# Case A + +mxl_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Treated A") +mxl_vol_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Vol_Treated A") +mxl_not_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Not_Treated A") + + +mxl_info_compare <- as.data.frame(mxl_tr$estimate) +mxl_info_compare[2] <- as.data.frame(mxl_vol_tr$estimate) +mxl_info_compare[3] <- as.data.frame(mxl_not_tr$estimate) + +alpha = 0.1 +mxl_info_compare$margin_of_error1 <- qnorm(1-alpha)*mxl_tr$robse +mxl_info_compare$margin_of_error2 <- qnorm(1-alpha)*mxl_vol_tr$robse +mxl_info_compare$margin_of_error3 <- qnorm(1-alpha)*mxl_not_tr$robse + +mxl_info_compare <- rownames_to_column(mxl_info_compare, "Coefficent") +colnames(mxl_info_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", + "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") + + +mxl_melt_info <- melt(mxl_info_compare[1:4], id = "Coefficent") +mxl_melt_info$ME <- mxl_info_compare$Margin_of_error_TR +mxl_melt_info$ME[9:16] <- mxl_info_compare$Margin_of_error_VOL_TR +mxl_melt_info$ME[17:24] <- mxl_info_compare$Margin_of_error_NOT_TR + + +# Figure paper compare info treatments # +ggplot(data=mxl_melt_info, aes(x=Coefficent, y=abs(value), fill=variable)) + + geom_bar(stat="identity", position='dodge', width = 0.9) + + geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) + + ylab("Absolute Value") + + xlab("Coefficient") + + scale_x_discrete(guide = guide_axis(angle = 45)) + + scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") + + theme(legend.position = c(0.85, 0.8)) + +ggsave("Figures/compare_split_samplesA.png", dpi = "print", width = 7, height = 5) + +# Case A log rent + +mxl_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Treated A log") +mxl_vol_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Vol_Treated A log") +mxl_not_tr <- apollo_loadModel("Estimation_results/mxl/Split_samples/MXL_wtp Not_Treated A log") + + +mxl_info_compare <- as.data.frame(mxl_tr$estimate) +mxl_info_compare[2] <- as.data.frame(mxl_vol_tr$estimate) +mxl_info_compare[3] <- as.data.frame(mxl_not_tr$estimate) + +alpha = 0.05 +mxl_info_compare$margin_of_error1 <- qnorm(1-alpha)*mxl_tr$robse +mxl_info_compare$margin_of_error2 <- qnorm(1-alpha)*mxl_vol_tr$robse +mxl_info_compare$margin_of_error3 <- qnorm(1-alpha)*mxl_not_tr$robse + +mxl_info_compare <- rownames_to_column(mxl_info_compare, "Coefficent") +colnames(mxl_info_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR", + "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR") + + +mxl_melt_info_log <- melt(mxl_info_compare[1:4], id = "Coefficent") 
+mxl_melt_info_log$ME <- mxl_info_compare$Margin_of_error_TR
+mxl_melt_info_log$ME[8:14] <- mxl_info_compare$Margin_of_error_VOL_TR
+mxl_melt_info_log$ME[15:21] <- mxl_info_compare$Margin_of_error_NOT_TR
+
+mxl_melt_info_log <- mxl_melt_info_log %>% mutate(ME = case_when(abs(value) < 1 ~ ME*100, TRUE~ME),
+                                                  value = case_when(abs(value) < 1 ~ value*100, TRUE~value))
+
+# Figure paper compare info treatments #
+ggplot(data=mxl_melt_info_log, aes(x=Coefficent, y=abs(value), fill=variable)) +
+  geom_bar(stat="identity", position='dodge', width = 0.9) +
+  geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) +
+  ylab("Absolute Value") +
+  xlab("Coefficient") +
+  scale_x_discrete(guide = guide_axis(angle = 45)) +
+  scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") +
+  theme(legend.position = c(0.85, 0.8))
+
+
+
+### Compare clogit models prefspace
+
+clogit_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A TR")
+clogit_vol_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A VOL TR")
+clogit_not_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A NOT TR")
+
+
+clogit_compare <- as.data.frame(clogit_tr$estimate)
+clogit_compare[2] <- as.data.frame(clogit_vol_tr$estimate)
+clogit_compare[3] <- as.data.frame(clogit_not_tr$estimate)
+
+alpha = 0.1
+clogit_compare$margin_of_error1 <- qnorm(1-alpha)*clogit_tr$robse
+clogit_compare$margin_of_error2 <- qnorm(1-alpha)*clogit_vol_tr$robse
+clogit_compare$margin_of_error3 <- qnorm(1-alpha)*clogit_not_tr$robse
+
+clogit_compare <- rownames_to_column(clogit_compare, "Coefficent")
+colnames(clogit_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR",
+                              "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR")
+
+
+clogit_melt <- melt(clogit_compare[1:4], id = "Coefficent")
+clogit_melt$ME <-clogit_compare$Margin_of_error_TR
+clogit_melt$ME[5:8] <- clogit_compare$Margin_of_error_VOL_TR
+clogit_melt$ME[9:12] <- clogit_compare$Margin_of_error_NOT_TR
+
+# Scale up rent and wd coefficients for visibility
+clogit_melt <- clogit_melt %>% mutate(ME = case_when(value <= 0.1 ~ ME*10, TRUE ~ ME),
+                                      value = case_when(value <= 0.1 ~ value*10, TRUE ~ value))
+
+# Figure clogit prefspace
+ggplot(data=clogit_melt, aes(x=Coefficent, y=abs(value), fill=variable)) +
+  geom_bar(stat="identity", position='dodge', width = 0.9) +
+  geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) +
+  ylab("Absolute Value") +
+  xlab("Coefficient") +
+  scale_x_discrete(guide = guide_axis(angle = 45)) +
+  scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") +
+  theme(legend.position = c(0.85, 0.8))
+
+
+## Clogit WTP space
+
+clogit_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A TR wtp")
+clogit_vol_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A VOL TR wtp")
+clogit_not_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A NOT TR wtp")
+
+
+clogit_compare <- as.data.frame(clogit_tr$estimate)
+clogit_compare[2] <- as.data.frame(clogit_vol_tr$estimate)
+clogit_compare[3] <- as.data.frame(clogit_not_tr$estimate)
+
+alpha = 0.1
+clogit_compare$margin_of_error1 <- qnorm(1-alpha)*clogit_tr$robse
+clogit_compare$margin_of_error2 <- qnorm(1-alpha)*clogit_vol_tr$robse
+clogit_compare$margin_of_error3 <- qnorm(1-alpha)*clogit_not_tr$robse
+
+clogit_compare <- rownames_to_column(clogit_compare, "Coefficent")
+colnames(clogit_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR",
+                              "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR")
+
+
+clogit_melt <- melt(clogit_compare[1:4], id = "Coefficent")
+clogit_melt$ME <-clogit_compare$Margin_of_error_TR
+clogit_melt$ME[5:8] <- clogit_compare$Margin_of_error_VOL_TR
+clogit_melt$ME[9:12] <- clogit_compare$Margin_of_error_NOT_TR
+
+# Scale up rent and wd coefficients for visibility
+clogit_melt <- clogit_melt %>% mutate(ME = case_when(abs(value) <= 0.5 ~ ME*1000, abs(value) <= 2 ~ ME*10, TRUE ~ ME),
+                                      value = case_when(abs(value) <= 0.5 ~ abs(value)*1000, value <= 2 ~ value*10, TRUE ~ value))
+
+# Figure clogit prefspace
+ggplot(data=clogit_melt, aes(x=Coefficent, y=abs(value), fill=variable)) +
+  geom_bar(stat="identity", position='dodge', width = 0.9) +
+  geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) +
+  ylab("Absolute Value") +
+  xlab("Coefficient") +
+  scale_x_discrete(guide = guide_axis(angle = 45)) +
+  scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") +
+  theme(legend.position = c(0.85, 0.8))
+
+
+## Clogit WTP space log rent
+
+clogit_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A TR wtp log")
+clogit_vol_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A VOL TR wtp log")
+clogit_not_tr <- apollo_loadModel("Estimation_results/clogit/Clogit case A NOT TR wtp log")
+
+
+clogit_compare <- as.data.frame(clogit_tr$estimate)
+clogit_compare[2] <- as.data.frame(clogit_vol_tr$estimate)
+clogit_compare[3] <- as.data.frame(clogit_not_tr$estimate)
+
+alpha = 0.1
+clogit_compare$margin_of_error1 <- qnorm(1-alpha)*clogit_tr$robse
+clogit_compare$margin_of_error2 <- qnorm(1-alpha)*clogit_vol_tr$robse
+clogit_compare$margin_of_error3 <- qnorm(1-alpha)*clogit_not_tr$robse
+
+clogit_compare <- rownames_to_column(clogit_compare, "Coefficent")
+colnames(clogit_compare) <- c("Coefficent", "Estimate_TR", "Estimate_VOL_TR", "Estimate_NOT_TR", "Margin_of_error_TR",
+                              "Margin_of_error_VOL_TR", "Margin_of_error_NOT_TR")
+
+
+clogit_melt <- melt(clogit_compare[1:4], id = "Coefficent")
+clogit_melt$ME <-clogit_compare$Margin_of_error_TR
+clogit_melt$ME[5:8] <- clogit_compare$Margin_of_error_VOL_TR
+clogit_melt$ME[9:12] <- clogit_compare$Margin_of_error_NOT_TR
+
+# Scale up rent and wd coefficients for visibility
+clogit_melt <- clogit_melt %>% mutate(ME = case_when(Coefficent != "mu_rent" ~ ME*1000, TRUE ~ ME),
+                                      value = case_when(Coefficent != "mu_rent" ~ value*1000, TRUE ~ value))
+
+# Figure clogit prefspace
+ggplot(data=clogit_melt, aes(x=Coefficent, y=abs(value), fill=variable)) +
+  geom_bar(stat="identity", position='dodge', width = 0.9) +
+  geom_errorbar(aes(x=Coefficent, ymin=abs(value)-ME, ymax=abs(value)+ME), width=0.3, position=position_dodge(0.8)) +
+  ylab("Absolute Value") +
+  xlab("Coefficient") +
+  scale_x_discrete(guide = guide_axis(angle = 45)) +
+  scale_fill_brewer(palette = "Set2", labels = c("Treated", "Voluntary Treatment", "Not Treated"), name="Treatment") +
+  theme(legend.position = c(0.85, 0.8))
diff --git a/Scripts/create_tables.R b/Scripts/create_tables.R
index 5eab95439940dff541b51214feaf5789e2d7d4f9..9513164353755ef5a3feb9b76ecf8823810e7a90 100644
--- a/Scripts/create_tables.R
+++ b/Scripts/create_tables.R
@@ -1,166 +1,166 @@
-library(choiceTools)
-
-dir.create("Tables/mxl") -dir.create("Tables/logit") -dir.create("Tables/ols/") - - -list_ols <- list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = "Treated", "as.factor(Treatment_A)Vol_Treated" = "Vol. Treated", - "as.factor(Treatment_C)No Info 2" = "No Info 2", "as.factor(Treatment_C)No Video 1" = "Text 1", - "as.factor(Treatment_C)No Video 2" = "Text 2", "as.factor(Treatment_C)Video 1" = "Video 1", - "as.factor(Treatment_C)Video 2" = "Video 2", "Z_Mean_NR" = "NR-Index", "as.factor(Gender)2" = "Female", - "Age_mean" = "Age", "QFIncome" = "Income", "Uni_degree" = "University Degree") - -# Manipulation check -texreg(l=list(ols_percentage_correct_A, ols_percentage_correct_control_A, ols_percentage_correct_C, ols_percentage_correct_control_C), - custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), - custom.header = list("Dependent Variable: Percentage of correct quiz statements" = 1:4), - custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. Standard errors in parentheses.", - label = "tab:mani", - caption = "Results of OLS on percentage of correct quiz statements.", - file="Tables/ols/manipulation.tex") - - -# Net interview time -texreg(l=list(ols_time_spent_A, ols_time_spent_control_A, ols_time_spent_C, ols_time_spent_control_C), - custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), - custom.header = list("Dependent variable: Net interview time" = 1:4), - custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. Standard errors in parentheses.", - label = "tab:net_int", - caption = "Results of OLS on net interview time.", - file="Tables/ols/interviewtime.tex") - - -# CC Time -texreg(l=list(ols_time_cc_A, ols_time_cc_control_A, ols_time_cc_C, ols_time_cc_control_C), - custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), - custom.header = list("Dependent variable: Mean choice card time" = 1:4), - custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. Standard errors in parentheses.", - label = "tab:cctime", - caption = "Results of OLS on mean choice card time.", - file="Tables/ols/cctime.tex") - -# Consequentiality -texreg(l=list(conseq_model_A, conseq_model_control_A, conseq_model_C, conseq_model_control_C), - custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), - custom.header = list("Dependent variable: Consequentiality score" = 1:4), - custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. Standard errors in parentheses.", - label = "tab:conseq", - caption = "Results of OLS on consequentiality score.", - file="Tables/ols/consequentiality.tex") - -# Opt Out -texreg(l=list(ols_opt_out_A, ols_opt_out_control_A, ols_opt_out_C, ols_opt_out_control_C), - custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), - custom.header = list("Dependent variable: Number of opt-out choices" = 1:4), - custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. 
Standard errors in parentheses.", - label = "tab:optout", - caption = "Results of OLS on number of opt-out choices.", - file="Tables/ols/optout.tex") - -# NR -texreg(l=list(nr_model_treat_A), - custom.model.names = c("OLS regression"), - custom.header = list("Dependent variable: NR-Index" = 1), - custom.coef.map = list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = "Treated", "as.factor(Treatment_A)Vol_Treated" = "Vol. Treated", - "as.factor(Treatment_C)No Info 2" = "No Info 2", "as.factor(Treatment_C)No Video 1" = "Text 1", - "as.factor(Treatment_C)No Video 2" = "Text 2", "as.factor(Treatment_C)Video 1" = "Video 1", - "as.factor(Treatment_C)Video 2" = "Video 2", "Z_Mean_NR" = "NR-Index", "as.factor(Gender)2" = "Female", - "Age_mean" = "Age", "QFIncome" = "Income", "Uni_degree" = "University Degree", "Kids_Dummy" = "Children", - "Naturalness_SQ" = "Naturalness SQ", "WalkingDistance_SQ" = "Walking Distance SQ"), - stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. Standard errors in parentheses.", - label = "tab:nr_ols", - caption = "Results of OLS on the NR-index.", - file="Tables/ols/nr_ols.tex") - -#### Logit ##### - -texreg(l=list(logit_choice_treat_uni), stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.model.names = c("Logit regression"), - custom.header = list("Dependent variable: Voluntary Information Access" = 1), - custom.coef.map = list_ols, custom.note = "%stars. Standard errors in parentheses.", - label = "tab:logit_vt", - caption = "Results of logit regression on the access of optional information.", - file="Tables/logit/chose_treatment.tex") - - -##### MXL ####### - -### Baseline case A -case_A <- quicktexregapollo(mxl_wtp_case_a_rentINT) - -coef_names <- case_A@coef.names -coef_names <- sub("^(mu_)(.*)(_T|_VT)$", "\\2\\3", coef_names) -coef_names[4] <- "mu_ASC_sq" -case_A@coef.names <- coef_names - -case_A_cols <- map(c("^mu_", "^sig_", "_T$", "_VT$"), subcoef, case_A) - -texreg(c(case_A_cols[1], remGOF(case_A_cols[2:4])), - custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", - "ASC_sq" = "ASC SQ", "_natural" = "Naturalness", "nat" = "Naturalness", - "wd" = "Walking Distance", "asc" = "ASC SQ"), - custom.model.names = c("Mean", "SD", "Treated", "Voluntary Treated"), custom.note = "%stars. Robust standard errors in parentheses.", - stars = c(0.01, 0.05, 0.1), float.pos="tb", - label = "tab:mxl_A", - caption = "Results of mixed logit model with treatment interactions for Case A.", - file="Tables/mxl/case_A_rent_INT.tex") - -### Baseline case C -case_C <- quicktexregapollo(mxl_wtp_case_c_rentINT) - -coef_names <- case_C@coef.names -coef_names <- sub("^(mu_)(.*)(1|2|info)$", "\\2\\3", coef_names) -coef_names[4] <- "mu_ASC_sq" -case_C@coef.names <- coef_names - - -case_C_cols <- map(c("^mu_", "^sig_", "_vid1$", "_vid2$", "_nv1$", "_nv2$", "_no_info$"), subcoef, case_C) - -texreg(c(case_C_cols[1], remGOF(case_C_cols[2:7])), - custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", - "ASC_sq" = "ASC SQ", "_natural" = "Naturalness", "nat" = "Naturalness", - "wd" = "Walking Distance", "asc" = "ASC SQ", - "ASC_sq_info" = "ASC SQ", "rent_info" = "Rent", "nat_info" = "Naturalness", "walking_info" = "Walking Distance"), - custom.model.names = c("Mean", "SD", "Video 1", "Video 2", "Text 1", "Text 2", "No Info"), custom.note = "%stars. 
Robust standard errors in parentheses.", - stars = c(0.01, 0.05, 0.1), float.pos="tb", - label = "tab:mxl_C", - caption = "Results of mixed logit model with treatment interactions for Case B.", - file="Tables/mxl/case_C_rent_INT.tex") - -### Rent NR model case C -case_C_NR <- quicktexregapollo(mxl_wtp_NR_case_c_rentINT) - -coef_names <- case_C_NR@coef.names -coef_names <- sub("^(mu_)(.*)(1|2|info|NR)$", "\\2\\3", coef_names) -coef_names[4] <- "mu_ASC_sq" -case_C_NR@coef.names <- coef_names - - -case_C_cols_NR <- map(c("^mu_", "^sig_", "_vid1$", "_vid2$", "_nv1$", "_nv2$", "_no_info$", "_NR$"), subcoef, case_C_NR) - -texreg(c(case_C_cols_NR[1], remGOF(case_C_cols_NR[2:8])), - custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", - "ASC_sq" = "ASC SQ", "_natural" = "Naturalness", "nat" = "Naturalness", - "wd" = "Walking Distance", "asc" = "ASC SQ", - "ASC_sq_info" = "ASC SQ", "rent_info" = "Rent", "nat_info" = "Naturalness", "walking_info" = "Walking Distance"), - custom.model.names = c("Mean", "SD", "Video 1", "Video 2", "Text 1", "Text 2", "No Info", "NR"), custom.note = "%stars. Robust standard errors in parentheses.", - stars = c(0.01, 0.05, 0.1), float.pos="tb", - label = "tab:mxl_NR", - caption = "Results of mixed logit model with treatment and NR-index interactions for Case B.", - file="Tables/mxl/case_C_rent_INT_NR.tex") -# Main model -# texreg(l=list(mxl_wtp_case_a_rentINT), -# custom.coef.map = list("mu_natural" = "Naturalness", "mu_walking" = "Walking Distance", "mu_rent" = "Rent", -# "ASC_sq" = "ASC SQ", "sig_natural" = "Naturalness SD", "sig_walking" = "Walking Distance SD", -# "sig_rent" = "Rent SD", "sig_ASC_sq" = "ASC SD", -# "mu_nat_T" = "Naturalness X Treated", "mu_wd_T" = "Walking Distance X Treated", "mu_rent_T" = "Rent X Treated", -# "mu_asc_T" = "ASC X Treated", "mu_nat_VT" = "Naturalness X Vol. Treated", "mu_wd_VT" = "Walking Distance X Vol. Treated", -# "mu_rent_VT" = "Rent X Vol. Treated", "mu_asc_VT" = "ASC X Vol. Treated"), -# stars = c(0.01, 0.05, 0.1), override.se = mxl_wtp_case_a_rentINT$robse, file="Tables/mxl/case_A_rent_INT.tex") +library(choiceTools) + +dir.create("Tables/mxl") +dir.create("Tables/logit") +dir.create("Tables/ols/") + + +list_ols <- list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = "Treated", "as.factor(Treatment_A)Vol_Treated" = "Vol. Treated", + "as.factor(Treatment_C)No Info 2" = "No Info 2", "as.factor(Treatment_C)No Video 1" = "Text 1", + "as.factor(Treatment_C)No Video 2" = "Text 2", "as.factor(Treatment_C)Video 1" = "Video 1", + "as.factor(Treatment_C)Video 2" = "Video 2", "Z_Mean_NR" = "NR-Index", "as.factor(Gender)2" = "Female", + "Age_mean" = "Age", "QFIncome" = "Income", "Uni_degree" = "University Degree") + +# Manipulation check +texreg(l=list(ols_percentage_correct_A, ols_percentage_correct_control_A, ols_percentage_correct_C, ols_percentage_correct_control_C), + custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), + custom.header = list("Dependent Variable: Percentage of correct quiz statements" = 1:4), + custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. 
Standard errors in parentheses.", + label = "tab:mani", + caption = "Results of OLS on percentage of correct quiz statements.", + file="Tables/ols/manipulation.tex") + + +# Net interview time +texreg(l=list(ols_time_spent_A, ols_time_spent_control_A, ols_time_spent_C, ols_time_spent_control_C), + custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), + custom.header = list("Dependent variable: Net interview time" = 1:4), + custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. Standard errors in parentheses.", + label = "tab:net_int", + caption = "Results of OLS on net interview time.", + file="Tables/ols/interviewtime.tex") + + +# CC Time +texreg(l=list(ols_time_cc_A, ols_time_cc_control_A, ols_time_cc_C, ols_time_cc_control_C), + custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), + custom.header = list("Dependent variable: Mean choice card time" = 1:4), + custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. Standard errors in parentheses.", + label = "tab:cctime", + caption = "Results of OLS on mean choice card time.", + file="Tables/ols/cctime.tex") + +# Consequentiality +texreg(l=list(conseq_model_A, conseq_model_control_A, conseq_model_C, conseq_model_control_C), + custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), + custom.header = list("Dependent variable: Consequentiality score" = 1:4), + custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. Standard errors in parentheses.", + label = "tab:conseq", + caption = "Results of OLS on consequentiality score.", + file="Tables/ols/consequentiality.tex") + +# Opt Out +texreg(l=list(ols_opt_out_A, ols_opt_out_control_A, ols_opt_out_C, ols_opt_out_control_C), + custom.model.names = c("Case A", "with Controls", "Case B", "with Controls"), + custom.header = list("Dependent variable: Number of opt-out choices" = 1:4), + custom.coef.map = list_ols, stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. Standard errors in parentheses.", + label = "tab:optout", + caption = "Results of OLS on number of opt-out choices.", + file="Tables/ols/optout.tex") + +# NR +texreg(l=list(nr_model_treat_A), + custom.model.names = c("OLS regression"), + custom.header = list("Dependent variable: NR-Index" = 1), + custom.coef.map = list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = "Treated", "as.factor(Treatment_A)Vol_Treated" = "Vol. Treated", + "as.factor(Treatment_C)No Info 2" = "No Info 2", "as.factor(Treatment_C)No Video 1" = "Text 1", + "as.factor(Treatment_C)No Video 2" = "Text 2", "as.factor(Treatment_C)Video 1" = "Video 1", + "as.factor(Treatment_C)Video 2" = "Video 2", "Z_Mean_NR" = "NR-Index", "as.factor(Gender)2" = "Female", + "Age_mean" = "Age", "QFIncome" = "Income", "Uni_degree" = "University Degree", "Kids_Dummy" = "Children", + "Naturalness_SQ" = "Naturalness SQ", "WalkingDistance_SQ" = "Walking Distance SQ"), + stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. Standard errors in parentheses.", + label = "tab:nr_ols", + caption = "Results of OLS on the NR-index.", + file="Tables/ols/nr_ols.tex") + +#### Logit ##### + +texreg(l=list(logit_choice_treat_uni), stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.model.names = c("Logit regression"), + custom.header = list("Dependent variable: Voluntary Information Access" = 1), + custom.coef.map = list_ols, custom.note = "%stars. 
Standard errors in parentheses.", + label = "tab:logit_vt", + caption = "Results of logit regression on the access of optional information.", + file="Tables/logit/chose_treatment.tex") + + +##### MXL ####### + +### Baseline case A +case_A <- quicktexregapollo(mxl_wtp_case_a_rentINT) + +coef_names <- case_A@coef.names +coef_names <- sub("^(mu_)(.*)(_T|_VT)$", "\\2\\3", coef_names) +coef_names[4] <- "mu_ASC_sq" +case_A@coef.names <- coef_names + +case_A_cols <- map(c("^mu_", "^sig_", "_T$", "_VT$"), subcoef, case_A) + +texreg(c(case_A_cols[1], remGOF(case_A_cols[2:4])), + custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", + "ASC_sq" = "ASC SQ", "_natural" = "Naturalness", "nat" = "Naturalness", + "wd" = "Walking Distance", "asc" = "ASC SQ"), + custom.model.names = c("Mean", "SD", "Treated", "Voluntary Treated"), custom.note = "%stars. Robust standard errors in parentheses.", + stars = c(0.01, 0.05, 0.1), float.pos="tb", + label = "tab:mxl_A", + caption = "Results of mixed logit model with treatment interactions for Case A.", + file="Tables/mxl/case_A_rent_INT.tex") + +### Baseline case C +case_C <- quicktexregapollo(mxl_wtp_case_c_rentINT) + +coef_names <- case_C@coef.names +coef_names <- sub("^(mu_)(.*)(1|2|info)$", "\\2\\3", coef_names) +coef_names[4] <- "mu_ASC_sq" +case_C@coef.names <- coef_names + + +case_C_cols <- map(c("^mu_", "^sig_", "_vid1$", "_vid2$", "_nv1$", "_nv2$", "_no_info$"), subcoef, case_C) + +texreg(c(case_C_cols[1], remGOF(case_C_cols[2:7])), + custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", + "ASC_sq" = "ASC SQ", "_natural" = "Naturalness", "nat" = "Naturalness", + "wd" = "Walking Distance", "asc" = "ASC SQ", + "ASC_sq_info" = "ASC SQ", "rent_info" = "Rent", "nat_info" = "Naturalness", "walking_info" = "Walking Distance"), + custom.model.names = c("Mean", "SD", "Video 1", "Video 2", "Text 1", "Text 2", "No Info"), custom.note = "%stars. Robust standard errors in parentheses.", + stars = c(0.01, 0.05, 0.1), float.pos="tb", + label = "tab:mxl_C", + caption = "Results of mixed logit model with treatment interactions for Case B.", + file="Tables/mxl/case_C_rent_INT.tex") + +### Rent NR model case C +case_C_NR <- quicktexregapollo(mxl_wtp_NR_case_c_rentINT) + +coef_names <- case_C_NR@coef.names +coef_names <- sub("^(mu_)(.*)(1|2|info|NR)$", "\\2\\3", coef_names) +coef_names[4] <- "mu_ASC_sq" +case_C_NR@coef.names <- coef_names + + +case_C_cols_NR <- map(c("^mu_", "^sig_", "_vid1$", "_vid2$", "_nv1$", "_nv2$", "_no_info$", "_NR$"), subcoef, case_C_NR) + +texreg(c(case_C_cols_NR[1], remGOF(case_C_cols_NR[2:8])), + custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", + "ASC_sq" = "ASC SQ", "_natural" = "Naturalness", "nat" = "Naturalness", + "wd" = "Walking Distance", "asc" = "ASC SQ", + "ASC_sq_info" = "ASC SQ", "rent_info" = "Rent", "nat_info" = "Naturalness", "walking_info" = "Walking Distance"), + custom.model.names = c("Mean", "SD", "Video 1", "Video 2", "Text 1", "Text 2", "No Info", "NR"), custom.note = "%stars. 
Robust standard errors in parentheses.", + stars = c(0.01, 0.05, 0.1), float.pos="tb", + label = "tab:mxl_NR", + caption = "Results of mixed logit model with treatment and NR-index interactions for Case B.", + file="Tables/mxl/case_C_rent_INT_NR.tex") +# Main model +# texreg(l=list(mxl_wtp_case_a_rentINT), +# custom.coef.map = list("mu_natural" = "Naturalness", "mu_walking" = "Walking Distance", "mu_rent" = "Rent", +# "ASC_sq" = "ASC SQ", "sig_natural" = "Naturalness SD", "sig_walking" = "Walking Distance SD", +# "sig_rent" = "Rent SD", "sig_ASC_sq" = "ASC SD", +# "mu_nat_T" = "Naturalness X Treated", "mu_wd_T" = "Walking Distance X Treated", "mu_rent_T" = "Rent X Treated", +# "mu_asc_T" = "ASC X Treated", "mu_nat_VT" = "Naturalness X Vol. Treated", "mu_wd_VT" = "Walking Distance X Vol. Treated", +# "mu_rent_VT" = "Rent X Vol. Treated", "mu_asc_VT" = "ASC X Vol. Treated"), +# stars = c(0.01, 0.05, 0.1), override.se = mxl_wtp_case_a_rentINT$robse, file="Tables/mxl/case_A_rent_INT.tex") diff --git a/Scripts/data_prep.R b/Scripts/data_prep.R index 6261dcaa760d8ce911a871eb9ad95e4bfa971b77..567faf4d92d9b7bab63e32ad6ee02d104108f3a9 100644 --- a/Scripts/data_prep.R +++ b/Scripts/data_prep.R @@ -1,126 +1,126 @@ -# Prepare variables that we want to use - -database_full <- database_full %>% rename(Gender = "Q03W123", Education = "Q06W123", HHSize = "Q41W123", - WorkingTime = "Q44W123", Birthyear = "Q01W123", Rent_net = "Q07W123", - Number_Kids = "Q42W123", Employment_type = "Q43W123", Conseq_UGS = "Q28W3", - Conseq_Money = "Q29W3") - - -database_full <- database_full %>% mutate(Gender = dplyr::recode(Gender, "A1" = 1, "A2" = 2, "A3"=3), - Education = dplyr::recode(Education, "A1" = 1, "A2" = 2, "A3"=3, "A4" = 4, "A5" = 5), - Employment_type = dplyr::recode(Employment_type, "A1" = 1, "A2" = 2, "A3"=3, "A4" = 4, - "A5" = 5, "A6" = 6), - Conseq_UGS = dplyr::recode(Conseq_UGS, "A1" = 5, "A2" = 4, "A3"=3, "A4" = 2, "A5" = 1, "A6" = NA_real_), - Conseq_Money = dplyr::recode(Conseq_Money, "A1" = 5, "A2" = 4, "A3"=3, "A4" = 2, "A5" = 1, "A6" = NA_real_)) - -database_full <- database_full %>% mutate(Gender_female = case_when(Gender == 2 ~1, TRUE~0), - Age = 2023-Birthyear, - Uni_degree = case_when(Education == 5 ~1, TRUE~0), - Kids_Dummy = case_when(Number_Kids > 0 ~ 1, TRUE ~0), - Employment_full = case_when(Employment_type == 1 ~ 1, TRUE~0), - Pensioner = case_when(Employment_type == 6 ~ 1, TRUE~0), - Age_mean = Age - mean(Age)) - -# Data cleaning - - - -database_full <- database_full %>% filter(Rent_SQ <= 10000 & Rent_SQ >=50) %>% - filter(WalkingDistance_SQ > 0 & WalkingDistance_SQ <= 300) %>% - filter(Gender!=3) - -database <- database %>% filter(Rent_SQ <= 10000 & Rent_SQ >=50) %>% - filter(WalkingDistance_SQ > 0 & WalkingDistance_SQ <= 300) - - -summary(database_full$interviewtime) - -database_full <- database_full %>% filter(interviewtime >= 300) # make change time to 10 seconds? 
-
-
-database_full <- database_full %>%
-  filter(!is.na(Treatment_new)) %>%
-  mutate(Treatment_A = case_when(
-    Treatment == 1 ~ "Treated",
-    Treatment == 2 ~ "Vol_Treated",
-    Treatment == 3 ~ "Not_Treated",
-    TRUE ~ NA_character_
-  )) %>%
-  mutate(Treatment_B = case_when(
-    Treatment_new == 1 | Treatment_new == 2 | Treatment_new == 4 | Treatment_new == 5 ~ "Treated",
-    Treatment_new == 3 | Treatment_new == 6 ~ "Not_Treated"
-  )) %>%
-  mutate(Treatment_C = case_when(
-    Treatment_new == 1 ~ 'Video 1',
-    Treatment_new == 2 ~ 'No Video 1',
-    Treatment_new == 3 ~ 'No Info 2',
-    Treatment_new == 4 ~ 'No Video 2',
-    Treatment_new == 5 ~ 'Video 2',
-    Treatment_new == 6 ~ 'No Treatment 3',
-    TRUE ~ NA_character_
-  ))
-
-id_list <- unique(database_full$id)
-
-# Do we sill want to use this? or only database full?
-database <- database %>% filter(id %in% id_list) %>% filter(!is.na(Treatment_new))
-# Building NR Index
-
-for (i in 1:21) {
-  variable_name <- paste0("Q38S", sprintf("%02d", i), "W3") # Generate variable name
-  cat("Table for", variable_name, ":\n")
-  print(table(database_full[[variable_name]]))
-  cat("\n")
-  database_full[[variable_name]] <- as.numeric(factor(database_full[[variable_name]], levels = c("A1", "A2", "A3", "A4", "A5")))
-  cat("Table for", variable_name, ":\n")
-  print(table(database_full[[variable_name]]))
-  cat("\n")
-}
-
-variables_to_reverse <- c("Q38S02W3", "Q38S03W3", "Q38S10W3", "Q38S11W3", "Q38S13W3", "Q38S14W3", "Q38S15W3", "Q38S18W3")
-for (variable_name in variables_to_reverse) {
-  cat("Table for", variable_name, ":\n")
-
-  # Convert the variable to a factor with numerical levels and reverse the scores
-  database_full[[variable_name]] <- 6 - as.numeric(database_full[[variable_name]])
-
-  # Print the table
-  print(table(database_full[[variable_name]]))
-  cat("\n")
-}
-q38_variables <- grep("^Q38", names(database_full), value = TRUE)
-database_full$Total_NR <- rowSums(database_full[q38_variables])
-hist(database_full$Total_NR)
-database_full <- database_full %>%
-  mutate(Mean_NR=Total_NR/21)
-mean_nr<-mean(database_full$Mean_NR, na.rm = TRUE)
-sd_nr<-sd(database_full$Mean_NR, na.rm = TRUE)
-database_full <- database_full %>%
-  mutate(Z_Mean_NR=(Mean_NR-mean_nr)/sd_nr)
-database$Z_Mean_NR<- database_full$Z_Mean_NR
-summary(database$Z_Mean_NR)
-
-#Self-Reference Index
-
-for (i in 8:10) {
-  variable_name <- paste0("TV", sprintf("%02d", i), "W3") # Generate variable name
-  cat("Table for", variable_name, ":\n")
-  print(table(database_full[[variable_name]]))
-  cat("\n")
-  database_full[[variable_name]] <- as.numeric(factor(database_full[[variable_name]], levels = c("A1", "A2", "A3", "A4", "A5")))
-  cat("Table for", variable_name, ":\n")
-  print(table(database_full[[variable_name]]))
-  cat("\n")
-}
-
-
-database_full$Total_SR <- database_full$TV08W3+database_full$TV09W3+database_full$TV10W3
-hist(database_full$Total_SR)
-database_full <- database_full %>%
-  mutate(Mean_SR=Total_SR/3)
-mean_sr<-mean(database_full$Mean_SR, na.rm = TRUE)
-sd_sr<-sd(database_full$Mean_SR, na.rm = TRUE)
-database_full <- database_full %>%
-  mutate(Z_Mean_SR=(Mean_SR-mean_sr)/sd_sr)
-database$Z_Mean_SR<- database_full$Z_Mean_SR
-summary(database$Z_Mean_SR)
+# Prepare variables that we want to use
+
+database_full <- database_full %>% rename(Gender = "Q03W123", Education = "Q06W123", HHSize = "Q41W123",
+                                          WorkingTime = "Q44W123", Birthyear = "Q01W123", Rent_net = "Q07W123",
+                                          Number_Kids = "Q42W123", Employment_type = "Q43W123", Conseq_UGS = "Q28W3",
+                                          Conseq_Money = "Q29W3")
+
+
+database_full <- database_full %>% mutate(Gender = dplyr::recode(Gender, "A1" = 1, "A2" = 2, "A3"=3),
+                                          Education = dplyr::recode(Education, "A1" = 1, "A2" = 2, "A3"=3, "A4" = 4, "A5" = 5),
+                                          Employment_type = dplyr::recode(Employment_type, "A1" = 1, "A2" = 2, "A3"=3, "A4" = 4,
+                                                                          "A5" = 5, "A6" = 6),
+                                          Conseq_UGS = dplyr::recode(Conseq_UGS, "A1" = 5, "A2" = 4, "A3"=3, "A4" = 2, "A5" = 1, "A6" = NA_real_),
+                                          Conseq_Money = dplyr::recode(Conseq_Money, "A1" = 5, "A2" = 4, "A3"=3, "A4" = 2, "A5" = 1, "A6" = NA_real_))
+
+database_full <- database_full %>% mutate(Gender_female = case_when(Gender == 2 ~1, TRUE~0),
+                                          Age = 2023-Birthyear,
+                                          Uni_degree = case_when(Education == 5 ~1, TRUE~0),
+                                          Kids_Dummy = case_when(Number_Kids > 0 ~ 1, TRUE ~0),
+                                          Employment_full = case_when(Employment_type == 1 ~ 1, TRUE~0),
+                                          Pensioner = case_when(Employment_type == 6 ~ 1, TRUE~0),
+                                          Age_mean = Age - mean(Age))
+
+# Data cleaning
+
+
+
+database_full <- database_full %>% filter(Rent_SQ <= 10000 & Rent_SQ >=50) %>%
+  filter(WalkingDistance_SQ > 0 & WalkingDistance_SQ <= 300) %>%
+  filter(Gender!=3)
+
+database <- database %>% filter(Rent_SQ <= 10000 & Rent_SQ >=50) %>%
+  filter(WalkingDistance_SQ > 0 & WalkingDistance_SQ <= 300)
+
+
+summary(database_full$interviewtime)
+
+database_full <- database_full %>% filter(interviewtime >= 300) # make change time to 10 seconds?
+
+
+database_full <- database_full %>%
+  filter(!is.na(Treatment_new)) %>%
+  mutate(Treatment_A = case_when(
+    Treatment == 1 ~ "Treated",
+    Treatment == 2 ~ "Vol_Treated",
+    Treatment == 3 ~ "Not_Treated",
+    TRUE ~ NA_character_
+  )) %>%
+  mutate(Treatment_B = case_when(
+    Treatment_new == 1 | Treatment_new == 2 | Treatment_new == 4 | Treatment_new == 5 ~ "Treated",
+    Treatment_new == 3 | Treatment_new == 6 ~ "Not_Treated"
+  )) %>%
+  mutate(Treatment_C = case_when(
+    Treatment_new == 1 ~ 'Video 1',
+    Treatment_new == 2 ~ 'No Video 1',
+    Treatment_new == 3 ~ 'No Info 2',
+    Treatment_new == 4 ~ 'No Video 2',
+    Treatment_new == 5 ~ 'Video 2',
+    Treatment_new == 6 ~ 'No Treatment 3',
+    TRUE ~ NA_character_
+  ))
+
+id_list <- unique(database_full$id)
+
+# Do we still want to use this? or only database_full?
+database <- database %>% filter(id %in% id_list) %>% filter(!is.na(Treatment_new)) +# Building NR Index + +for (i in 1:21) { + variable_name <- paste0("Q38S", sprintf("%02d", i), "W3") # Generate variable name + cat("Table for", variable_name, ":\n") + print(table(database_full[[variable_name]])) + cat("\n") + database_full[[variable_name]] <- as.numeric(factor(database_full[[variable_name]], levels = c("A1", "A2", "A3", "A4", "A5"))) + cat("Table for", variable_name, ":\n") + print(table(database_full[[variable_name]])) + cat("\n") +} + +variables_to_reverse <- c("Q38S02W3", "Q38S03W3", "Q38S10W3", "Q38S11W3", "Q38S13W3", "Q38S14W3", "Q38S15W3", "Q38S18W3") +for (variable_name in variables_to_reverse) { + cat("Table for", variable_name, ":\n") + + # Convert the variable to a factor with numerical levels and reverse the scores + database_full[[variable_name]] <- 6 - as.numeric(database_full[[variable_name]]) + + # Print the table + print(table(database_full[[variable_name]])) + cat("\n") +} +q38_variables <- grep("^Q38", names(database_full), value = TRUE) +database_full$Total_NR <- rowSums(database_full[q38_variables]) +hist(database_full$Total_NR) +database_full <- database_full %>% + mutate(Mean_NR=Total_NR/21) +mean_nr<-mean(database_full$Mean_NR, na.rm = TRUE) +sd_nr<-sd(database_full$Mean_NR, na.rm = TRUE) +database_full <- database_full %>% + mutate(Z_Mean_NR=(Mean_NR-mean_nr)/sd_nr) +database$Z_Mean_NR<- database_full$Z_Mean_NR +summary(database$Z_Mean_NR) + +#Self-Reference Index + +for (i in 8:10) { + variable_name <- paste0("TV", sprintf("%02d", i), "W3") # Generate variable name + cat("Table for", variable_name, ":\n") + print(table(database_full[[variable_name]])) + cat("\n") + database_full[[variable_name]] <- as.numeric(factor(database_full[[variable_name]], levels = c("A1", "A2", "A3", "A4", "A5"))) + cat("Table for", variable_name, ":\n") + print(table(database_full[[variable_name]])) + cat("\n") +} + + +database_full$Total_SR <- database_full$TV08W3+database_full$TV09W3+database_full$TV10W3 +hist(database_full$Total_SR) +database_full <- database_full %>% + mutate(Mean_SR=Total_SR/3) +mean_sr<-mean(database_full$Mean_SR, na.rm = TRUE) +sd_sr<-sd(database_full$Mean_SR, na.rm = TRUE) +database_full <- database_full %>% + mutate(Z_Mean_SR=(Mean_SR-mean_sr)/sd_sr) +database$Z_Mean_SR<- database_full$Z_Mean_SR +summary(database$Z_Mean_SR) diff --git a/Scripts/interaction_plots_presi.R b/Scripts/interaction_plots_presi.R index 4fde3d444f95a33b91aa9ba45e5bfbd8a29ea973..0ab7a740a6d05476f9cb0b34fc2db2a8861a3045 100644 --- a/Scripts/interaction_plots_presi.R +++ b/Scripts/interaction_plots_presi.R @@ -1,58 +1,58 @@ -# Create Interaction Term Plot for Presentation - - -#### -create_interaction_term_plot <- function(ols_summary, treatment_labels, ord, unit, down, up) { - alpha <- 0.1 - z_value <- qnorm(1 - alpha / 2) - - plot_data <- summary(ols_summary) - plot_data <- as.data.frame(plot_data$coefficients) - plot_data$ME <- z_value * plot_data$`Std. 
Error` - plot_data <- rownames_to_column(plot_data, "Coefficient") - - plot_data <- plot_data %>% filter(str_detect(Coefficient, "Treatment")) - - plot_data$Coefficient <- treatment_labels - - plot <- ggplot(data = plot_data) + - geom_bar(aes(x = factor(Coefficient, levels=c(ord)), y = Estimate, fill = Coefficient), stat = "identity", position = 'dodge', width = 0.5, alpha = 0.7) + - geom_errorbar(aes(x = Coefficient, ymin = Estimate - ME, ymax = Estimate + ME), width = 0.3, position = position_dodge(0.8)) + - scale_x_discrete(guide = guide_axis(angle = 0)) + - guides(fill = "none") + - coord_cartesian(ylim=c(down, up)) + - xlab("Treatment Group") + - ylab(paste0(unit)) - - return(plot) -} - - -case_A_labels <- c("Treated", "Voluntary Treated") -case_C_labels <- c("No Info 2", "Text 1", "Text 2", "Video 1", "Video 2") -case_C_labels_re <- c("Text 1", "Text 2", "Video 1", "Video 2", "No Info 2") - -plot_interview_A <- create_interaction_term_plot(ols_time_spent_control_A, case_A_labels, case_A_labels, - "Interview Time in Seconds", -250, 380) -plot_interview_C <- create_interaction_term_plot(ols_time_spent_control_C, case_C_labels, case_C_labels_re, - "Interview Time in Seconds", -250, 380) - -plot_cc_A <- create_interaction_term_plot(ols_time_cc_control_A, case_A_labels, case_A_labels, - "Mean Choice Card Time in Seconds", -5, 5) -plot_cc_C <- create_interaction_term_plot(ols_time_cc_control_C, case_C_labels, case_C_labels_re, - "Mean Choice Card Time in Seconds", -5, 5) - -plot_mani_A <- create_interaction_term_plot(ols_percentage_correct_control_A, case_A_labels, case_A_labels, - "Percentage of Correct Quiz Statements", -5, 15) -plot_mani_C <- create_interaction_term_plot(ols_percentage_correct_control_C, case_C_labels, case_C_labels_re, - "Percentage of Correct Quiz Statements", -5, 15) - -plot_cons_A <- create_interaction_term_plot(conseq_model_control_A, case_A_labels, case_A_labels, - "Consequentiality Score", -0.5, 0.8) -plot_cons_C <- create_interaction_term_plot(conseq_model_control_C, case_C_labels, case_C_labels_re, - "Consequentiality Score", -0.5, 0.8) - -plot_opt_A <- create_interaction_term_plot(ols_opt_out_control_A, case_A_labels, case_A_labels, - "Number of Opt-out Choices", -1.5, 1) -plot_opt_C <- create_interaction_term_plot(ols_opt_out_control_C, case_C_labels, case_C_labels_re, - "Number of Opt-out Choices", -1.5, 1) +# Create Interaction Term Plot for Presentation + + +#### +create_interaction_term_plot <- function(ols_summary, treatment_labels, ord, unit, down, up) { + alpha <- 0.1 + z_value <- qnorm(1 - alpha / 2) + + plot_data <- summary(ols_summary) + plot_data <- as.data.frame(plot_data$coefficients) + plot_data$ME <- z_value * plot_data$`Std. 
Error` + plot_data <- rownames_to_column(plot_data, "Coefficient") + + plot_data <- plot_data %>% filter(str_detect(Coefficient, "Treatment")) + + plot_data$Coefficient <- treatment_labels + + plot <- ggplot(data = plot_data) + + geom_bar(aes(x = factor(Coefficient, levels=c(ord)), y = Estimate, fill = Coefficient), stat = "identity", position = 'dodge', width = 0.5, alpha = 0.7) + + geom_errorbar(aes(x = Coefficient, ymin = Estimate - ME, ymax = Estimate + ME), width = 0.3, position = position_dodge(0.8)) + + scale_x_discrete(guide = guide_axis(angle = 0)) + + guides(fill = "none") + + coord_cartesian(ylim=c(down, up)) + + xlab("Treatment Group") + + ylab(paste0(unit)) + + return(plot) +} + + +case_A_labels <- c("Treated", "Voluntary Treated") +case_C_labels <- c("No Info 2", "Text 1", "Text 2", "Video 1", "Video 2") +case_C_labels_re <- c("Text 1", "Text 2", "Video 1", "Video 2", "No Info 2") + +plot_interview_A <- create_interaction_term_plot(ols_time_spent_control_A, case_A_labels, case_A_labels, + "Interview Time in Seconds", -250, 380) +plot_interview_C <- create_interaction_term_plot(ols_time_spent_control_C, case_C_labels, case_C_labels_re, + "Interview Time in Seconds", -250, 380) + +plot_cc_A <- create_interaction_term_plot(ols_time_cc_control_A, case_A_labels, case_A_labels, + "Mean Choice Card Time in Seconds", -5, 5) +plot_cc_C <- create_interaction_term_plot(ols_time_cc_control_C, case_C_labels, case_C_labels_re, + "Mean Choice Card Time in Seconds", -5, 5) + +plot_mani_A <- create_interaction_term_plot(ols_percentage_correct_control_A, case_A_labels, case_A_labels, + "Percentage of Correct Quiz Statements", -5, 15) +plot_mani_C <- create_interaction_term_plot(ols_percentage_correct_control_C, case_C_labels, case_C_labels_re, + "Percentage of Correct Quiz Statements", -5, 15) + +plot_cons_A <- create_interaction_term_plot(conseq_model_control_A, case_A_labels, case_A_labels, + "Consequentiality Score", -0.5, 0.8) +plot_cons_C <- create_interaction_term_plot(conseq_model_control_C, case_C_labels, case_C_labels_re, + "Consequentiality Score", -0.5, 0.8) + +plot_opt_A <- create_interaction_term_plot(ols_opt_out_control_A, case_A_labels, case_A_labels, + "Number of Opt-out Choices", -1.5, 1) +plot_opt_C <- create_interaction_term_plot(ols_opt_out_control_C, case_C_labels, case_C_labels_re, + "Number of Opt-out Choices", -1.5, 1) diff --git a/Scripts/mxl/mxl_wtp_space_NR_caseC_RentINT_X.R b/Scripts/mxl/mxl_wtp_space_NR_caseC_RentINT_X.R index f659244af4f120bb0f57b2c9a4e5bed19b7d1ec4..f2104869009e20118f8f8b65569a740c6e54bbda 100644 --- a/Scripts/mxl/mxl_wtp_space_NR_caseC_RentINT_X.R +++ b/Scripts/mxl/mxl_wtp_space_NR_caseC_RentINT_X.R @@ -1,194 +1,194 @@ -#### Apollo standard script ##### - -library(apollo) # Load apollo package - - - -# Test treatment effect - -database <- database_full %>% - filter(!is.na(Treatment_new)) %>% - mutate(Dummy_Video_1 = case_when(Treatment_new == 1 ~ 1, TRUE ~ 0), - Dummy_Video_2 = case_when(Treatment_new == 5 ~ 1, TRUE ~ 0), - Dummy_no_info = case_when(Treatment_new == 3 ~ 1, TRUE~0), - Dummy_Info_nv1 = case_when(Treatment_new == 2 ~1, TRUE~0), - Dummy_Info_nv2 = case_when(Treatment_new == 4 ~1 , TRUE~0)) - -#initialize model - -apollo_initialise() - - -### Set core controls -apollo_control = list( - modelName = "MXL_wtp_NR_Case_C Rent INT X", - modelDescr = "MXL wtp space NR Case C Rent INT X", - indivID ="id", - mixing = TRUE, - HB= FALSE, - nCores = n_cores, - outputDirectory = "Estimation_results/mxl" -) - -##### Define model parameters 
depending on your attributes and model specification! #### -# set values to 0 for conditional logit model - -apollo_beta=c(mu_natural = 15, - mu_walking = -1, - mu_rent = -2, - ASC_sq = 0, - mu_rent_NR = 0, - mu_nat_NR = 0, - mu_wd_NR = 0, - mu_asc_NR = 0, - mu_ASC_sq_vid1 = 0, - mu_ASC_sq_vid2 = 0, - mu_ASC_sq_no_info = 0, - mu_ASC_sq_info_nv1 = 0, - mu_ASC_sq_info_nv2 = 0, - mu_rent_vid1 = 0, - mu_rent_vid2 = 0, - mu_rent_no_info = 0, - mu_rent_info_nv1 = 0, - mu_rent_info_nv2 = 0, - mu_nat_vid1 =0, - mu_nat_vid2 = 0, - mu_nat_no_info = 0, - mu_nat_info_nv1 = 0, - mu_nat_info_nv2 = 0, - mu_walking_vid1 =0, - mu_walking_vid2 = 0, - mu_walking_no_info = 0, - mu_walking_info_nv1 = 0, - mu_walking_info_nv2 = 0, - sig_natural = 15, - sig_walking = 2, - sig_rent = 2, - sig_ASC_sq = 2) - -### specify parameters that should be kept fixed, here = none -apollo_fixed = c() - -### Set parameters for generating draws, use 2000 sobol draws -apollo_draws = list( - interDrawsType = "sobol", - interNDraws = n_draws, - interUnifDraws = c(), - interNormDraws = c("draws_natural", "draws_walking", "draws_rent", "draws_asc"), - intraDrawsType = "halton", - intraNDraws = 0, - intraUnifDraws = c(), - intraNormDraws = c() -) - -### Create random parameters, define distribution of the parameters -apollo_randCoeff = function(apollo_beta, apollo_inputs){ - randcoeff = list() - - randcoeff[["b_mu_natural"]] = mu_natural + sig_natural * draws_natural - randcoeff[["b_mu_walking"]] = mu_walking + sig_walking * draws_walking - randcoeff[["b_mu_rent"]] = -exp(mu_rent + sig_rent * draws_rent) - randcoeff[["b_ASC_sq"]] = ASC_sq + sig_ASC_sq * draws_asc - - return(randcoeff) -} - - -### validate -apollo_inputs = apollo_validateInputs() -apollo_probabilities=function(apollo_beta, apollo_inputs, functionality="estimate"){ - - ### Function initialisation: do not change the following three commands - ### Attach inputs and detach after function exit - apollo_attach(apollo_beta, apollo_inputs) - on.exit(apollo_detach(apollo_beta, apollo_inputs)) - - ### Create list of probabilities P - P = list() - - #### List of utilities (later integrated in mnl_settings below) #### - # Define utility functions here: - - V = list() - V[['alt1']] = -(b_mu_rent + mu_rent_vid1 *Dummy_Video_1 + mu_rent_vid2 * Dummy_Video_2 + mu_rent_no_info * Dummy_no_info + - mu_rent_info_nv1 * Dummy_Info_nv1 + mu_rent_info_nv2 * Dummy_Info_nv2 + mu_rent_NR * Z_Mean_NR) * - (b_mu_natural * Naturalness_1 + b_mu_walking * WalkingDistance_1 + - mu_nat_NR * Naturalness_1 * Z_Mean_NR + mu_wd_NR * WalkingDistance_1 * Z_Mean_NR + - mu_nat_vid1 * Naturalness_1 *Dummy_Video_1 + mu_nat_no_info * Naturalness_1 * Dummy_no_info - + mu_nat_info_nv1 * Naturalness_1 *Dummy_Info_nv1 + mu_nat_vid2 * Naturalness_1 * Dummy_Video_2 - + mu_nat_info_nv2 * Naturalness_1 *Dummy_Info_nv2 + - mu_walking_vid1 * WalkingDistance_1 *Dummy_Video_1 + mu_walking_no_info * WalkingDistance_1 * Dummy_no_info - + mu_walking_info_nv1 * WalkingDistance_1 *Dummy_Info_nv1 + mu_walking_vid2 * WalkingDistance_1 * Dummy_Video_2 - + mu_walking_info_nv2 * WalkingDistance_1 *Dummy_Info_nv2- Rent_1) - - V[['alt2']] = -(b_mu_rent + mu_rent_vid1 *Dummy_Video_1 + mu_rent_vid2 * Dummy_Video_2 + mu_rent_no_info * Dummy_no_info + - mu_rent_info_nv1 * Dummy_Info_nv1 + mu_rent_info_nv2 * Dummy_Info_nv2 + mu_rent_NR * Z_Mean_NR)* - (b_mu_natural * Naturalness_2 + b_mu_walking * WalkingDistance_2 + - mu_nat_NR * Naturalness_2 * Z_Mean_NR + mu_wd_NR * WalkingDistance_2 * Z_Mean_NR + - mu_nat_vid1 * Naturalness_2 
*Dummy_Video_1 + mu_nat_no_info * Naturalness_2 * Dummy_no_info - + mu_nat_info_nv1 * Naturalness_2 *Dummy_Info_nv1 + mu_nat_vid2 * Naturalness_2 * Dummy_Video_2 - + mu_nat_info_nv2 * Naturalness_2 *Dummy_Info_nv2+ - mu_walking_vid1 * WalkingDistance_2 *Dummy_Video_1 + mu_walking_no_info * WalkingDistance_2 * Dummy_no_info - + mu_walking_info_nv1 * WalkingDistance_2 *Dummy_Info_nv1 + mu_walking_vid2 * WalkingDistance_2 * Dummy_Video_2 - + mu_walking_info_nv2 * WalkingDistance_2 *Dummy_Info_nv2 - Rent_2) - - V[['alt3']] = -(b_mu_rent + mu_rent_vid1 *Dummy_Video_1 + mu_rent_vid2 * Dummy_Video_2 + mu_rent_no_info * Dummy_no_info + - mu_rent_info_nv1 * Dummy_Info_nv1 + mu_rent_info_nv2 * Dummy_Info_nv2 + mu_rent_NR * Z_Mean_NR) * - (b_ASC_sq + b_mu_natural * Naturalness_3 + b_mu_walking * WalkingDistance_3 + - mu_asc_NR * Z_Mean_NR + mu_nat_NR * Naturalness_3 * Z_Mean_NR + - mu_wd_NR * WalkingDistance_3 * Z_Mean_NR + - mu_nat_vid1 * Naturalness_3 *Dummy_Video_1 + mu_nat_no_info * Naturalness_3 * Dummy_no_info - + mu_nat_info_nv1 * Naturalness_3 *Dummy_Info_nv1 + mu_nat_vid2 * Naturalness_3 * Dummy_Video_2 - + mu_nat_info_nv2 * Naturalness_3 *Dummy_Info_nv2+ - mu_walking_vid1 * WalkingDistance_3 *Dummy_Video_1 + mu_walking_no_info * WalkingDistance_3 * Dummy_no_info - + mu_walking_info_nv1 * WalkingDistance_3 *Dummy_Info_nv1 + mu_walking_vid2 * WalkingDistance_3 * Dummy_Video_2 - + mu_walking_info_nv2 * WalkingDistance_3 *Dummy_Info_nv2 - + mu_ASC_sq_vid1 * Dummy_Video_1 + mu_ASC_sq_vid2 * Dummy_Video_2 - + mu_ASC_sq_no_info * Dummy_no_info + mu_ASC_sq_info_nv1 * Dummy_Info_nv1 - + mu_ASC_sq_info_nv2 * Dummy_Info_nv2 - Rent_3) - - - ### Define settings for MNL model component - mnl_settings = list( - alternatives = c(alt1=1, alt2=2, alt3=3), - avail = 1, # all alternatives are available in every choice - choiceVar = choice, - V = V#, # tell function to use list vector defined above - - ) - - ### Compute probabilities using MNL model - P[['model']] = apollo_mnl(mnl_settings, functionality) - - ### Take product across observation for same individual - P = apollo_panelProd(P, apollo_inputs, functionality) - - ### Average across inter-individual draws - nur bei Mixed Logit! 
- P = apollo_avgInterDraws(P, apollo_inputs, functionality) - - ### Prepare and return outputs of function - P = apollo_prepareProb(P, apollo_inputs, functionality) - return(P) -} - - - -# ################################################################# # -#### MODEL ESTIMATION ## -# ################################################################# # -# estimate model with bfgs algorithm - -mxl_wtp_NR_case_c_rentINTX = apollo_estimate(apollo_beta, apollo_fixed, - apollo_probabilities, apollo_inputs, - estimate_settings=list(maxIterations=400, - estimationRoutine="bfgs", - hessianRoutine="analytic")) - - - -# ################################################################# # -#### MODEL OUTPUTS ## -# ################################################################# # -apollo_saveOutput(mxl_wtp_NR_case_c_rentINTX) - - +#### Apollo standard script ##### + +library(apollo) # Load apollo package + + + +# Test treatment effect + +database <- database_full %>% + filter(!is.na(Treatment_new)) %>% + mutate(Dummy_Video_1 = case_when(Treatment_new == 1 ~ 1, TRUE ~ 0), + Dummy_Video_2 = case_when(Treatment_new == 5 ~ 1, TRUE ~ 0), + Dummy_no_info = case_when(Treatment_new == 3 ~ 1, TRUE~0), + Dummy_Info_nv1 = case_when(Treatment_new == 2 ~1, TRUE~0), + Dummy_Info_nv2 = case_when(Treatment_new == 4 ~1 , TRUE~0)) + +#initialize model + +apollo_initialise() + + +### Set core controls +apollo_control = list( + modelName = "MXL_wtp_NR_Case_C Rent INT X", + modelDescr = "MXL wtp space NR Case C Rent INT X", + indivID ="id", + mixing = TRUE, + HB= FALSE, + nCores = n_cores, + outputDirectory = "Estimation_results/mxl" +) + +##### Define model parameters depending on your attributes and model specification! #### +# set values to 0 for conditional logit model + +apollo_beta=c(mu_natural = 15, + mu_walking = -1, + mu_rent = -2, + ASC_sq = 0, + mu_rent_NR = 0, + mu_nat_NR = 0, + mu_wd_NR = 0, + mu_asc_NR = 0, + mu_ASC_sq_vid1 = 0, + mu_ASC_sq_vid2 = 0, + mu_ASC_sq_no_info = 0, + mu_ASC_sq_info_nv1 = 0, + mu_ASC_sq_info_nv2 = 0, + mu_rent_vid1 = 0, + mu_rent_vid2 = 0, + mu_rent_no_info = 0, + mu_rent_info_nv1 = 0, + mu_rent_info_nv2 = 0, + mu_nat_vid1 =0, + mu_nat_vid2 = 0, + mu_nat_no_info = 0, + mu_nat_info_nv1 = 0, + mu_nat_info_nv2 = 0, + mu_walking_vid1 =0, + mu_walking_vid2 = 0, + mu_walking_no_info = 0, + mu_walking_info_nv1 = 0, + mu_walking_info_nv2 = 0, + sig_natural = 15, + sig_walking = 2, + sig_rent = 2, + sig_ASC_sq = 2) + +### specify parameters that should be kept fixed, here = none +apollo_fixed = c() + +### Set parameters for generating draws, use 2000 sobol draws +apollo_draws = list( + interDrawsType = "sobol", + interNDraws = n_draws, + interUnifDraws = c(), + interNormDraws = c("draws_natural", "draws_walking", "draws_rent", "draws_asc"), + intraDrawsType = "halton", + intraNDraws = 0, + intraUnifDraws = c(), + intraNormDraws = c() +) + +### Create random parameters, define distribution of the parameters +apollo_randCoeff = function(apollo_beta, apollo_inputs){ + randcoeff = list() + + randcoeff[["b_mu_natural"]] = mu_natural + sig_natural * draws_natural + randcoeff[["b_mu_walking"]] = mu_walking + sig_walking * draws_walking + randcoeff[["b_mu_rent"]] = -exp(mu_rent + sig_rent * draws_rent) + randcoeff[["b_ASC_sq"]] = ASC_sq + sig_ASC_sq * draws_asc + + return(randcoeff) +} + + +### validate +apollo_inputs = apollo_validateInputs() +apollo_probabilities=function(apollo_beta, apollo_inputs, functionality="estimate"){ + + ### Function initialisation: do not change the following three 
commands + ### Attach inputs and detach after function exit + apollo_attach(apollo_beta, apollo_inputs) + on.exit(apollo_detach(apollo_beta, apollo_inputs)) + + ### Create list of probabilities P + P = list() + + #### List of utilities (later integrated in mnl_settings below) #### + # Define utility functions here: + + V = list() + V[['alt1']] = -(b_mu_rent + mu_rent_vid1 *Dummy_Video_1 + mu_rent_vid2 * Dummy_Video_2 + mu_rent_no_info * Dummy_no_info + + mu_rent_info_nv1 * Dummy_Info_nv1 + mu_rent_info_nv2 * Dummy_Info_nv2 + mu_rent_NR * Z_Mean_NR) * + (b_mu_natural * Naturalness_1 + b_mu_walking * WalkingDistance_1 + + mu_nat_NR * Naturalness_1 * Z_Mean_NR + mu_wd_NR * WalkingDistance_1 * Z_Mean_NR + + mu_nat_vid1 * Naturalness_1 *Dummy_Video_1 + mu_nat_no_info * Naturalness_1 * Dummy_no_info + + mu_nat_info_nv1 * Naturalness_1 *Dummy_Info_nv1 + mu_nat_vid2 * Naturalness_1 * Dummy_Video_2 + + mu_nat_info_nv2 * Naturalness_1 *Dummy_Info_nv2 + + mu_walking_vid1 * WalkingDistance_1 *Dummy_Video_1 + mu_walking_no_info * WalkingDistance_1 * Dummy_no_info + + mu_walking_info_nv1 * WalkingDistance_1 *Dummy_Info_nv1 + mu_walking_vid2 * WalkingDistance_1 * Dummy_Video_2 + + mu_walking_info_nv2 * WalkingDistance_1 *Dummy_Info_nv2- Rent_1) + + V[['alt2']] = -(b_mu_rent + mu_rent_vid1 *Dummy_Video_1 + mu_rent_vid2 * Dummy_Video_2 + mu_rent_no_info * Dummy_no_info + + mu_rent_info_nv1 * Dummy_Info_nv1 + mu_rent_info_nv2 * Dummy_Info_nv2 + mu_rent_NR * Z_Mean_NR)* + (b_mu_natural * Naturalness_2 + b_mu_walking * WalkingDistance_2 + + mu_nat_NR * Naturalness_2 * Z_Mean_NR + mu_wd_NR * WalkingDistance_2 * Z_Mean_NR + + mu_nat_vid1 * Naturalness_2 *Dummy_Video_1 + mu_nat_no_info * Naturalness_2 * Dummy_no_info + + mu_nat_info_nv1 * Naturalness_2 *Dummy_Info_nv1 + mu_nat_vid2 * Naturalness_2 * Dummy_Video_2 + + mu_nat_info_nv2 * Naturalness_2 *Dummy_Info_nv2+ + mu_walking_vid1 * WalkingDistance_2 *Dummy_Video_1 + mu_walking_no_info * WalkingDistance_2 * Dummy_no_info + + mu_walking_info_nv1 * WalkingDistance_2 *Dummy_Info_nv1 + mu_walking_vid2 * WalkingDistance_2 * Dummy_Video_2 + + mu_walking_info_nv2 * WalkingDistance_2 *Dummy_Info_nv2 - Rent_2) + + V[['alt3']] = -(b_mu_rent + mu_rent_vid1 *Dummy_Video_1 + mu_rent_vid2 * Dummy_Video_2 + mu_rent_no_info * Dummy_no_info + + mu_rent_info_nv1 * Dummy_Info_nv1 + mu_rent_info_nv2 * Dummy_Info_nv2 + mu_rent_NR * Z_Mean_NR) * + (b_ASC_sq + b_mu_natural * Naturalness_3 + b_mu_walking * WalkingDistance_3 + + mu_asc_NR * Z_Mean_NR + mu_nat_NR * Naturalness_3 * Z_Mean_NR + + mu_wd_NR * WalkingDistance_3 * Z_Mean_NR + + mu_nat_vid1 * Naturalness_3 *Dummy_Video_1 + mu_nat_no_info * Naturalness_3 * Dummy_no_info + + mu_nat_info_nv1 * Naturalness_3 *Dummy_Info_nv1 + mu_nat_vid2 * Naturalness_3 * Dummy_Video_2 + + mu_nat_info_nv2 * Naturalness_3 *Dummy_Info_nv2+ + mu_walking_vid1 * WalkingDistance_3 *Dummy_Video_1 + mu_walking_no_info * WalkingDistance_3 * Dummy_no_info + + mu_walking_info_nv1 * WalkingDistance_3 *Dummy_Info_nv1 + mu_walking_vid2 * WalkingDistance_3 * Dummy_Video_2 + + mu_walking_info_nv2 * WalkingDistance_3 *Dummy_Info_nv2 + + mu_ASC_sq_vid1 * Dummy_Video_1 + mu_ASC_sq_vid2 * Dummy_Video_2 + + mu_ASC_sq_no_info * Dummy_no_info + mu_ASC_sq_info_nv1 * Dummy_Info_nv1 + + mu_ASC_sq_info_nv2 * Dummy_Info_nv2 - Rent_3) + + + ### Define settings for MNL model component + mnl_settings = list( + alternatives = c(alt1=1, alt2=2, alt3=3), + avail = 1, # all alternatives are available in every choice + choiceVar = choice, + V = V#, # tell function to use list 
vector defined above
+
+  )
+
+  ### Compute probabilities using MNL model
+  P[['model']] = apollo_mnl(mnl_settings, functionality)
+
+  ### Take product across observations for the same individual
+  P = apollo_panelProd(P, apollo_inputs, functionality)
+
+  ### Average across inter-individual draws - only needed for mixed logit!
+  P = apollo_avgInterDraws(P, apollo_inputs, functionality)
+
+  ### Prepare and return outputs of function
+  P = apollo_prepareProb(P, apollo_inputs, functionality)
+  return(P)
+}
+
+
+
+# ################################################################# #
+#### MODEL ESTIMATION                                              ##
+# ################################################################# #
+# Estimate model with the BFGS algorithm
+
+mxl_wtp_NR_case_c_rentINTX = apollo_estimate(apollo_beta, apollo_fixed,
+                                             apollo_probabilities, apollo_inputs,
+                                             estimate_settings=list(maxIterations=400,
+                                                                    estimationRoutine="bfgs",
+                                                                    hessianRoutine="analytic"))
+
+
+
+# ################################################################# #
+#### MODEL OUTPUTS                                                 ##
+# ################################################################# #
+apollo_saveOutput(mxl_wtp_NR_case_c_rentINTX)
+
+
diff --git a/Scripts/nat_interaction_plot.R b/Scripts/nat_interaction_plot.R
index b29594ac72624b73fd5804d340fa5897842ad858..706e331d7bfe7d91c75972a12f4e99aa8a9674ee 100644
--- a/Scripts/nat_interaction_plot.R
+++ b/Scripts/nat_interaction_plot.R
@@ -1,26 +1,27 @@
-df_NR_C <- as.data.frame(mxl_wtp_NR_case_c_rentINT$estimate)
-alpha = 0.1
-z_value <- qnorm(1-alpha)
-df_NR_C$margin_of_error1 <- z_value*mxl_wtp_NR_case_c_rentINT$robse
-
-df_NR_C <- rownames_to_column(df_NR_C, "Coefficent")
-
-colnames(df_NR_C) <- c("Coefficent", "Estimate", "Margin_of_error")
-
-df_NR_C_melt <- melt(df_NR_C[1:2], id = "Coefficent")
-
-df_NR_C_melt$ME <- df_NR_C$Margin_of_error
-
-df_NR_C_melt <- df_NR_C_melt %>%
-  filter(str_starts(Coefficent, "mu_nat_"))
-
-df_NR_C_melt$Coefficent <- c("NR", "Video 1", "Video 2", "No Info 2", "Text 1", "Text 2")
-
-ggplot(data=df_NR_C_melt,
-       aes(x=factor(Coefficent, levels = c("NR", "No Info 2", "Text 1", "Text 2", "Video 1", "Video 2")),
-           y=value, fill=variable)) +
-  geom_bar(aes(fill=Coefficent), stat="identity", position='dodge', width = 0.9, alpha=0.7) +
-  geom_errorbar(aes(x=Coefficent, ymin=value-ME, ymax=value+ME), width=0.3, position=position_dodge(0.8)) +
-  ylab("Interaction Effect WTP Naturalness") +
-  xlab("Interaction") +
-  scale_x_discrete(guide = guide_axis(angle = 45))
+df_NR_C <- as.data.frame(mxl_wtp_NR_case_c_rentINT$estimate)
+alpha = 0.1
+# Two-sided 90% interval; qnorm(1 - alpha / 2) matches interaction_plots_presi.R
+z_value <- qnorm(1 - alpha / 2)
+df_NR_C$margin_of_error1 <- z_value*mxl_wtp_NR_case_c_rentINT$robse
+
+df_NR_C <- rownames_to_column(df_NR_C, "Coefficient")
+
+colnames(df_NR_C) <- c("Coefficient", "Estimate", "Margin_of_error")
+
+df_NR_C_melt <- melt(df_NR_C[1:2], id = "Coefficient")
+
+df_NR_C_melt$ME <- df_NR_C$Margin_of_error
+
+df_NR_C_melt <- df_NR_C_melt %>%
+  filter(str_starts(Coefficient, "mu_nat_"))
+
+df_NR_C_melt$Coefficient <- c("NR", "Video 1", "Video 2", "No Info 2", "Text 1", "Text 2")
+
+ggplot(data=df_NR_C_melt,
+       aes(x=factor(Coefficient, levels = c("NR", "No Info 2", "Text 1", "Text 2", "Video 1", "Video 2")),
+           y=value, fill=variable)) +
+  geom_bar(aes(fill=Coefficient), stat="identity", position='dodge', width = 0.9, alpha=0.7) +
+  geom_errorbar(aes(x=Coefficient, ymin=value-ME, ymax=value+ME), width=0.3, position=position_dodge(0.8)) +
+  ylab("Interaction Effect WTP Naturalness") +
+  xlab("Interaction") +
+  scale_x_discrete(guide = guide_axis(angle = 45))
diff --git 
a/Scripts/ols/ols_consequentiality.R b/Scripts/ols/ols_consequentiality.R index 3e00225be10bdb6e8e82af790eecde0955abbebb..446c328f23ebb7366b5b9e6a6768749650275f83 100644 --- a/Scripts/ols/ols_consequentiality.R +++ b/Scripts/ols/ols_consequentiality.R @@ -1,28 +1,28 @@ -# Q28W3 - -# Q29W3 - -# A1 I belive, A5 I don't belive; A6 i don't know - -data <- data %>% mutate(Conseq_score = Conseq_UGS + Conseq_Money) - - -conseq_model_A <- lm(Conseq_score ~ as.factor(Treatment_A), data) -summary(conseq_model_A) - -conseq_model_control_A <- lm(Conseq_score ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) -summary(conseq_model_control_A ) - - -conseq_model_B <- lm(Conseq_score ~ as.factor(Treatment_B), data) -summary(conseq_model_B) - -conseq_model_control_B <- lm(Conseq_score ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) -summary(conseq_model_control_B) - - -conseq_model_C <- lm(Conseq_score ~ as.factor(Treatment_C), data) -summary(conseq_model_C) - -conseq_model_control_C <- lm(Conseq_score ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) -summary(conseq_model_control_C) +# Q28W3 + +# Q29W3 + +# A1 I belive, A5 I don't belive; A6 i don't know + +data <- data %>% mutate(Conseq_score = Conseq_UGS + Conseq_Money) + + +conseq_model_A <- lm(Conseq_score ~ as.factor(Treatment_A), data) +summary(conseq_model_A) + +conseq_model_control_A <- lm(Conseq_score ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) +summary(conseq_model_control_A ) + + +conseq_model_B <- lm(Conseq_score ~ as.factor(Treatment_B), data) +summary(conseq_model_B) + +conseq_model_control_B <- lm(Conseq_score ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) +summary(conseq_model_control_B) + + +conseq_model_C <- lm(Conseq_score ~ as.factor(Treatment_C), data) +summary(conseq_model_C) + +conseq_model_control_C <- lm(Conseq_score ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) +summary(conseq_model_control_C) diff --git a/Scripts/ols/ols_nr.R b/Scripts/ols/ols_nr.R index 2ad2e09e81c85f70c4e84fd482d0bafb9cf9ac06..274e610fe37dc9e291966e70002fcb78a62a1460 100644 --- a/Scripts/ols/ols_nr.R +++ b/Scripts/ols/ols_nr.R @@ -1,23 +1,23 @@ -# Analyze NR Score - -data$Treatment_C <- as.factor(data$Treatment_C) - -data$Treatment_C <- relevel(data$Treatment_C, ref = "No Treatment 3") - -nr_model <- lm(Z_Mean_NR ~ Age_mean + Uni_degree + Kids_Dummy + Gender_female+ Rent_SQ + - Naturalness_SQ + WalkingDistance_SQ , data) - -summary(nr_model) - -nr_model_treat <- lm(Z_Mean_NR ~ Age_mean + Uni_degree + Kids_Dummy + Gender_female+ QFIncome + - as.factor(Treatment_C) + Naturalness_SQ, data) - -summary(nr_model_treat) - - -nr_model_treat_A <- lm(Z_Mean_NR ~ as.factor(Treatment_A) + QFIncome + as.factor(Gender)+Age_mean+Uni_degree + Kids_Dummy + - Naturalness_SQ + WalkingDistance_SQ, data) - -summary(nr_model_treat_A) - -vif(nr_model_treat) +# Analyze NR Score + +data$Treatment_C <- as.factor(data$Treatment_C) + +data$Treatment_C <- relevel(data$Treatment_C, ref = "No Treatment 3") + +nr_model <- lm(Z_Mean_NR ~ Age_mean + Uni_degree + Kids_Dummy + Gender_female+ Rent_SQ + + Naturalness_SQ + WalkingDistance_SQ , data) + +summary(nr_model) + +nr_model_treat <- lm(Z_Mean_NR ~ Age_mean + Uni_degree + Kids_Dummy + Gender_female+ QFIncome + + as.factor(Treatment_C) + Naturalness_SQ, data) + 
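A minimal robustness aside (an editor's sketch, not part of this patch): if heteroskedasticity is a concern for the OLS models in this script, HC3-robust standard errors can be reported for `nr_model_treat`, defined just above. This assumes the `sandwich` and `lmtest` packages are installed alongside `car`:

```r
# Hypothetical sketch: HC3-robust standard errors for nr_model_treat.
library(sandwich)
library(lmtest)
coeftest(nr_model_treat, vcov = vcovHC(nr_model_treat, type = "HC3"))
```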
+summary(nr_model_treat) + + +nr_model_treat_A <- lm(Z_Mean_NR ~ as.factor(Treatment_A) + QFIncome + as.factor(Gender)+Age_mean+Uni_degree + Kids_Dummy + + Naturalness_SQ + WalkingDistance_SQ, data) + +summary(nr_model_treat_A) + +vif(nr_model_treat) diff --git a/Scripts/ols/ols_opt_out.R b/Scripts/ols/ols_opt_out.R index e63bb765608a700483e29569d65c24ee967f36cd..9c2357fa2ae987a43037f1ba3824282f2b593705 100644 --- a/Scripts/ols/ols_opt_out.R +++ b/Scripts/ols/ols_opt_out.R @@ -1,80 +1,80 @@ -data <- database_full %>% - group_by(id) %>% - slice(1) %>% - ungroup() - -data$Treatment_C <- as.factor(data$Treatment_C) - -data$Treatment_C <- relevel(data$Treatment_C, ref = "No Treatment 3") - -ols_opt_out_A<- lm( count_choosen_3 ~ as.factor(Treatment_A) ,data) -summary(ols_opt_out_A) -ols_opt_out_control_A<- lm( count_choosen_3 ~ as.factor(Treatment_A) + Z_Mean_NR + QFIncome + as.factor(Gender)+Age_mean+Uni_degree,data) -summary(ols_opt_out_control_A) - -ols_opt_out_B<- lm( count_choosen_3 ~ as.factor(Treatment_B) ,data) -summary(ols_opt_out_B) -ols_opt_out_control_B<- lm( count_choosen_3 ~ as.factor(Treatment_B) + Z_Mean_NR + QFIncome + as.factor(Gender)+Age_mean+Uni_degree,data) -summary(ols_opt_out_control_B) -ols_opt_out_C<- lm( count_choosen_3 ~ as.factor(Treatment_C) ,data) -summary(ols_opt_out_C) -ols_opt_out_control_C<- lm( count_choosen_3 ~ as.factor(Treatment_C) + Z_Mean_NR + QFIncome + as.factor(Gender)+Age_mean+Uni_degree,data) -summary(ols_opt_out_control_C) - -# Obtain predicted values -predictions <- predict(ols_opt_out_control_C, data, se.fit = TRUE) - -# Create a data frame with predictions and standard errors -predictions_df <- data.frame( - Treatment_C = data$Treatment_C, - Predicted_Value = predictions$fit, - Standard_Error = predictions$se.fit -) - -# Calculate means and standard errors -means <- aggregate(Predicted_Value ~ Treatment_C, predictions_df, mean) -means$Standard_Error <- aggregate(Standard_Error ~ Treatment_C, predictions_df, mean)$Standard_Error - -# Plot mean and standard errors -library(ggplot2) - -ggplot(means, aes(x = as.factor(Treatment_C), y = Predicted_Value, fill = as.factor(Treatment_C))) + - geom_bar(stat = "identity", position = "dodge") + - geom_errorbar(aes(ymin = Predicted_Value - 1.64*Standard_Error, ymax = Predicted_Value + 1.64*Standard_Error), - position = position_dodge(width = 0.9), width = 0.25) + - labs(fill = "Treatment") + - xlab("Treatment") + - ylab("Mean Predicted Value") + - theme_minimal() -# -# # Create an HTML results table with customized names and stars -# results_table_1 <- stargazer( -# ols_opt_out_1, ols_opt_out_control_1, -# align = TRUE, -# type = "html", -# dep.var.labels = "No. Opt-out choices", -# covariate.labels = c("Video 1", "Video 2", "No Info choosen", "Text 1", "Text 2", -# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", -# "Hochschulabschluss") , -# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels -# star.char = c("*", "**", "***") # Custom significance stars -# ) -# -# # Save the HTML table to a file -# writeLines(results_table_1, "Estimation_results/ols/opt_out_1.html") -# -# # Create an HTML results table with customized names and stars -# results_table_2 <- stargazer( -# ols_opt_out_2, ols_opt_out_control_2, -# align = TRUE, -# type = "html", -# dep.var.labels = "No. 
Opt-out choices", -# covariate.labels = c("Video", "No Info choosen", "Text", -# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", -# "Hochschulabschluss") , -# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels -# star.char = c("*", "**", "***") # Custom significance stars -# ) -# -# # Save the HTML table to a file -# writeLines(results_table_2, "Estimation_results/ols/opt_out_2.html") +data <- database_full %>% + group_by(id) %>% + slice(1) %>% + ungroup() + +data$Treatment_C <- as.factor(data$Treatment_C) + +data$Treatment_C <- relevel(data$Treatment_C, ref = "No Treatment 3") + +ols_opt_out_A<- lm( count_choosen_3 ~ as.factor(Treatment_A) ,data) +summary(ols_opt_out_A) +ols_opt_out_control_A<- lm( count_choosen_3 ~ as.factor(Treatment_A) + Z_Mean_NR + QFIncome + as.factor(Gender)+Age_mean+Uni_degree,data) +summary(ols_opt_out_control_A) + +ols_opt_out_B<- lm( count_choosen_3 ~ as.factor(Treatment_B) ,data) +summary(ols_opt_out_B) +ols_opt_out_control_B<- lm( count_choosen_3 ~ as.factor(Treatment_B) + Z_Mean_NR + QFIncome + as.factor(Gender)+Age_mean+Uni_degree,data) +summary(ols_opt_out_control_B) +ols_opt_out_C<- lm( count_choosen_3 ~ as.factor(Treatment_C) ,data) +summary(ols_opt_out_C) +ols_opt_out_control_C<- lm( count_choosen_3 ~ as.factor(Treatment_C) + Z_Mean_NR + QFIncome + as.factor(Gender)+Age_mean+Uni_degree,data) +summary(ols_opt_out_control_C) + +# Obtain predicted values +predictions <- predict(ols_opt_out_control_C, data, se.fit = TRUE) + +# Create a data frame with predictions and standard errors +predictions_df <- data.frame( + Treatment_C = data$Treatment_C, + Predicted_Value = predictions$fit, + Standard_Error = predictions$se.fit +) + +# Calculate means and standard errors +means <- aggregate(Predicted_Value ~ Treatment_C, predictions_df, mean) +means$Standard_Error <- aggregate(Standard_Error ~ Treatment_C, predictions_df, mean)$Standard_Error + +# Plot mean and standard errors +library(ggplot2) + +ggplot(means, aes(x = as.factor(Treatment_C), y = Predicted_Value, fill = as.factor(Treatment_C))) + + geom_bar(stat = "identity", position = "dodge") + + geom_errorbar(aes(ymin = Predicted_Value - 1.64*Standard_Error, ymax = Predicted_Value + 1.64*Standard_Error), + position = position_dodge(width = 0.9), width = 0.25) + + labs(fill = "Treatment") + + xlab("Treatment") + + ylab("Mean Predicted Value") + + theme_minimal() +# +# # Create an HTML results table with customized names and stars +# results_table_1 <- stargazer( +# ols_opt_out_1, ols_opt_out_control_1, +# align = TRUE, +# type = "html", +# dep.var.labels = "No. Opt-out choices", +# covariate.labels = c("Video 1", "Video 2", "No Info choosen", "Text 1", "Text 2", +# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", +# "Hochschulabschluss") , +# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels +# star.char = c("*", "**", "***") # Custom significance stars +# ) +# +# # Save the HTML table to a file +# writeLines(results_table_1, "Estimation_results/ols/opt_out_1.html") +# +# # Create an HTML results table with customized names and stars +# results_table_2 <- stargazer( +# ols_opt_out_2, ols_opt_out_control_2, +# align = TRUE, +# type = "html", +# dep.var.labels = "No. 
Opt-out choices", +# covariate.labels = c("Video", "No Info choosen", "Text", +# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", +# "Hochschulabschluss") , +# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels +# star.char = c("*", "**", "***") # Custom significance stars +# ) +# +# # Save the HTML table to a file +# writeLines(results_table_2, "Estimation_results/ols/opt_out_2.html") diff --git a/Scripts/ols/ols_quiz.R b/Scripts/ols/ols_quiz.R index a4af05205c7f9d087bbf5a3f4b9e474e7515d3a8..d80f763772383736db338362e3ed8a876dce3bcc 100644 --- a/Scripts/ols/ols_quiz.R +++ b/Scripts/ols/ols_quiz.R @@ -1,64 +1,64 @@ - -quiz_data$Treatment_C <- as.factor(quiz_data$Treatment_C) - -quiz_data$Treatment_C <- relevel(quiz_data$Treatment_C, ref = "No Treatment 3") - - -ols_percentage_correct_A<- lm( percentage_correct ~ as.factor(Treatment_A) ,quiz_data) -summary(ols_percentage_correct_A) -ols_percentage_correct_control_A<- lm( percentage_correct ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,quiz_data) -summary(ols_percentage_correct_control_A) - -ols_percentage_correct_B<- lm( percentage_correct ~ as.factor(Treatment_B) ,quiz_data) -summary(ols_percentage_correct_B) -ols_percentage_correct_control_B<- lm( percentage_correct ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,quiz_data) -summary(ols_percentage_correct_control_B) - -ols_percentage_correct_C<- lm( percentage_correct ~ as.factor(Treatment_C) ,quiz_data) -summary(ols_percentage_correct_C) -ols_percentage_correct_control_C<- lm( percentage_correct ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,quiz_data) -summary(ols_percentage_correct_control_C) - -vif(ols_percentage_correct_control_C) - -# -# # Create an HTML results table with customized names and stars -# results_table_3 <- stargazer( -# ols_quiz_1a, ols_quiz_control_1a, ols_quiz_1b, ols_quiz_control_1b, -# align = TRUE, -# type = "html", -# dep.var.labels = "No. correct quiz questions", -# covariate.labels = c("Video 1", "Video 2", "No Info choosen", "Text 1", "Text 2", -# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", -# "Hochschulabschluss") , -# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels -# star.char = c("*", "**", "***") # Custom significance stars -# ) -# -# # Save the HTML table to a file -# writeLines(results_table_3, "Estimation_results/ols/quiz_1.html") -# -# # Create an HTML results table with customized names and stars -# results_table_4 <- stargazer( -# ols_quiz_2a, ols_quiz_control_2a, ols_quiz_2b, ols_quiz_control_2b, -# align = TRUE, -# type = "html", -# dep.var.labels = "No. 
correct quiz questions", -# covariate.labels = c("Video", "No Info choosen", "Text", -# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", -# "Hochschulabschluss") , -# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels -# star.char = c("*", "**", "***") # Custom significance stars -# ) -# -# # Save the HTML table to a file -# writeLines(results_table_4, "Estimation_results/ols/quiz_2.html") -# -# -# # Poisson regression -# poisson_quiz_2b <- glm( -# number_correct ~ Dummy_Video + Dummy_no_info + Dummy_nv, -# quiz_data = quiz_data, -# family = poisson -# ) -# summary(poisson_quiz_2b) + +quiz_data$Treatment_C <- as.factor(quiz_data$Treatment_C) + +quiz_data$Treatment_C <- relevel(quiz_data$Treatment_C, ref = "No Treatment 3") + + +ols_percentage_correct_A<- lm( percentage_correct ~ as.factor(Treatment_A) ,quiz_data) +summary(ols_percentage_correct_A) +ols_percentage_correct_control_A<- lm( percentage_correct ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,quiz_data) +summary(ols_percentage_correct_control_A) + +ols_percentage_correct_B<- lm( percentage_correct ~ as.factor(Treatment_B) ,quiz_data) +summary(ols_percentage_correct_B) +ols_percentage_correct_control_B<- lm( percentage_correct ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,quiz_data) +summary(ols_percentage_correct_control_B) + +ols_percentage_correct_C<- lm( percentage_correct ~ as.factor(Treatment_C) ,quiz_data) +summary(ols_percentage_correct_C) +ols_percentage_correct_control_C<- lm( percentage_correct ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,quiz_data) +summary(ols_percentage_correct_control_C) + +vif(ols_percentage_correct_control_C) + +# +# # Create an HTML results table with customized names and stars +# results_table_3 <- stargazer( +# ols_quiz_1a, ols_quiz_control_1a, ols_quiz_1b, ols_quiz_control_1b, +# align = TRUE, +# type = "html", +# dep.var.labels = "No. correct quiz questions", +# covariate.labels = c("Video 1", "Video 2", "No Info choosen", "Text 1", "Text 2", +# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", +# "Hochschulabschluss") , +# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels +# star.char = c("*", "**", "***") # Custom significance stars +# ) +# +# # Save the HTML table to a file +# writeLines(results_table_3, "Estimation_results/ols/quiz_1.html") +# +# # Create an HTML results table with customized names and stars +# results_table_4 <- stargazer( +# ols_quiz_2a, ols_quiz_control_2a, ols_quiz_2b, ols_quiz_control_2b, +# align = TRUE, +# type = "html", +# dep.var.labels = "No. 
correct quiz questions", +# covariate.labels = c("Video", "No Info choosen", "Text", +# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", +# "Hochschulabschluss") , +# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels +# star.char = c("*", "**", "***") # Custom significance stars +# ) +# +# # Save the HTML table to a file +# writeLines(results_table_4, "Estimation_results/ols/quiz_2.html") +# +# +# # Poisson regression +# poisson_quiz_2b <- glm( +# number_correct ~ Dummy_Video + Dummy_no_info + Dummy_nv, +# quiz_data = quiz_data, +# family = poisson +# ) +# summary(poisson_quiz_2b) diff --git a/Scripts/ols/ols_time_spent.R b/Scripts/ols/ols_time_spent.R index ab1090d3e1102d72936fa84f3eb216f4fcfe7f31..a9afc043187fdb5c13303a72dfe0a07f3a6ac0cf 100644 --- a/Scripts/ols/ols_time_spent.R +++ b/Scripts/ols/ols_time_spent.R @@ -1,72 +1,72 @@ -library(car) - -data <- database_full %>% - group_by(id) %>% - slice(1) %>% - ungroup() - - -data$Treatment_C <- as.factor(data$Treatment_C) - -data$Treatment_C <- relevel(data$Treatment_C, ref = "No Treatment 3") - -ols_time_spent_A<- lm( interviewtime_net_clean ~ as.factor(Treatment_A) ,data) -summary(ols_time_spent_A) -ols_time_spent_control_A<- lm( interviewtime_net_clean ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) -summary(ols_time_spent_control_A) - -ols_time_spent_B<- lm( interviewtime_net_clean ~ as.factor(Treatment_B) ,data) -summary(ols_time_spent_B) -ols_time_spent_control_B<- lm( interviewtime_net_clean ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome +Uni_degree,data) -summary(ols_time_spent_control_B) -ols_time_spent_C<- lm( interviewtime_net_clean ~ as.factor(Treatment_C) ,data) -summary(ols_time_spent_C) -ols_time_spent_control_C<- lm( interviewtime_net_clean ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) -summary(ols_time_spent_control_C) - - -ols_time_cc_A<- lm( CC_time_mean_clean ~ as.factor(Treatment_A) ,data) -summary(ols_time_cc_A) -ols_time_cc_control_A<- lm( CC_time_mean_clean ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome +Uni_degree,data) -summary(ols_time_cc_control_A) - -ols_time_cc_B<- lm( CC_time_mean_clean ~ as.factor(Treatment_B) ,data) -summary(ols_time_cc_B) -ols_time_cc_control_B<- lm( CC_time_mean_clean ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome + Uni_degree,data) -summary(ols_time_cc_control_B) -ols_time_cc_C<- lm( CC_time_mean_clean ~ as.factor(Treatment_C) ,data) -summary(ols_time_cc_C) -ols_time_cc_control_C<- lm( CC_time_mean_clean ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome +Uni_degree,data) -summary(ols_time_cc_control_C) -# # Create an HTML results table with customized names and stars -# results_table_5 <- stargazer( -# ols_tme_spent_1, ols_tme_spent_control_1, -# align = TRUE, -# type = "html", -# dep.var.labels = "Interview Time (without Treatments)", -# covariate.labels = c("Treated_A", "Treated_B","No Info choosen", "Video 1", "Video 2", "Text 1", "Text 2", -# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", -# "Hochschulabschluss") , -# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels -# star.char = c("*", "**", "***") # Custom significance stars -# ) -# -# # Save the HTML table to a file -# writeLines(results_table_5, "Estimation_results/ols/time_spent_1.html") -# -# # Create an HTML results table with 
customized names and stars -# results_table_6 <- stargazer( -# ols_tme_spent_2, ols_tme_spent_control_2, -# align = TRUE, -# type = "html", -# dep.var.labels = "Interview Time (without Treatments)", -# covariate.labels = c("Video", "No Info choosen", "Text", -# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", -# "Hochschulabschluss") , -# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels -# star.char = c("*", "**", "***") # Custom significance stars -# ) -# -# # Save the HTML table to a file -# writeLines(results_table_6, "Estimation_results/ols/time_spent_2.html") - +library(car) + +data <- database_full %>% + group_by(id) %>% + slice(1) %>% + ungroup() + + +data$Treatment_C <- as.factor(data$Treatment_C) + +data$Treatment_C <- relevel(data$Treatment_C, ref = "No Treatment 3") + +ols_time_spent_A<- lm( interviewtime_net_clean ~ as.factor(Treatment_A) ,data) +summary(ols_time_spent_A) +ols_time_spent_control_A<- lm( interviewtime_net_clean ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) +summary(ols_time_spent_control_A) + +ols_time_spent_B<- lm( interviewtime_net_clean ~ as.factor(Treatment_B) ,data) +summary(ols_time_spent_B) +ols_time_spent_control_B<- lm( interviewtime_net_clean ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome +Uni_degree,data) +summary(ols_time_spent_control_B) +ols_time_spent_C<- lm( interviewtime_net_clean ~ as.factor(Treatment_C) ,data) +summary(ols_time_spent_C) +ols_time_spent_control_C<- lm( interviewtime_net_clean ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean+ QFIncome + Uni_degree,data) +summary(ols_time_spent_control_C) + + +ols_time_cc_A<- lm( CC_time_mean_clean ~ as.factor(Treatment_A) ,data) +summary(ols_time_cc_A) +ols_time_cc_control_A<- lm( CC_time_mean_clean ~ as.factor(Treatment_A) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome +Uni_degree,data) +summary(ols_time_cc_control_A) + +ols_time_cc_B<- lm( CC_time_mean_clean ~ as.factor(Treatment_B) ,data) +summary(ols_time_cc_B) +ols_time_cc_control_B<- lm( CC_time_mean_clean ~ as.factor(Treatment_B) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome + Uni_degree,data) +summary(ols_time_cc_control_B) +ols_time_cc_C<- lm( CC_time_mean_clean ~ as.factor(Treatment_C) ,data) +summary(ols_time_cc_C) +ols_time_cc_control_C<- lm( CC_time_mean_clean ~ as.factor(Treatment_C) + Z_Mean_NR + as.factor(Gender)+Age_mean + QFIncome +Uni_degree,data) +summary(ols_time_cc_control_C) +# # Create an HTML results table with customized names and stars +# results_table_5 <- stargazer( +# ols_tme_spent_1, ols_tme_spent_control_1, +# align = TRUE, +# type = "html", +# dep.var.labels = "Interview Time (without Treatments)", +# covariate.labels = c("Treated_A", "Treated_B","No Info choosen", "Video 1", "Video 2", "Text 1", "Text 2", +# "Female", "Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", +# "Hochschulabschluss") , +# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels +# star.char = c("*", "**", "***") # Custom significance stars +# ) +# +# # Save the HTML table to a file +# writeLines(results_table_5, "Estimation_results/ols/time_spent_1.html") +# +# # Create an HTML results table with customized names and stars +# results_table_6 <- stargazer( +# ols_tme_spent_2, ols_tme_spent_control_2, +# align = TRUE, +# type = "html", +# dep.var.labels = "Interview Time (without Treatments)", +# covariate.labels = c("Video", "No Info choosen", "Text", +# "Female", 
"Divers", "Age", "mittlere Reife", "Abitur", "Berufsausbildung", +# "Hochschulabschluss") , +# star.cutoffs = c(0.1, 0.05, 0.01), # Custom significance levels +# star.char = c("*", "**", "***") # Custom significance stars +# ) +# +# # Save the HTML table to a file +# writeLines(results_table_6, "Estimation_results/ols/time_spent_2.html") + diff --git a/project_start.qmd b/project_start.qmd index 7b38247a81c7e2d40848789c34c44ddcc48294de..7b97fa86ef9bf8e1c723a6d7dee6614bb4b6a5b4 100644 --- a/project_start.qmd +++ b/project_start.qmd @@ -34,101 +34,103 @@ list_ols <- list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = ``` ## Motivation (1) -::: {.incremental} -- **Stated preference** methods are frequently applied in **environmental valuation** to estimate economic values of policies, goods, and services that cannot be valued otherwise. -- Stated preference methods face **validity challenges**. -- Valid value estimation requires **sufficient information** provision about the good being valued. -- Still unclear **what formats of information** and **how much information** are optimal for valid preference elicitation. + +::: incremental +- **Stated preference** methods are frequently applied in **environmental valuation** to estimate economic values of policies, goods, and services that cannot be valued otherwise. +- Stated preference methods face **validity challenges**. +- Valid value estimation requires **sufficient information** provision about the good being valued. +- Still unclear **what formats of information** and **how much information** are optimal for valid preference elicitation. ::: ## Motivation (2) -::: {.incremental} -- Too **much information** may increase survey **complexity**, leading to respondents being overburdened with it and producing less consistent choices. -- Too **little information** may lead respondents to **not** being able to make an **informed choice**. -- Valid preference elicitation depends not only on the provision of information, but also on the **appropriate processing and recall** of the information by the respondent. -- **Voluntary information** allows the respondents to gather required information if needed. + +::: incremental +- Too **much information** may increase survey **complexity**, leading to respondents being overburdened with it and producing less consistent choices. +- Too **little information** may lead respondents to **not** being able to make an **informed choice**. +- Valid preference elicitation depends not only on the provision of information, but also on the **appropriate processing and recall** of the information by the respondent. +- **Voluntary information** allows the respondents to gather required information if needed. ::: ## Literature -::: {.incremental} -- There is **little research** on the effects of **voluntary information provision** on choice behavior and information recall. -- In their study, **Tienhaara et al. (2022)** surveyed preferences for agricultural genetic resources, allowing respondents the option to access detailed information on the valued goods prior to preference elicitation. -- Similarly, **Hu et al. (2009)** offered respondents the opportunity to access voluntary information about genetic modified food before participating in a choice experiment. -- Both studies conclude that, on average, respondents who retrieve voluntary information -have **larger willingness to pay** for the good to be valued. 
-- Their study design, however, does not allow comparing the voluntary information retrieval to a version where the additional information was shown obligatory. + +::: incremental +- There is **little research** on the effects of **voluntary information provision** on choice behavior and information recall. +- In their study, **Tienhaara et al. (2022)** surveyed preferences for agricultural genetic resources, allowing respondents the option to access detailed information on the valued goods prior to preference elicitation. +- Similarly, **Hu et al. (2009)** offered respondents the opportunity to access voluntary information about genetic modified food before participating in a choice experiment. +- Both studies conclude that, on average, respondents who retrieve voluntary information have **larger willingness to pay** for the good to be valued. +- Their study design, however, does not allow comparing the voluntary information retrieval to a version where the additional information was shown obligatory. ::: + ## Research Contribution -::: {.incremental} -- Our study explores the impact of additional obligatory and voluntary information on stated preferences using an exogenous split sample approach with three treatments. -- We investigate the effects of information treatments on survey engagement, information recall, consequentiality, and stated preferences, similar to Welling et al. (2023), expanding our understanding of treatment effects. -- We test who choose additional information and to what extent they have different preferences than respondents who do not choose aditional information. +::: incremental +- Our study explores the impact of additional obligatory and voluntary information on stated preferences using an exogenous split sample approach with three treatments. +- We investigate the effects of information treatments on survey engagement, information recall, consequentiality, and stated preferences, similar to Welling et al. (2023), expanding our understanding of treatment effects. +- We test who choose additional information and to what extent they have different preferences than respondents who do not choose aditional information. ::: + ## Research Questions -::: {.incremental} -1. How do obligatory and voluntary information treatments affect **survey engagement**, **information recall**, **consequentiality**, and **stated preferences**? -2. Do **socio-demographic** variables or natural **connectedness** influence the decision to **access voluntary information**? -3. Do **survey engagement**, **information recall**, **consequentiality**, and **stated preferences** differ between respondents who **access voluntary information** and those who do not? + +::: incremental +1. How do obligatory and voluntary information treatments affect **survey engagement**, **information recall**, **consequentiality**, and **stated preferences**? +2. Do **socio-demographic** variables or natural **connectedness** influence the decision to **access voluntary information**? +3. Do **survey engagement**, **information recall**, **consequentiality**, and **stated preferences** differ between respondents who **access voluntary information** and those who do not? ::: + ## Discrete Choice Experiment -::: {.incremental} -- To investigate the research questions, we use data from a **discrete choice experiment (DCE)** on naturalness of urban green spaces. 
-- The survey is an exact **replication** of the choice experiment of **Bronnmann et al., (2023)** and differs only in the information provided to the respondents. -- In the DCE, respondents were asked to imagine possible **changes** to their **most frequently used UGS**. -- This **restructuring** involved adjustments to the UGS’s **naturalness** and changes to the **walking distance**. -- The associated **costs** of this restructuring were intended to be integrated into monthly **rental payments**. -- Participants in the DCE were presented **ten** randomly assigned **choice cards** with a choice between **two alternative programs** for the renovation of the UGS and the **current status quo**. + +::: incremental +- To investigate the research questions, we use data from a **discrete choice experiment (DCE)** on naturalness of urban green spaces. +- The survey is an exact **replication** of the choice experiment of **Bronnmann et al., (2023)** and differs only in the information provided to the respondents. +- In the DCE, respondents were asked to imagine possible **changes** to their **most frequently used UGS**. +- This **restructuring** involved adjustments to the UGS's **naturalness** and changes to the **walking distance**. +- The associated **costs** of this restructuring were intended to be integrated into monthly **rental payments**. +- Participants in the DCE were presented **ten** randomly assigned **choice cards** with a choice between **two alternative programs** for the renovation of the UGS and the **current status quo**. ::: + ## Choice Card {width="300"} - - ## Treatment (Information provision) -- Short info text about the effect of **natural urban green spaces** on urban **heat islands**. -- **Optional video** with the almost the same information. + +- Short info text about the effect of **natural urban green spaces** on urban **heat islands**. +- **Optional video** with the almost the same information. {width="200"} ## Treatment (Quiz) -::: {.incremental} -**Seven quiz questions** with strict reference to the previously provided information. +::: incremental +**Seven quiz questions** with strict reference to the previously provided information. Example Questions: -1. Which of the following statements are correct? - -- The temperature difference between the city and the surrounding area can be up to 10 -degrees Celsius. (true/false) - +1. Which of the following statements are correct? +- The temperature difference between the city and the surrounding area can be up to 10 degrees Celsius. (true/false) -2. According to the information provided, which of the following properties influences -the temperature in the city? +2. According to the information provided, which of the following properties influences the temperature in the city? -- The proximity of green spaces to nature (yes/no) -- Light pollution in the city (yes/no) +- The proximity of green spaces to nature (yes/no) +- Light pollution in the city (yes/no) ::: ## Treatment (Self reference) -::: {.incremental} + +::: incremental To what extent do you agree or disagree with the following statements? -1. I am limited by high temperatures in the city during the summer. (Strongly agree - Strongly disagree) +1. I am limited by high temperatures in the city during the summer. (Strongly agree - Strongly disagree) -2. The city should do more to avoid heat islands. (Strongly agree - Strongly disagree) +2. The city should do more to avoid heat islands. 
(Strongly agree - Strongly disagree)
:::
-
-## Experimental Setting
+## Experimental Setting

 {width="300"}
-
-## Case A
+## Case A

 {width="300"}

@@ -137,18 +139,20 @@ To what extent do you agree or disagree with the following statements?

 {width="300"}

## Data
-::: {.incremental}
-- **Socio-demographics**: Age, Gender, Income, Education.
-- Natural Relatedness Index: Measure derived from 21 items on **connectedness to nature**.
-- Quiz: Evaluation of the quiz we gave to everyone after the DCE.**->Information recall**
-- Timings: We saved the net interview time and the mean Choice Card time.-> **Survey engagement**
-- **Consequentiality**:
+
+::: incremental
+- **Socio-demographics**: Age, Gender, Income, Education.
+- **Natural Relatedness Index**: Measure derived from 21 items on **connectedness to nature**.
+- **Quiz**: Evaluation of the quiz given to everyone after the DCE. -\> **Information recall**
+- **Timings**: We saved the net interview time and the mean choice card time. -\> **Survey engagement**
+- **Consequentiality**:
-- To what extent do you believe that the decisions you make will have an impact on how the green spaces in your neighbourhood are designed in the future?
-- To what extent do you believe that the decisions you make will affect whether you have to pay a contribution for urban greening in the future?
-:::
-## Methods
+:::
+
+## Methods (1) {auto-animate="true"}

- Logit regression (voluntary information access):

```{=tex}
\begin{equation}
\label{simple_logit}
\end{equation}
```
-
-
-- OLS regression (survey engagement):
+- OLS regression (survey engagement & information recall):

```{=tex}
\begin{equation}
\label{ols}
\end{equation}
```
-
- Mixed logit model with interactions in WTP space:

```{=tex}
\begin{equation}
\label{mxl_base}
\end{equation}
```
-## Socio Demografics {.smaller}
+## Methods (2) {auto-animate="true"}

-::: {style="font-size: 50%;"}
-::: panel-tabset
-### Case A
+- Mixed logit model with interactions in WTP space:

-```{r}
-kableExtra::kable(treatment_socio_A)
+```{=tex}
+\begin{equation}
+  U_i = -(\beta_{C_i} + \beta_{TreatC_i} \cdot v_{Treat}) \cdot (\beta_{X_i} \cdot v_{X_i} + \beta_{TreatX_i} \cdot v_{X_i} \cdot v_{Treat} - C_i) + \epsilon_i
+\end{equation}
```

-### Case B
+with

-```{r}
-kableExtra::kable(treatment_socio_C)
+```{=tex}
+\begin{equation}
+  v_{X_i} = \{ASC_{sq_i}, Nat_i, WD_i\}
+\end{equation}
```
-:::
-:::
+and

+```{=tex}
+\begin{equation}
+  v_{Treat_A} = \{\text{Treated}, \text{Voluntary Treated}\}
+\end{equation}
+```

-## NR
+```{=tex}
+\begin{equation}
+  v_{Treat_B} = \{\text{Text 1}, \text{Text 2}, \text{Video 1}, \text{Video 2}, \text{No Info 2}\}
+\end{equation}
+```
-
-**Hypotheses:** Individuals with greater Nature Relatedness (NR) are more inclined to autonomously seek information about environmental subjects, such as the impact of urban green spaces on urban heat islands. Consequently, any observed increase in the willingness to pay among the treated group may be attributed to the individuals' higher NR rather than the treatment itself.
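To make the WTP-space interactions above concrete: the mean WTP for naturalness in a given treatment group is the base coefficient plus that group's interaction term. A minimal post-estimation sketch (an editor's aside, not part of this patch), assuming the coefficient names from `Scripts/mxl/mxl_wtp_space_NR_caseC_RentINT_X.R` and its loaded model object; the label-to-dummy mapping follows `Scripts/nat_interaction_plot.R`:

```r
# Hypothetical sketch: group-specific mean WTP for naturalness in Case C,
# evaluated at the sample-mean NR score (Z_Mean_NR = 0), in rent units.
est <- mxl_wtp_NR_case_c_rentINTX$estimate

wtp_naturalness <- c(
  "Reference" = est[["mu_natural"]],  # omitted treatment dummy
  "Video 1"   = est[["mu_natural"]] + est[["mu_nat_vid1"]],
  "Video 2"   = est[["mu_natural"]] + est[["mu_nat_vid2"]],
  "No Info 2" = est[["mu_natural"]] + est[["mu_nat_no_info"]],
  "Text 1"    = est[["mu_natural"]] + est[["mu_nat_info_nv1"]],
  "Text 2"    = est[["mu_natural"]] + est[["mu_nat_info_nv2"]]
)
round(wtp_naturalness, 2)
```

For a respondent one standard deviation above the mean NR score, add `est[["mu_nat_NR"]]` to each entry once more.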
-## NR OLS - -::: {style="font-size: 65%;"} -```{r, results='asis'} -htmlreg(l=list(nr_model_treat_A), single.row = TRUE, - custom.model.names = c("OLS regression"), - custom.header = list("Dependent variable: NR-Index" = 1), - custom.coef.map = list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = "Treated", "as.factor(Treatment_A)Vol_Treated" = "Vol. Treated", - "as.factor(Treatment_C)No Info 2" = "No Info 2", "as.factor(Treatment_C)No Video 1" = "Text 1", - "as.factor(Treatment_C)No Video 2" = "Text 2", "as.factor(Treatment_C)Video 1" = "Video 1", - "as.factor(Treatment_C)Video 2" = "Video 2", "Z_Mean_NR" = "NR-Index", "as.factor(Gender)2" = "Female", - "Age_mean" = "Age", "QFIncome" = "Income", "Uni_degree" = "University Degree", "Kids_Dummy" = "Children", - "Naturalness_SQ" = "Naturalness SQ", "WalkingDistance_SQ" = "Walking Distance SQ"), - stars = c(0.01, 0.05, 0.1), float.pos="tb", - custom.note = "%stars. Standard errors in parentheses.", - label = "tab:nr_ols", - caption = "Results of OLS on the NR-index.") -``` -::: ## Logit Regression: Who choses treatment? @@ -237,13 +228,11 @@ htmlreg(l=list(logit_choice_treat_uni), stars = c(0.01, 0.05, 0.1), float.pos="t ``` ::: - - ## OLS Engagement: Interview time ::: panel-tabset - ### Table + ::: {style="font-size: 55%;"} ```{r, results='asis'} htmlreg(l=list(ols_time_spent_A, ols_time_spent_control_A, ols_time_spent_C, ols_time_spent_control_C), @@ -257,22 +246,23 @@ htmlreg(l=list(ols_time_spent_A, ols_time_spent_control_A, ols_time_spent_C, o ::: ### Plot + ```{r} ggpubr::ggarrange(plot_interview_A, plot_interview_C) ``` - ::: - ## OLS Engagement: Choice Card Time ::: panel-tabset ### Plot + ```{r} ggpubr::ggarrange(plot_cc_A, plot_cc_C) ``` ### Table + ::: {style="font-size: 55%;"} ```{r, results='asis'} htmlreg(l=list(ols_time_cc_A, ols_time_cc_control_A, ols_time_cc_C, ols_time_cc_control_C), @@ -284,21 +274,19 @@ htmlreg(l=list(ols_time_cc_A, ols_time_cc_control_A, ols_time_cc_C, ols_time_c caption = "Results of OLS on mean choice card time.") ``` ::: - ::: - - ## OLS: Manipulation check ::: panel-tabset - ### Plot + ```{r} ggpubr::ggarrange(plot_mani_A, plot_mani_C) ``` ### Table + ::: {style="font-size: 55%;"} ```{r, results='asis'} htmlreg(l=list(ols_percentage_correct_A, ols_percentage_correct_control_A, ols_percentage_correct_C, ols_percentage_correct_control_C), @@ -310,7 +298,6 @@ htmlreg(l=list(ols_percentage_correct_A, ols_percentage_correct_control_A, ols_ caption = "Results of OLS on percentage of correct quiz statements.") ``` ::: - ::: <!-- ## Self Reference --> @@ -328,13 +315,14 @@ htmlreg(l=list(ols_percentage_correct_A, ols_percentage_correct_control_A, ols_ ## OLS: Consequentiality ::: panel-tabset - ### Plot + ```{r} ggpubr::ggarrange(plot_cons_A, plot_cons_C) ``` ### Table + ::: {style="font-size: 55%;"} ```{r, results='asis'} htmlreg(l=list(conseq_model_A, conseq_model_control_A, conseq_model_C, conseq_model_control_C), @@ -346,21 +334,19 @@ htmlreg(l=list(conseq_model_A, conseq_model_control_A, conseq_model_C, conseq_mo caption = "Results of OLS on consequentiality score.") ``` ::: - ::: - ## OLS: Opt-out ::: panel-tabset - ### Plot + ```{r} ggpubr::ggarrange(plot_opt_A, plot_opt_C) ``` - ### Table + ::: {style="font-size: 60%;"} ```{r, results='asis'} htmlreg(l=list(ols_opt_out_A, ols_opt_out_control_A, ols_opt_out_C, ols_opt_out_control_C), @@ -372,7 +358,6 @@ htmlreg(l=list(ols_opt_out_A, ols_opt_out_control_A, ols_opt_out_C, ols_opt_out_ caption = "Results of OLS on number of opt-out 
choices.") ``` ::: - ::: ## MXL: Split Samples @@ -421,13 +406,9 @@ htmlreg(c(case_C_cols[1], remGOF(case_C_cols[2:7])), ::: ::: - - ## MXL: WTP space with NR index - ::: {style="font-size: 60%;"} - ```{r, results='asis'} htmlreg(c(case_C_cols_NR[1], remGOF(case_C_cols_NR[2:8])), custom.coef.map = list("natural" = "Naturalness", "walking" = "Walking Distance", "rent" = "Rent", @@ -439,20 +420,63 @@ htmlreg(c(case_C_cols_NR[1], remGOF(case_C_cols_NR[2:8])), label = "tab:mxl_NR", caption = "Results of mixed logit model with treatment and NR-index interactions for Case B.") ``` - ::: <!-- ## Case D --> <!-- ```{r} --> + <!-- summary(case_d) --> + <!-- ``` --> -## Takeaways +## Discussion (1) + +1. How do obligatory and voluntary information treatments affect survey engagement, information recall, consequentiality, and stated preferences? + +::: incremental + +- Obligatory and voluntary treatments do not increase survey engagement measured via time spend on the survey + +- Small negative effect for obligatory treatment on survey engagement + +- Both treatments increase information recall, stronger effect for obligatory treatment + +- No effect on consequentiality + +- Strong effects on stated preferences for both treatments, more pronounced effect for the obligatory treatment + +::: + +## Discussion (2) + +2. Do socio-demographic variables or natural connectedness influence the decision to access voluntary information? + +::: incremental + +- Respondents that voluntary access information are younger, richer and have a higher natural relatedness index + +- No effects of gender and education + +- Respondents' preferences for the good to be valued influence the likelihood of accessing additional information + +::: + +## Discussion (3) + +3. Do survey engagement, information recall, consequentiality, and stated preferences differ between respondents who access voluntary information and those who do not? + +::: incremental + +- Respondents that voluntary access information do engage differently in the survey + +::: + +## Conclusion ## Appendix -Information provision (Video) -Link to the video: https://idiv.limequery.com/upload/surveys/682191/files/urban-heat-island-effekt.mp4 + +Information provision (Video) Link to the video: https://idiv.limequery.com/upload/surveys/682191/files/urban-heat-island-effekt.mp4 ## Summary Statistics A @@ -461,6 +485,46 @@ Link to the video: https://idiv.limequery.com/upload/surveys/682191/files/urban- ## Summary Statistics B {width="300"} + + +## Socio Demografics {.smaller} + +::: {style="font-size: 50%;"} +::: panel-tabset +### Case A + +```{r} +kableExtra::kable(treatment_socio_A) +``` + +### Case B + +```{r} +kableExtra::kable(treatment_socio_C) +``` +::: +::: + +## NR OLS + +::: {style="font-size: 65%;"} +```{r, results='asis'} +htmlreg(l=list(nr_model_treat_A), single.row = TRUE, + custom.model.names = c("OLS regression"), + custom.header = list("Dependent variable: NR-Index" = 1), + custom.coef.map = list("(Intercept)" = "Intercept", "as.factor(Treatment_A)Treated" = "Treated", "as.factor(Treatment_A)Vol_Treated" = "Vol. 
Treated", + "as.factor(Treatment_C)No Info 2" = "No Info 2", "as.factor(Treatment_C)No Video 1" = "Text 1", + "as.factor(Treatment_C)No Video 2" = "Text 2", "as.factor(Treatment_C)Video 1" = "Video 1", + "as.factor(Treatment_C)Video 2" = "Video 2", "Z_Mean_NR" = "NR-Index", "as.factor(Gender)2" = "Female", + "Age_mean" = "Age", "QFIncome" = "Income", "Uni_degree" = "University Degree", "Kids_Dummy" = "Children", + "Naturalness_SQ" = "Naturalness SQ", "WalkingDistance_SQ" = "Walking Distance SQ"), + stars = c(0.01, 0.05, 0.1), float.pos="tb", + custom.note = "%stars. Standard errors in parentheses.", + label = "tab:nr_ols", + caption = "Results of OLS on the NR-index.") +``` +::: + <!-- ## MXL: WTP space --> <!-- ::: panel-tabset -->