Skip to content

Commit

Permalink
initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
mscsep authored May 12, 2021
1 parent a07e110 commit 865ecec
Show file tree
Hide file tree
Showing 20 changed files with 2,009 additions and 1 deletion.
2 changes: 1 addition & 1 deletion LICENSE
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2021 Milou Sep
Copyright (c) 2020 Milou Sep

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Expand Down
68 changes: 68 additions & 0 deletions R/FGT_descriptives.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
# Descriptive statistics of the data from the Fear Generalization Task (FGT) in the SAM study
# Written by Milou Sep.

# load packages
library(dplyr)
library(Rmisc) # For Mean and CI calculation of raw data
library(haven) # to load SPSS file in r

# Shock Intensities -------------------------------------------------------

# FGT trial data; the listed codes mark the different kinds of missing values.
FGT_batch <- read.csv2("data/SAM_FGT.csv", na.strings = c("NaN","5555","8888","9999"))
# Shock amperage values per participant.
FGT_SWU <- read.csv2("data/SAM_FGT_Amperage.csv")

# Keep only the amperage rows of participants who completed the FGT (n=117):
# build one regex alternation from the subject names and match it against the
# participant-number column.
# (Matching multiple patterns: https://stackoverflow.com/questions/6947587/matching-multiple-patterns)
# NOTE(review): grepl() does substring matching, so an ID that is a substring
# of another ID would match too broadly -- confirm IDs are unique as
# substrings, otherwise anchor the patterns or use %in%.
FGT_participants <- FGT_batch$subjName
FGT_pattern <- paste(FGT_participants, collapse = "|")
FGT_SWU <- FGT_SWU[grepl(FGT_pattern, as.character(FGT_SWU$Participantnummer)), ]

# Mean and range, then standard deviation, of the shock intensities (mA).
summary(FGT_SWU$`Stroomsterkte..mA.`)
sd(FGT_SWU$`Stroomsterkte..mA.`)
# Mean and 95% CI per experimental condition, plus a one-way test.
SWU.Mean.Condition.CI <- group.CI(`Stroomsterkte..mA.` ~ Conditie, FGT_SWU, ci = 0.95)
oneway.test(`Stroomsterkte..mA.` ~ Conditie, FGT_SWU) # NS


# Count missing FPS data per type missing (for paper) ----------------------------
# Missing-value codes (assigned by the Matlab preprocessing):
#   5555 = digital registration error (message on screen)
#   8888 = excessive baseline activity (more than 2 SD deviation)
#   9999 = latency onset not valid (only affects onset scores, so not present
#          in magnitude variables)

# Reload WITHOUT na.strings so the missing codes stay visible as values.
FGT_batch_count.missing <- read.csv2("data/SAM_FGT.csv")
# Keep every column whose name contains "Umag" (magnitude variables).
Umag_for_missing <- FGT_batch_count.missing[, grep("Umag", names(FGT_batch_count.missing)), drop = FALSE]

# Counts per missing type in the original data.
n.missing.Technical <- sum(Umag_for_missing == '5555') # = 798
n.missing.Noise <- sum(Umag_for_missing == '8888')     # = 256
# Total number of observations (rows x Umag columns).
n.observations <- nrow(Umag_for_missing) * ncol(Umag_for_missing)
# Percentage missing per type.
(n.missing.Technical / n.observations) * 100 # = 7.105
(n.missing.Noise / n.observations) * 100     # = 2.279 %
# Zero-responses.
n.nulltesponses <- sum(Umag_for_missing == "0") # = 240
(n.nulltesponses / n.observations) * 100 # 2.137 %


# FGT Contingency ---------------------------------------------------------
# Did participants identify the threat context correctly (self-report)?

# Questionnaire data (SPSS file) and information on administered task versions.
SAM_questionnaires <- read_sav("data/SAM_questionnaires.sav")
SAM_versions <- read.csv("data/SAM_Codes_Task_Protocol_Versions.csv")

# Keep only the variables needed here.
# NOTE: 'shock_indicator' contains 59 missing values, because this question
# was added to the questionnaire at a later point in time.
SAM_questionnaires2 <- SAM_questionnaires %>% select(SubjectNumber, Condition, shock_indicator)
SAM_versions2 <- SAM_versions %>% select(SubjectNumber, FGTversion)

# Combine task-version info with the self-reported shock indicator.
FGT_contingency <- full_join(SAM_versions2, SAM_questionnaires2, by = "SubjectNumber")

# 1 = threat context identified correctly, 0 = incorrectly (NA stays NA).
FGT_contingency <- FGT_contingency %>%
  mutate(Correct_FGT_contingency = case_when(
    shock_indicator == FGTversion ~ 1, # correct response
    shock_indicator != FGTversion ~ 0  # incorrect response
  ))

# Differences between experimental groups?
group.CI(Correct_FGT_contingency ~ Condition, FGT_contingency, ci = 0.95)
oneway.test(Correct_FGT_contingency ~ Condition, FGT_contingency) # NS

# Store the subject numbers of correct responders for sensitivity analyses.
FGT_contingency %>%
  filter(Correct_FGT_contingency == 1) %>%
  select(SubjectNumber) %>%
  saveRDS("processed_data/participants.for.contingency.sensitivity.analyses.rda")
79 changes: 79 additions & 0 deletions R/FGT_mids_transformations.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
# Functions for the transformation and manipulation of mids objects in the analysis of the data from the Fear Generalization Task (FGT) in the SAM study
# written by Milou Sep

# Info on how to perform manipulations/calculations on midsobject:
# https://stackoverflow.com/questions/26667162/perform-operation-on-each-imputed-dataset-in-rs-mice

library(dplyr)

# Average FPS within each imputation x participant x condition x trialtype
# cell of a (subsetted) long-format mids data frame -- e.g. early trials
# only -- and return the result as a new mids object.
means_EML_2way <- function(data_subset){
  grouping <- list(data_subset$.imp, data_subset$pp,
                   data_subset$Condition, data_subset$trialtype)
  cell.means <- aggregate(data_subset$FPS, by = grouping, FUN = mean)
  colnames(cell.means) <- c(".imp", "pp", "Condition", "trialtype", "FPS")
  # Sanity checks: conditions, participants and imputation numbers
  # should be unchanged by the aggregation.
  print(unique(cell.means$Condition))
  print(unique(cell.means$pp))
  print(unique(cell.means$.imp))
  # Convert the long-format data frame back into a mids object.
  as.mids(cell.means)
}

# Log-transform the FPS values inside a mids object (applied when the
# assumption checks indicate a transformation is needed).
log.transform.mids <- function(data_set){
  # mids -> long data frame; include = TRUE keeps the original data (.imp == 0).
  long_data <- complete(data_set, action = "long", include = TRUE)
  # The +1 offset keeps zero-responses defined under the log.
  long_data$FPS <- log(long_data$FPS + 1)
  # Long data frame -> mids again.
  as.mids(long_data)
}


# Extract a single imputed data set from a mids object, for testing purposes.
#
# data_mids: mids object.
# imp:       number of the imputation to extract (0 = original data).
#
# NOTE: returns a plain data frame, not a mids object.
only_one_imputation <- function(data_mids, imp){
  # mids -> long format; include = TRUE also keeps the original data (.imp == 0).
  data_mids.long <- complete(data_mids, action = "long", include = TRUE)
  # Keep only the rows of the requested imputation.
  # (A stray `tibble(data_mids.long)` call whose result was discarded -- a
  # no-op -- has been removed here.)
  data_mids.long %>% filter(.imp == imp)
}


# For sensitivity analyses ------------------------------------------------

# Sensitivity analyses to check the effect of potential influential participants:
# drop the given participants from a mids object and return the reduced mids.
#
# data_mids:         mids object with a participant column 'pp'.
# influentialpoints: participant numbers to exclude, e.g. c(1, 2, 3).
remove.influential.points <- function(data_mids, influentialpoints){
  # mids -> long format; include = TRUE keeps the original data (.imp == 0).
  data_mids.long <- complete(data_mids, action = "long", include = TRUE)
  # Rows belonging to the influential participants.
  indices <- which(data_mids.long$pp %in% influentialpoints)
  # BUGFIX: `df[-integer(0), ]` selects ZERO rows, so the original code dropped
  # the entire data set whenever no influential point matched. Only apply the
  # negative index when there is actually something to remove.
  if (length(indices) > 0) {
    data_mids.long <- data_mids.long[-indices, ]
  }
  # Back to mids.
  as.mids(data_mids.long)
}

# Recode the factors trialtype and trial inside a mids object as continuous
# (numeric) variables. This was recommended for analyses of fear gradients by
# https://www.sciencedirect.com/science/article/pii/S0005791618300612 &
# http://www.frontiersin.org/Quantitative_Psychology_and_Measurement/10.3389/fpsyg.2015.00652/abstract
Within.continuous.mids <- function(data_set){
  # mids -> long format; include = TRUE keeps the original data (.imp == 0).
  long_data <- complete(data_set, action = "long", include = TRUE)
  # Factor -> numeric recoding.
  long_data$trialtype <- as.numeric(long_data$trialtype)
  long_data$trial <- as.numeric(long_data$trial)
  str(long_data) # quick visual check of the recoded structure
  # Long data frame -> mids again.
  as.mids(long_data)
}

# Sensitivity analyses on participants that detected the threat stimulus
# correctly (according to self-report): restrict a mids object to those
# participants and return the reduced mids object.
#
# midsobject: mids object with a participant column 'pp'.
# pp_keep:    data frame with participant numbers in column X1. Defaults to
#             the global `pp_Sensitivity` for backward compatibility, but the
#             dependency is now explicit and overridable instead of a hidden
#             reliance on a global variable.
Sensitivity_contingency <- function(midsobject, pp_keep = pp_Sensitivity){
  long_df <- complete(midsobject, action = "long", include = TRUE)
  # Rows of the participants to keep.
  pp_to_include <- which(long_df$pp %in% pp_keep$X1)
  long_df_included <- long_df[pp_to_include, ]
  as.mids(long_df_included)
}
Loading

0 comments on commit 865ecec

Please sign in to comment.