Eye-Tracking Analysis Tutorial: Final Exercise Solution

Dan Mirman

24 January 2019

Plot

Make a multi-panel plot showing each kind of competition (cohort and rhyme) for each diagnosis group.

ggplot(CohortRhyme, aes(Time, FixProp, color=Object)) + 
  facet_grid(Type ~ Group) + stat_summary(fun.y=mean, geom="line") +
  stat_summary(fun.y=mean, geom="point") + theme_bw()
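Note: fun.y was deprecated in ggplot2 3.3.0 in favor of fun, so with a current ggplot2 the equivalent call would be:

# same plot for ggplot2 >= 3.3.0, where fun.y was renamed to fun
ggplot(CohortRhyme, aes(Time, FixProp, color=Object)) + 
  facet_grid(Type ~ Group) + stat_summary(fun=mean, geom="line") +
  stat_summary(fun=mean, geom="point") + theme_bw()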

Group comparisons

Use fourth-order orthogonal polynomials to analyze the cohort and rhyme competition effects separately. Do the diagnosis groups differ in the size of these competition effects?

# prep for GCA
CohortRhyme.gca <- code_poly(df=CohortRhyme, predictor="timeBin", poly.order=4, draw.poly = FALSE)
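# note: code_poly() is a helper from the tutorial materials; as a rough base-R
# sketch (illustrative object names, not from the original), it builds 4th-order
# orthogonal polynomial time terms and merges them in by timeBin:
ot <- poly(sort(unique(CohortRhyme$timeBin)), 4)
time_codes <- data.frame(timeBin = sort(unique(CohortRhyme$timeBin)),
                         poly1 = ot[, 1], poly2 = ot[, 2],
                         poly3 = ot[, 3], poly4 = ot[, 4])
CohortRhyme.gca.alt <- merge(CohortRhyme, time_codes, by = "timeBin")  # sketch only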
# group effects on cohort competition
cohort.base <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object + 
                      (1+poly1+poly2+poly3+poly4 | subjID) + 
                      (1+poly1+poly2 | subjID:Object), 
                    data=subset(CohortRhyme.gca, Type == "Cohort"), REML=F)
## singular fit
cohort.group <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object*Group + 
                       (1+poly1+poly2+poly3+poly4 | subjID) + 
                       (1+poly1+poly2 | subjID:Object), 
                     data=subset(CohortRhyme.gca, Type == "Cohort"), REML=F)
## singular fit
kable(anova(cohort.base, cohort.group))
               Df    AIC    BIC  logLik  deviance  Chisq  Chi Df  Pr(>Chisq)
cohort.base    32  -2278  -2128    1171     -2342     NA      NA          NA
cohort.group   52  -2304  -2060    1204     -2408     66      20           0
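The p-value printed as 0 in the table is display rounding; the exact tail probability can be recovered from the likelihood-ratio statistic (here rounded to 66 on 20 degrees of freedom):

# exact LRT p-value for the group effect on cohort competition
pchisq(66, df = 20, lower.tail = FALSE)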
#group effects on rhyme competition
rhyme.base <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object + 
                     (1+poly1+poly2+poly3+poly4 | subjID) + 
                     (1+poly1+poly2 | subjID:Object), 
                   data=subset(CohortRhyme.gca, Type == "Rhyme"), REML=F)
## singular fit
rhyme.group <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object*Group + 
                      (1+poly1+poly2+poly3+poly4 | subjID) + 
                      (1+poly1+poly2 | subjID:Object),
                    data=subset(CohortRhyme.gca, Type == "Rhyme"), REML=F)
## singular fit
kable(anova(rhyme.base, rhyme.group))
              Df    AIC    BIC  logLik  deviance  Chisq  Chi Df  Pr(>Chisq)
rhyme.base    32  -2270  -2120    1167     -2334     NA      NA          NA
rhyme.group   52  -2265  -2022    1185     -2369   35.6      20      0.0171
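All four models above trigger singular-fit messages, which usually means the random-effects structure is richer than these data can support (some variance component estimated at zero or a correlation at the +/-1 boundary). A quick way to check which components collapsed, sketched here rather than part of the original exercise:

# check for singularity and inspect the estimated variance components
isSingular(cohort.base)
VarCorr(cohort.base)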

Pairwise group comparisons, prep

# there are many estimated coefficients; we need to find the ones relevant to the group comparisons
x <- coef(summary(cohort.group))
x[21:30,]
##                                      Estimate Std. Error t value
## ObjectUnrelated:GroupBroca           0.007169    0.01684  0.4257
## ObjectUnrelated:GroupWernicke       -0.049674    0.02042 -2.4322
## poly1:ObjectUnrelated:GroupBroca    -0.047473    0.05987 -0.7929
## poly1:ObjectUnrelated:GroupWernicke  0.009278    0.07260  0.1278
## poly2:ObjectUnrelated:GroupBroca     0.044092    0.05885  0.7493
## poly2:ObjectUnrelated:GroupWernicke  0.279206    0.07136  3.9124
## poly3:ObjectUnrelated:GroupBroca    -0.043154    0.03735 -1.1555
## poly3:ObjectUnrelated:GroupWernicke -0.050129    0.04529 -1.1069
## poly4:ObjectUnrelated:GroupBroca     0.025129    0.03735  0.6729
## poly4:ObjectUnrelated:GroupWernicke -0.177046    0.04529 -3.9093
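The rows 21:30 were identified by inspecting the full coefficient table; they can also be located by name, for example:

# indices of the Object-by-Group interaction terms, found by name
grep("ObjectUnrelated:Group", rownames(x))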
library(multcomp)
# each contrast row has 30 entries (one per fixed effect); columns 21-30 are the
# Object-by-Group interaction terms listed above
contrast.matrix <- rbind(
  "Intercept: Ctrl - Br" = c(rep(0, 20), 1, 0, 0, 0, 0, 0, 0, 0, 0, 0),
  "Intercept: Ctrl - We" = c(rep(0, 20), 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
  "Intercept: We - Br" = c(rep(0, 20), 1, -1, 0, 0, 0, 0, 0, 0, 0, 0),
  "Linear: Ctrl - Br" = c(rep(0, 20), 0, 0, 1, 0, 0, 0, 0, 0, 0, 0),
  "Linear: Ctrl - We" = c(rep(0, 20), 0, 0, 0, 1, 0, 0, 0, 0, 0, 0),
  "Linear: We - Br" = c(rep(0, 20), 0, 0, 1, -1, 0, 0, 0, 0, 0, 0),
  "Quad: Ctrl - Br" = c(rep(0, 20), 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
  "Quad: Ctrl - We" = c(rep(0, 20), 0, 0, 0, 0, 0, 1, 0, 0, 0, 0),
  "Quad: We - Br" = c(rep(0, 20), 0, 0, 0, 0, 1, -1, 0, 0, 0, 0)
)
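A quick sanity check is that the contrast matrix has one column per fixed effect in the model:

# the contrast matrix must be as wide as the fixed-effects vector (30 columns here)
stopifnot(ncol(contrast.matrix) == length(fixef(cohort.group)))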

Pairwise group comparisons, results

#pairwise comparisons: Cohort competition
summary(glht(cohort.group, contrast.matrix), test = adjusted("none"))
Cohort competition
lhs                   rhs  estimate  std.error  statistic  p.value
Intercept: Ctrl - Br    0     0.007      0.017      0.426    0.670
Intercept: Ctrl - We    0    -0.050      0.020     -2.432    0.015
Intercept: We - Br      0     0.057      0.023      2.460    0.014
Linear: Ctrl - Br       0    -0.047      0.060     -0.793    0.428
Linear: Ctrl - We       0     0.009      0.073      0.128    0.898
Linear: We - Br         0    -0.057      0.082     -0.691    0.490
Quad: Ctrl - Br         0     0.044      0.059      0.749    0.454
Quad: Ctrl - We         0     0.279      0.071      3.912    0.000
Quad: We - Br           0     0.322      0.081      4.002    0.000
#pairwise comparisons: Rhyme competition
summary(glht(rhyme.group, contrast.matrix), test = adjusted("none"))
Rhyme competition
lhs                   rhs  estimate  std.error  statistic  p.value
Intercept: Ctrl - Br    0    -0.036      0.016     -2.256    0.024
Intercept: Ctrl - We    0     0.018      0.019      0.929    0.353
Intercept: We - Br      0    -0.053      0.022     -2.466    0.014
Linear: Ctrl - Br       0     0.122      0.065      1.889    0.059
Linear: Ctrl - We       0    -0.128      0.078     -1.638    0.101
Linear: We - Br         0     0.250      0.089      2.825    0.005
Quad: Ctrl - Br         0    -0.007      0.056     -0.123    0.902
Quad: Ctrl - We         0    -0.011      0.068     -0.165    0.869
Quad: We - Br           0     0.048      0.078      0.623    0.533
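Note that test = adjusted("none") reports unadjusted p-values. If a multiplicity correction is wanted, multcomp can apply one to the same contrasts, for example:

# optional: the same contrasts with Holm-corrected p-values
summary(glht(cohort.group, contrast.matrix), test = adjusted("holm"))
summary(glht(rhyme.group, contrast.matrix), test = adjusted("holm"))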

Individual competition effect sizes

Use the random effects to compute individual competition effect sizes. (Note: you'll need a model without any group fixed effects so that the random effects are estimated relative to the overall sample rather than to each diagnosis sub-group.)

# get cohort effect sizes
ES.coh <- get_ranef(cohort.base, 'subjID:Object') %>%
  group_by(subjID) %>%
  summarize(Cohort.Intercept = Intercept[Object=="Competitor"] - Intercept[Object=="Unrelated"],
            Cohort.Linear = poly1[Object=="Competitor"] - poly1[Object=="Unrelated"], 
            Cohort.Quadratic = poly2[Object=="Competitor"] - poly2[Object=="Unrelated"])
#head(ES.coh)
# get rhyme effect sizes
ES.rhy <- get_ranef(rhyme.base, 'subjID:Object') %>%
  group_by(subjID) %>%
  summarize(Rhyme.Intercept = Intercept[Object=="Competitor"] - Intercept[Object=="Unrelated"], 
            Rhyme.Linear = poly1[Object=="Competitor"] - poly1[Object=="Unrelated"], 
            Rhyme.Quadratic = poly2[Object=="Competitor"] - poly2[Object=="Unrelated"])
#head(ES.rhy)
# combine cohort and rhyme individual effect sizes
ES <- merge(ES.coh, ES.rhy, by="subjID")
# get grouping info from original data frame
group <- unique(subset(CohortRhyme, select=c(subjID, Group))) 
ES <- merge(ES, group, by="subjID")
summary(ES)
##     subjID          Cohort.Intercept   Cohort.Linear     
##  Length:20          Min.   :-0.04923   Min.   :-0.15154  
##  Class :character   1st Qu.:-0.01712   1st Qu.:-0.05346  
##  Mode  :character   Median :-0.00985   Median :-0.00336  
##                     Mean   : 0.00000   Mean   : 0.00000  
##                     3rd Qu.: 0.02199   3rd Qu.: 0.06445  
##                     Max.   : 0.06869   Max.   : 0.13668  
##  Cohort.Quadratic  Rhyme.Intercept     Rhyme.Linear     Rhyme.Quadratic  
##  Min.   :-0.3099   Min.   :-0.04664   Min.   :-0.2445   Min.   :-0.1237  
##  1st Qu.:-0.0832   1st Qu.:-0.01774   1st Qu.:-0.1184   1st Qu.:-0.0522  
##  Median : 0.0263   Median :-0.00936   Median : 0.0543   Median : 0.0168  
##  Mean   : 0.0000   Mean   : 0.00000   Mean   : 0.0000   Mean   : 0.0000  
##  3rd Qu.: 0.0896   3rd Qu.: 0.01676   3rd Qu.: 0.1060   3rd Qu.: 0.0541  
##  Max.   : 0.1444   Max.   : 0.06739   Max.   : 0.1273   Max.   : 0.1193  
##       Group   
##  Control :12  
##  Broca   : 5  
##  Wernicke: 3  
##               
##               
## 
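get_ranef() is a helper from the tutorial materials; assuming it simply extracts the conditional modes for one grouping factor and splits the subjID:Object labels into separate columns, a rough equivalent would be:

# rough sketch of a get_ranef-style helper (name and details are illustrative)
get_ranef_sketch <- function(model, grouping) {
  re <- as.data.frame(lme4::ranef(model)[[grouping]])   # conditional modes
  names(re)[names(re) == "(Intercept)"] <- "Intercept"  # friendlier column name
  re$subjID <- sub(":.*$", "", rownames(re))            # part before the colon
  re$Object <- sub("^.*:", "", rownames(re))            # part after the colon
  re
}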

Correlations among individual competition effect sizes

Test the correlations between the cohort and rhyme competition effect sizes, overall and separately for the patients (both aphasic groups combined) and the controls.

# compute correlations, tidy results, and bind into rows of a data frame
es_corrs <- rbind(
  #effect size correlations: Intercept
  tidy(cor.test(ES$Cohort.Intercept, ES$Rhyme.Intercept)),
  tidy(cor.test(ES$Cohort.Intercept[ES$Group != "Control"], ES$Rhyme.Intercept[ES$Group != "Control"])),
  tidy(cor.test(ES$Cohort.Intercept[ES$Group == "Control"], ES$Rhyme.Intercept[ES$Group == "Control"])),
  #effect size correlations: Linear
  tidy(cor.test(ES$Cohort.Linear, ES$Rhyme.Linear)),
  tidy(cor.test(ES$Cohort.Linear[ES$Group != "Control"], ES$Rhyme.Linear[ES$Group != "Control"])),
  tidy(cor.test(ES$Cohort.Linear[ES$Group == "Control"], ES$Rhyme.Linear[ES$Group == "Control"])),
  #effect size correlations: Quadratic
  tidy(cor.test(ES$Cohort.Quadratic, ES$Rhyme.Quadratic)),
  tidy(cor.test(ES$Cohort.Quadratic[ES$Group != "Control"], ES$Rhyme.Quadratic[ES$Group != "Control"])),
  tidy(cor.test(ES$Cohort.Quadratic[ES$Group == "Control"], ES$Rhyme.Quadratic[ES$Group == "Control"])))
# combine with labels
tab_df <- cbind(Term = rep(c("Intercept", "Linear", "Quadratic"), each=3), 
                Group = rep(c("Overall", "Aphasia", "Control"), 3),
                es_corrs)
kable(tab_df[, 1:8]) #print
Term       Group    estimate  statistic  p.value  parameter  conf.low  conf.high
Intercept  Overall   -0.3827    -1.7573   0.0959         18   -0.7057     0.0720
Intercept  Aphasia   -0.8605    -4.1381   0.0061          6   -0.9744    -0.3960
Intercept  Control    0.3379     1.1352   0.2827         10   -0.2928     0.7637
Linear     Overall   -0.0221    -0.0939   0.9262         18   -0.4601     0.4246
Linear     Aphasia   -0.4384    -1.1948   0.2772          6   -0.8733     0.3853
Linear     Control    0.4726     1.6959   0.1208         10   -0.1390     0.8232
Quadratic  Overall   -0.0216    -0.0917   0.9280         18   -0.4597     0.4250
Quadratic  Aphasia    0.0558     0.1370   0.8955          6   -0.6754     0.7317
Quadratic  Control   -0.1698    -0.5449   0.5977         10   -0.6777     0.4477
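These nine tests are reported without correction for multiple comparisons; if a correction is wanted, adjusted p-values can be appended, for example:

# optional: Holm-adjusted p-values across the nine correlation tests
tab_df$p.holm <- p.adjust(tab_df$p.value, method = "holm")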

Scatterplot

Make a multi-panel scatterplot showing these correlations.

ES.p <- ES %>% 
  gather(key="key", value="EffectSize", c(2:7)) %>% # gather the estimates
  separate(key, c("Type", "Term"), sep = "\\.") %>% # separate column labels for plotting
  spread(key = Type, value = EffectSize) # spread Cohort and Rhyme effects into separate columns
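# note: gather()/separate()/spread() are superseded in tidyr >= 1.0.0; an
# equivalent reshape with the newer verbs (illustrative object name) would be:
ES.p2 <- ES %>%
  pivot_longer(2:7, names_to = c("Type", "Term"), names_sep = "\\.",
               values_to = "EffectSize") %>%
  pivot_wider(names_from = Type, values_from = EffectSize)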

ggplot(ES.p, aes(Cohort, Rhyme, color=Group)) + 
  facet_wrap(~ Term, scales="free") + geom_point() + 
  scale_color_manual(values=c("grey", "red", "blue")) +
  theme_bw()
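Optionally, per-group least-squares lines can be overlaid to make the correlation patterns easier to see (an illustrative addition, not part of the original exercise):

# same scatterplot with per-group linear fits overlaid
ggplot(ES.p, aes(Cohort, Rhyme, color=Group)) + 
  facet_wrap(~ Term, scales="free") + geom_point() + 
  geom_smooth(method = "lm", se = FALSE) +
  scale_color_manual(values=c("grey", "red", "blue")) +
  theme_bw()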