Dan Mirman
13-14 March 2018
wisqars.suicide$Time <- wisqars.suicide$Year - 1999
m.base <- lmer(Crude.Rate ~ Time + (Time | State), data = wisqars.suicide, REML=F)
m.0 <- lmer(Crude.Rate ~ Time + Region + (Time | State), data = wisqars.suicide, REML=F)
m.1 <- lmer(Crude.Rate ~ Time * Region + (Time | State), data = wisqars.suicide, REML=F)
anova(m.base, m.0, m.1)
## Data: wisqars.suicide
## Models:
## m.base: Crude.Rate ~ Time + (Time | State)
## m.0: Crude.Rate ~ Time + Region + (Time | State)
## m.1: Crude.Rate ~ Time * Region + (Time | State)
## Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
## m.base 6 1426 1451 -707 1414
## m.0 9 1400 1437 -691 1382 32.43 3 4.3e-07 ***
## m.1 12 1402 1452 -689 1378 3.57 3 0.31
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Regions differed in baseline suicide rate, but did not differ in the rate of change of suicide rate.
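Because the Region effect was on the intercept only, the baseline differences can be read off the fixed effects of m.0 (a quick check; with R's default treatment coding, each Region term compares that region to the reference region):
coef(summary(m.0))  # fixed-effect estimates, standard errors, and t-values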
ggplot(wisqars.suicide, aes(Year, Crude.Rate, color=Region)) +
stat_summary(fun.data=mean_se, geom="pointrange") +
stat_summary(aes(y=fitted(m.1)), fun.y=mean, geom="line") +
scale_x_continuous(breaks=1999:2007)
nr.null <- lmer(Correct ~ 1 + (TestTime | SubjectID), data = NamingRecovery, REML=F)
nr.base <- lmer(Correct ~ TestTime + (TestTime | SubjectID), data = NamingRecovery, REML=F)
nr.0 <- lmer(Correct ~ TestTime + Diagnosis + (TestTime | SubjectID), data = NamingRecovery, REML=F)
nr.1 <- lmer(Correct ~ TestTime * Diagnosis + (TestTime | SubjectID), data = NamingRecovery, REML=F)
anova(nr.null, nr.base, nr.0, nr.1)
## Data: NamingRecovery
## Models:
## nr.null: Correct ~ 1 + (TestTime | SubjectID)
## nr.base: Correct ~ TestTime + (TestTime | SubjectID)
## nr.0: Correct ~ TestTime + Diagnosis + (TestTime | SubjectID)
## nr.1: Correct ~ TestTime * Diagnosis + (TestTime | SubjectID)
## Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
## nr.null 5 -126 -112 67.8 -136
## nr.base 6 -146 -130 79.2 -158 22.84 1 1.8e-06 ***
## nr.0 8 -151 -129 83.6 -167 8.72 2 0.013 *
## nr.1 10 -147 -120 83.7 -167 0.26 2 0.880
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
There was overall recovery, and the diagnosis sub-types differed in baseline naming ability, but the sub-types did not differ in amount of recovery.
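To see where the baseline differences lie, one can inspect the Diagnosis fixed effects (a sketch; each Diagnosis term is the baseline difference between one sub-type and the reference sub-type):
coef(summary(nr.0))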
ggplot(NamingRecovery, aes(TestTime, Correct, color=Diagnosis)) +
stat_summary(fun.data=mean_se, geom="pointrange") +
stat_summary(aes(y=fitted(nr.1)), fun.y=mean, geom="line")
CP (d’ peak at category boundary)
: Compare categorical perception along spectral vs. temporal dimensions using second-order orthogonal polynomials.
summary(CP)
## Participant Type Stimulus d.prime
## 1 : 8 Temporal:128 Min. :1.00 Min. :-1.41
## 2 : 8 Spectral:128 1st Qu.:2.75 1st Qu.: 0.40
## 3 : 8 Median :4.50 Median : 1.18
## 4 : 8 Mean :4.50 Mean : 1.34
## 5 : 8 3rd Qu.:6.25 3rd Qu.: 2.22
## 6 : 8 Max. :8.00 Max. : 4.77
## (Other):208
ggplot(CP, aes(Stimulus, d.prime, color=Type)) +
stat_summary(fun.data=mean_se, geom="pointrange")
# prep for analysis
CP <- code_poly(CP, predictor="Stimulus", poly.order = 2, draw.poly = FALSE)
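code_poly adds the orthogonal polynomial predictor columns (poly1, poly2) to the data frame; a quick peek at what the models below will use:
head(CP[, c("Stimulus", "poly1", "poly2")])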
# fit the models
m.base <- lmer(d.prime ~ (poly1 + poly2) + (poly1 + poly2 | Participant), data=CP, REML=F)
m.0 <- lmer(d.prime ~ (poly1 + poly2) + Type + (poly1 + poly2 | Participant), data=CP, REML=F)
m.1 <- lmer(d.prime ~ poly1*Type + poly2 + (poly1 + poly2 | Participant), data=CP, REML=F)
m.2 <- lmer(d.prime ~ (poly1 + poly2)*Type + (poly1 + poly2 | Participant), data=CP, REML=F)
anova(m.base, m.0, m.1, m.2)
## Data: CP
## Models:
## m.base: d.prime ~ (poly1 + poly2) + (poly1 + poly2 | Participant)
## m.0: d.prime ~ (poly1 + poly2) + Type + (poly1 + poly2 | Participant)
## m.1: d.prime ~ poly1 * Type + poly2 + (poly1 + poly2 | Participant)
## m.2: d.prime ~ (poly1 + poly2) * Type + (poly1 + poly2 | Participant)
## Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
## m.base 10 817 852 -398 797
## m.0 11 819 858 -398 797 0.09 1 0.767
## m.1 12 821 863 -398 797 0.30 1 0.583
## m.2 13 817 863 -396 791 5.30 1 0.021 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
get_pvalues(m.2, method = "all")
## Estimate Std..Error t.value p.normal p.normal.star
## (Intercept) 1.40491 0.1087 12.9251 0.000e+00 ***
## poly1 -0.43154 0.2753 -1.5677 1.169e-01
## poly2 -1.58482 0.3848 -4.1188 3.808e-05 ***
## TypeSpectral -0.12523 0.1537 -0.8147 4.153e-01
## poly1:TypeSpectral -0.06156 0.3893 -0.1581 8.744e-01
## poly2:TypeSpectral 1.30526 0.5442 2.3987 1.645e-02 *
## df.KR p.KR p.KR.star
## (Intercept) 30 8.527e-14 ***
## poly1 30 1.274e-01
## poly2 30 2.752e-04 ***
## TypeSpectral 30 4.217e-01
## poly1:TypeSpectral 30 8.754e-01
## poly2:TypeSpectral 30 2.287e-02 *
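The Type difference is on the quadratic term: the significant poly2:TypeSpectral coefficient indicates a shallower quadratic curvature, i.e., a weaker d’ peak at the category boundary, for the Spectral dimension than for the Temporal (reference) dimension; the intercept and linear terms did not differ between dimensions.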
ggplot(CP, aes(Stimulus, d.prime, color=Type)) +
stat_summary(fun.data=mean_se, geom="pointrange") +
stat_summary(aes(y=fitted(m.2)), fun.y=mean, geom="line")
Az
: Use natural (not orthogonal) polynomials to analyze decline in performance of 30 individuals with probable Alzheimer’s disease on three different kinds of tasks.
summary(Az)
## Subject Time Task Performance
## 1 : 30 Min. : 1.0 cADL :300 Min. : 2.0
## 2 : 30 1st Qu.: 3.0 sADL :300 1st Qu.:40.0
## 3 : 30 Median : 5.5 Memory:300 Median :52.0
## 4 : 30 Mean : 5.5 Mean :49.3
## 5 : 30 3rd Qu.: 8.0 3rd Qu.:61.0
## 6 : 30 Max. :10.0 Max. :85.0
## (Other):720
ggplot(Az, aes(Time, Performance, color=Task, fill=Task)) +
stat_summary(fun.data=mean_se, geom="ribbon", color=NA, alpha=0.5) +
stat_summary(fun.y=mean, geom="line")
# prep for analysis
Az <- code_poly(Az, predictor="Time", poly.order=2, orthogonal=F, draw.poly = F)
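Unlike orthogonal polynomial terms, natural polynomial terms are correlated with one another, so the lower-order estimates depend on which higher-order terms are in the model; a quick check of that correlation:
cor(Az$poly1, Az$poly2)  # should be high (about 0.97 for Time = 1..10)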
# fit the full model
# m.base <- lmer(Performance ~ (poly1 + poly2) +
# (poly1 + poly2 | Subject) + (poly1 + poly2 | Subject:Task),
# data=Az, REML=F)
# m.0 <- lmer(Performance ~ (poly1 + poly2) + Task +
# (poly1 + poly2 | Subject) + (poly1 + poly2 | Subject:Task),
# data=Az, REML=F)
# m.1 <- lmer(Performance ~ poly1*Task + poly2 +
# (poly1 + poly2 | Subject) + (poly1 + poly2 | Subject:Task),
# data=Az, REML=F)
m.Az.full <- lmer(Performance ~ (poly1 + poly2)*Task +
(poly1 + poly2 | Subject) + (poly1 + poly2 | Subject:Task),
data=Az, REML=F)
# anova(m.base, m.0, m.1, m.Az.full)
get_pvalues(m.Az.full)
## Estimate Std..Error t.value p.normal p.normal.star
## (Intercept) 67.16167 0.92587 72.5392 0.000e+00 ***
## poly1 -3.28780 0.34109 -9.6391 0.000e+00 ***
## poly2 0.00947 0.01243 0.7616 4.463e-01
## TasksADL 0.09500 0.81172 0.1170 9.068e-01
## TaskMemory 1.24000 0.81172 1.5276 1.266e-01
## poly1:TasksADL 1.36220 0.26754 5.0915 3.553e-07 ***
## poly1:TaskMemory -3.97707 0.26754 -14.8651 0.000e+00 ***
## poly2:TasksADL -0.01326 0.01748 -0.7585 4.481e-01
## poly2:TaskMemory 0.33889 0.01748 19.3894 0.000e+00 ***
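Relative to the reference task (cADL), sADL declined more shallowly (positive poly1:TasksADL) and Memory declined much more steeply (negative poly1:TaskMemory), with a significant positive quadratic term (poly2:TaskMemory) indicating that the Memory decline decelerated over time.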
ggplot(Az, aes(Time, Performance, color=Task)) +
stat_summary(fun.data=mean_se, geom="pointrange") +
stat_summary(fun.y=mean, geom="line", aes(y=fitted(m.Az.full)))
Try GCA on your own data; ask for help if you need it.
If you don’t have any appropriate data on hand, try FunctTheme
: Use orthogonal polynomials to compare “Function” (broom - vacuum cleaner) and “Thematic” (broom - dustpan) semantic competition.
ggplot(FunctTheme, aes(Time, meanFix, color=Object, fill=Object)) +
facet_wrap(~ Condition) + stat_summary(fun.y=mean, geom="line") +
stat_summary(fun.data=mean_se, geom="ribbon", color=NA, alpha=0.5) +
theme_bw() + labs(y = "Fixation Proportion")
# already ran this part
FT.gca <- code_poly(subset(FunctTheme, Object!="Target"), predictor = "Time", poly.order = 4, draw.poly = FALSE)
m.ft <- lmer(meanFix ~ (poly1+poly2+poly3+poly4)*Object*Condition +
(1+poly1+poly2+poly3+poly4 | Subject) +
(1+poly1+poly2+poly3+poly4 | Subject:Object:Condition),
data=FT.gca, REML=F)
Re-code the factors to make the parameter estimates more intuitive to interpret
# set the Unrelated object to be the reference level for the Object factor
FT.gca$Object <- relevel(FT.gca$Object, "Unrelated")
# re-fit model with sum-coding the Condition factor
m.ft2 <- lmer(meanFix ~ (poly1+poly2+poly3+poly4)*Object*Condition +
(poly1+poly2+poly3+poly4 | Subject) +
(poly1+poly2+poly3+poly4 | Subject:Object:Condition),
contrasts = list(Condition = "contr.sum"),
data=FT.gca, REML=FALSE)
# get_pvalues(m.ft2)
Make a graph
# term labels
trm <- c("Intercept", "poly1", "poly2", "poly3", "poly4", "Object", "Condition",
"poly1:Object", "poly2:Object", "poly3:Object", "poly4:Object",
"poly1:Condition", "poly2:Condition", "poly3:Condition", "poly4:Condition",
"Object:Condition", "poly1:Object:Condition", "poly2:Object:Condition",
"poly3:Object:Condition", "poly4:Object:Condition")
# assemble terms labels and parameter estimates from both models into one data frame
coefs <- data.frame(Term=ordered(rep(trm, 2), levels=rev(trm)),
Model = rep(c("Initial", "Revised"), each=20),
Estimate = c(coef(summary(m.ft))[, 1], coef(summary(m.ft2))[, 1]),
SE = c(coef(summary(m.ft))[, 2], coef(summary(m.ft2))[, 2]))
# plot the parameter estimates
ggplot(coefs, aes(Term, Estimate, color=Model)) +
geom_pointrange(aes(ymin=Estimate-2*SE, ymax = Estimate+2*SE),
position=position_dodge(width=0.4)) +
geom_hline(yintercept=0) + coord_flip()
This figure shows all of the coefficient estimates (+/- 2SE) for the two models. There are two key differences:
- In the revised model, the Object coefficient refers to the main effect of competition (competitor vs. unrelated) on the intercept across both conditions, and it is statistically significant (SE range does not include 0). In the initial model, the Object coefficient is the simple effect in the Function condition and does not reach statistical significance (SE range includes 0).
- In the revised model, the Condition coefficients refer to the simple effect of condition (function vs. thematic) on the unrelated object, and none of them are statistically significant, so there were no systematic differences in the time course of unrelated-object fixations between conditions (as expected). In the initial model, the Condition coefficients refer to the simple effect of condition on the competitor object, and some of them are statistically significant (effects on the cubic and quartic terms), reflecting differences in competitor fixations between the two conditions, although the Object-by-Condition interaction coefficients better capture that effect.

m.wisqars <- lmer(Crude.Rate ~ Time * Region + (Time | State), data = wisqars.suicide, REML=F)
get_pvalues(m.wisqars)
## Estimate Std..Error t.value p.normal p.normal.star
## (Intercept) 9.07573 0.76125 11.9222 0.000e+00 ***
## Time 0.08428 0.04725 1.7838 7.446e-02 .
## RegionSouth 2.22908 0.94143 2.3678 1.790e-02 *
## RegionMidwest 1.59681 1.00703 1.5857 1.128e-01
## RegionWest 6.10760 0.99029 6.1675 6.939e-10 ***
## Time:RegionSouth 0.01468 0.05843 0.2513 8.016e-01
## Time:RegionMidwest 0.10122 0.06250 1.6195 1.053e-01
## Time:RegionWest 0.05906 0.06146 0.9608 3.366e-01
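# each row of the contrast matrix below is a weight vector over the 8 fixed effects,
# in the order printed above: (Intercept), Time, RegionSouth, RegionMidwest,
# RegionWest, Time:RegionSouth, Time:RegionMidwest, Time:RegionWest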
contrast.matrix.int = rbind(
"NE vs. S" = c(0, 0, 1, 0, 0, 0, 0, 0),
"NE vs. MW" = c(0, 0, 0, 1, 0, 0, 0, 0),
"NE vs. W" = c(0, 0, 0, 0, 1, 0, 0, 0),
"S vs. MW" = c(0, 0, -1, 1, 0, 0, 0, 0),
"S vs. W" = c(0, 0, -1, 0, 1, 0, 0, 0),
"MW vs. W" = c(0, 0, 0, 1, -1, 0, 0, 0))
comps.int <- glht(m.wisqars, contrast.matrix.int)
summary(comps.int, test = adjusted("none"))
##
## Simultaneous Tests for General Linear Hypotheses
##
## Fit: lmer(formula = Crude.Rate ~ Time * Region + (Time | State), data = wisqars.suicide,
## REML = F)
##
## Linear Hypotheses:
## Estimate Std. Error z value Pr(>|z|)
## NE vs. S == 0 2.229 0.941 2.37 0.018 *
## NE vs. MW == 0 1.597 1.007 1.59 0.113
## NE vs. W == 0 6.108 0.990 6.17 6.9e-10 ***
## S vs. MW == 0 -0.632 0.861 -0.73 0.463
## S vs. W == 0 3.879 0.841 4.61 4.0e-06 ***
## MW vs. W == 0 -4.511 0.914 -4.93 8.1e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
contrast.matrix.lin = rbind(
"NE vs. S" = c(0, 0, 0, 0, 0, 1, 0, 0),
"NE vs. MW" = c(0, 0, 0, 0, 0, 0, 1, 0),
"NE vs. W" = c(0, 0, 0, 0, 0, 0, 0, 1),
"S vs. MW" = c(0, 0, 0, 0, 0, -1, 1, 0),
"S vs. W" = c(0, 0, 0, 0, 0, -1, 0, 1),
"MW vs. W" = c(0, 0, 0, 0, 0, 0, 1, -1))
comps.lin <- glht(m.wisqars, contrast.matrix.lin)
summary(comps.lin, test = adjusted("none"))
##
## Simultaneous Tests for General Linear Hypotheses
##
## Fit: lmer(formula = Crude.Rate ~ Time * Region + (Time | State), data = wisqars.suicide,
## REML = F)
##
## Linear Hypotheses:
## Estimate Std. Error z value Pr(>|z|)
## NE vs. S == 0 0.0147 0.0584 0.25 0.80
## NE vs. MW == 0 0.1012 0.0625 1.62 0.11
## NE vs. W == 0 0.0591 0.0615 0.96 0.34
## S vs. MW == 0 0.0865 0.0534 1.62 0.11
## S vs. W == 0 0.0444 0.0522 0.85 0.40
## MW vs. W == 0 0.0422 0.0567 0.74 0.46
## (Adjusted p values reported -- none method)
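None of the pairwise slope comparisons is significant, consistent with the non-significant Time-by-Region interaction in the model comparisons above.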
Re-analyze the word learning data using logistic GCA.
# calculate number correct and incorrect
WordLearnEx$Correct <- round(WordLearnEx$Accuracy * 6)
WordLearnEx$Error <- 6 - WordLearnEx$Correct
# prep for GCA
WordLearnEx <- code_poly(WordLearnEx, predictor="Block", poly.order=2, draw.poly=F)
# fit logistic model
wl.logit <- glmer(cbind(Correct, Error) ~ (poly1+poly2)*TP +
(poly1+poly2 | Subject),
data=WordLearnEx, family=binomial)
coef(summary(wl.logit))
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 1.58261 0.1982 7.9861 1.392e-15
## poly1 2.21221 0.3234 6.8403 7.904e-12
## poly2 -0.08209 0.2318 -0.3542 7.232e-01
## TPHigh 0.56565 0.2842 1.9904 4.654e-02
## poly1:TPHigh 0.34726 0.4474 0.7762 4.376e-01
## poly2:TPHigh -0.98955 0.3254 -3.0407 2.360e-03
wl.lin <- lmer(Accuracy ~ (poly1+poly2)*TP +
(poly1+poly2 | Subject),
data=WordLearnEx, REML=F)
get_pvalues(wl.lin)
## Estimate Std..Error t.value p.normal p.normal.star
## (Intercept) 0.778525 0.02173 35.83141 0.000e+00 ***
## poly1 0.286315 0.03779 7.57682 3.531e-14 ***
## poly2 -0.050849 0.03319 -1.53217 1.255e-01
## TPHigh 0.052961 0.03073 1.72357 8.478e-02 .
## poly1:TPHigh 0.001075 0.05344 0.02012 9.839e-01
## poly2:TPHigh -0.116455 0.04693 -2.48121 1.309e-02 *
ggplot(WordLearnEx, aes(Block, Accuracy, color=TP)) +
stat_summary(fun.data = mean_se, geom="pointrange") +
stat_summary(aes(y=fitted(wl.logit)), fun.y = mean, geom="line") +
stat_summary(aes(y=fitted(wl.lin)), fun.y = mean, geom="line", linetype="dashed") +
theme_bw() + expand_limits(y=c(0.5, 1.0)) + scale_x_continuous(breaks=1:10) +
scale_color_manual(values=c("red", "blue")) + labs(y="Accuracy")
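The logistic and linear GCAs agree on the key result: a significant TP effect on the quadratic term. The overall (intercept) advantage for the High TP condition reaches significance in the logistic model but is only marginal in the linear model.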
Make a multi-panel plot showing each kind of competition for each group
ggplot(CohortRhyme, aes(Time, FixProp, color=Object)) +
facet_grid(Type ~ Group) + stat_summary(fun.y=mean, geom="line") +
stat_summary(fun.y=mean, geom="point") + theme_bw()
Use fourth-order orthogonal polynomials to analyze (separately) the cohort and rhyme competition effects. Do the diagnosis groups differ in competition effect sizes?
# prep for GCA
CohortRhyme.gca <- code_poly(df=CohortRhyme, predictor="timeBin", poly.order=4, draw.poly = FALSE)
# group effects on cohort competition
cohort.base <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object +
(1+poly1+poly2+poly3+poly4 | subjID) +
(1+poly1+poly2 | subjID:Object),
data=subset(CohortRhyme.gca, Type == "Cohort"), REML=F)
cohort.group <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object*Group +
(1+poly1+poly2+poly3+poly4 | subjID) +
(1+poly1+poly2 | subjID:Object),
data=subset(CohortRhyme.gca, Type == "Cohort"), REML=F)
kable(anova(cohort.base, cohort.group))
| | Df | AIC | BIC | logLik | deviance | Chisq | Chi Df | Pr(>Chisq) |
|---|---|---|---|---|---|---|---|---|
| cohort.base | 32 | -2278 | -2128 | 1171 | -2342 | NA | NA | NA |
| cohort.group | 52 | -2304 | -2060 | 1204 | -2408 | 66 | 20 | 0 |
#group effects on rhyme competition
rhyme.base <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object +
(1+poly1+poly2+poly3+poly4 | subjID) +
(1+poly1+poly2 | subjID:Object),
data=subset(CohortRhyme.gca, Type == "Rhyme"), REML=F)
rhyme.group <- lmer(FixProp ~ (poly1+poly2+poly3+poly4)*Object*Group +
(1+poly1+poly2+poly3+poly4 | subjID) +
(1+poly1+poly2 | subjID:Object),
data=subset(CohortRhyme.gca, Type == "Rhyme"), REML=F)
kable(anova(rhyme.base, rhyme.group))
| | Df | AIC | BIC | logLik | deviance | Chisq | Chi Df | Pr(>Chisq) |
|---|---|---|---|---|---|---|---|---|
| rhyme.base | 32 | -2270 | -2120 | 1167 | -2334 | NA | NA | NA |
| rhyme.group | 52 | -2265 | -2022 | 1185 | -2369 | 35.6 | 20 | 0.0171 |
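Both model comparisons indicate group differences in competition: a strong effect for cohort competition and a weaker one for rhyme competition.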
#there are a lot of estimated coefs, need to find the critical ones for group comparisons
x <- coef(summary(cohort.group))
x[21:30,]
## Estimate Std. Error t value
## ObjectUnrelated:GroupBroca 0.007169 0.01684 0.4257
## ObjectUnrelated:GroupWernicke -0.049674 0.02042 -2.4322
## poly1:ObjectUnrelated:GroupBroca -0.047473 0.05987 -0.7929
## poly1:ObjectUnrelated:GroupWernicke 0.009278 0.07260 0.1278
## poly2:ObjectUnrelated:GroupBroca 0.044092 0.05885 0.7493
## poly2:ObjectUnrelated:GroupWernicke 0.279206 0.07136 3.9124
## poly3:ObjectUnrelated:GroupBroca -0.043154 0.03735 -1.1555
## poly3:ObjectUnrelated:GroupWernicke -0.050129 0.04529 -1.1069
## poly4:ObjectUnrelated:GroupBroca 0.025129 0.03735 0.6729
## poly4:ObjectUnrelated:GroupWernicke -0.177046 0.04529 -3.9093
library(multcomp)
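# positions 21-30 of each weight vector line up with the ten Object-by-Group
# coefficients printed above (x[21:30, ]); all other fixed effects get weight 0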
contrast.matrix = rbind(
"Intercept: Ctrl - Br" = c(rep(0, 20), 1, 0, 0, 0, 0, 0, 0, 0, 0, 0),
"Intercept: Ctrl - We" = c(rep(0, 20), 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
"Intercept: We - Br" = c(rep(0, 20), 1, -1, 0, 0, 0, 0, 0, 0, 0, 0),
"Linear: Ctrl - Br" = c(rep(0, 20), 0, 0, 1, 0, 0, 0, 0, 0, 0, 0),
"Linear: Ctrl - We" = c(rep(0, 20), 0, 0, 0, 1, 0, 0, 0, 0, 0, 0),
"Linear: We - Br" = c(rep(0, 20), 0, 0, 1, -1, 0, 0, 0, 0, 0, 0),
"Quad: Ctrl - Br" = c(rep(0, 20), 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
"Quad: Ctrl - We" = c(rep(0, 20), 0, 0, 0, 0, 0, 1, 0, 0, 0, 0),
"Quad: We - Br" = c(rep(0, 20), 0, 0, 0, 0, 0, 1, -1, 0, 0, 0)
)
#pairwise comparisons: Cohort competition
summary(glht(cohort.group, contrast.matrix), test = adjusted("none"))
lhs | rhs | estimate | std.error | statistic | p.value |
---|---|---|---|---|---|
Intercept: Ctrl - Br | 0 | 0.007 | 0.017 | 0.426 | 0.670 |
Intercept: Ctrl - We | 0 | -0.050 | 0.020 | -2.432 | 0.015 |
Intercept: We - Br | 0 | 0.057 | 0.023 | 2.460 | 0.014 |
Linear: Ctrl - Br | 0 | -0.047 | 0.060 | -0.793 | 0.428 |
Linear: Ctrl - We | 0 | 0.009 | 0.073 | 0.128 | 0.898 |
Linear: We - Br | 0 | -0.057 | 0.082 | -0.691 | 0.490 |
Quad: Ctrl - Br | 0 | 0.044 | 0.059 | 0.749 | 0.454 |
Quad: Ctrl - We | 0 | 0.279 | 0.071 | 3.912 | 0.000 |
Quad: We - Br | 0 | 0.322 | 0.081 | 4.002 | 0.000 |
#pairwise comparisons: Rhyme competition
summary(glht(rhyme.group, contrast.matrix), test = adjusted("none"))
lhs | rhs | estimate | std.error | statistic | p.value |
---|---|---|---|---|---|
Intercept: Ctrl - Br | 0 | -0.036 | 0.016 | -2.256 | 0.024 |
Intercept: Ctrl - We | 0 | 0.018 | 0.019 | 0.929 | 0.353 |
Intercept: We - Br | 0 | -0.053 | 0.022 | -2.466 | 0.014 |
Linear: Ctrl - Br | 0 | 0.122 | 0.065 | 1.889 | 0.059 |
Linear: Ctrl - We | 0 | -0.128 | 0.078 | -1.638 | 0.101 |
Linear: We - Br | 0 | 0.250 | 0.089 | 2.825 | 0.005 |
Quad: Ctrl - Br | 0 | -0.007 | 0.056 | -0.123 | 0.902 |
Quad: Ctrl - We | 0 | -0.011 | 0.068 | -0.165 | 0.869 |
Quad: We - Br | 0 | 0.048 | 0.078 | 0.623 | 0.533 |
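The group differences in cohort competition were driven by the Wernicke group (intercept and quadratic terms), whereas the differences in rhyme competition involved the Broca group (intercept term and, relative to the Wernicke group, the linear term).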
Use random effects to compute individual competition effect sizes (note: you’ll need to use a model without any group fixed effects to get random effects relative to the overall sample rather than the diagnosis sub-group).
# get cohort effect sizes
blup.cohort <- get_ranef(cohort.base, 'subjID:Object')
ES.coh <- ddply(blup.cohort, .(subjID), summarize,
Cohort.Intercept = Intercept[Object=="Competitor"] - Intercept[Object=="Unrelated"],
Cohort.Linear = poly1[Object=="Competitor"] - poly1[Object=="Unrelated"],
Cohort.Quadratic = poly2[Object=="Competitor"] - poly2[Object=="Unrelated"])
#head(ES.coh)
# get rhyme effect sizes
blup.rhyme <- get_ranef(rhyme.base, 'subjID:Object')
ES.rhy <- ddply(blup.rhyme, .(subjID), summarize,
Rhyme.Intercept = Intercept[Object=="Competitor"] - Intercept[Object=="Unrelated"],
Rhyme.Linear = poly1[Object=="Competitor"] - poly1[Object=="Unrelated"],
Rhyme.Quadratic = poly2[Object=="Competitor"] - poly2[Object=="Unrelated"])
#head(ES.rhy)
# combine cohort and rhyme individual effect sizes
ES <- merge(ES.coh, ES.rhy, by="subjID")
# get grouping info from original data frame
group <- unique(subset(CohortRhyme, select=c(subjID, Group)))
ES <- merge(ES, group, by="subjID")
summary(ES)
## subjID Cohort.Intercept Cohort.Linear Cohort.Quadratic
## Min. :101 Min. :-0.04923 Min. :-0.15154 Min. :-0.3099
## 1st Qu.:106 1st Qu.:-0.01712 1st Qu.:-0.05346 1st Qu.:-0.0832
## Median :110 Median :-0.00985 Median :-0.00336 Median : 0.0263
## Mean :110 Mean : 0.00000 Mean : 0.00000 Mean : 0.0000
## 3rd Qu.:115 3rd Qu.: 0.02199 3rd Qu.: 0.06445 3rd Qu.: 0.0896
## Max. :120 Max. : 0.06869 Max. : 0.13668 Max. : 0.1444
## Rhyme.Intercept Rhyme.Linear Rhyme.Quadratic Group
## Min. :-0.04664 Min. :-0.2445 Min. :-0.1237 Control :12
## 1st Qu.:-0.01774 1st Qu.:-0.1184 1st Qu.:-0.0522 Broca : 5
## Median :-0.00936 Median : 0.0543 Median : 0.0168 Wernicke: 3
## Mean : 0.00000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.01676 3rd Qu.: 0.1060 3rd Qu.: 0.0541
## Max. : 0.06739 Max. : 0.1273 Max. : 0.1193
Test correlations between cohort and rhyme competition effects for patients and controls.
# compute correlations, tidy results, and bind into rows of a data frame
es_corrs <- rbind(
#effect size correlations: Intercept
tidy(cor.test(ES$Cohort.Intercept, ES$Rhyme.Intercept)),
tidy(cor.test(ES$Cohort.Intercept[ES$Group != "Control"], ES$Rhyme.Intercept[ES$Group != "Control"])),
tidy(cor.test(ES$Cohort.Intercept[ES$Group == "Control"], ES$Rhyme.Intercept[ES$Group == "Control"])),
#effect size correlations: Linear
tidy(cor.test(ES$Cohort.Linear, ES$Rhyme.Linear)),
tidy(cor.test(ES$Cohort.Linear[ES$Group != "Control"], ES$Rhyme.Linear[ES$Group != "Control"])),
tidy(cor.test(ES$Cohort.Linear[ES$Group == "Control"], ES$Rhyme.Linear[ES$Group == "Control"])),
#effect size correlations: Quadratic
tidy(cor.test(ES$Cohort.Quadratic, ES$Rhyme.Quadratic)),
tidy(cor.test(ES$Cohort.Quadratic[ES$Group != "Control"], ES$Rhyme.Quadratic[ES$Group != "Control"])),
tidy(cor.test(ES$Cohort.Quadratic[ES$Group == "Control"], ES$Rhyme.Quadratic[ES$Group == "Control"])))
# combine with labels
tab_df <- cbind(Term = rep(c("Intercept", "Linear", "Quadratic"), each=3),
Group = rep(c("Overall", "Aphasia", "Control"), 3),
es_corrs)
kable(tab_df[, 1:8]) #print
Term | Group | estimate | statistic | p.value | parameter | conf.low | conf.high |
---|---|---|---|---|---|---|---|
Intercept | Overall | -0.3827 | -1.7573 | 0.0959 | 18 | -0.7057 | 0.0720 |
Intercept | Aphasia | -0.8605 | -4.1381 | 0.0061 | 6 | -0.9744 | -0.3960 |
Intercept | Control | 0.3379 | 1.1352 | 0.2827 | 10 | -0.2928 | 0.7637 |
Linear | Overall | -0.0221 | -0.0939 | 0.9262 | 18 | -0.4601 | 0.4246 |
Linear | Aphasia | -0.4384 | -1.1948 | 0.2772 | 6 | -0.8733 | 0.3853 |
Linear | Control | 0.4726 | 1.6959 | 0.1208 | 10 | -0.1390 | 0.8232 |
Quadratic | Overall | -0.0216 | -0.0917 | 0.9280 | 18 | -0.4597 | 0.4250 |
Quadratic | Aphasia | 0.0558 | 0.1370 | 0.8955 | 6 | -0.6754 | 0.7317 |
Quadratic | Control | -0.1698 | -0.5449 | 0.5977 | 10 | -0.6777 | 0.4477 |
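The cohort and rhyme competition effect sizes (on the intercept) were strongly negatively correlated for the participants with aphasia but not for the control group; none of the linear or quadratic effect-size correlations reached significance.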
Make a multi-panel scatterplot showing these correlations.
ES.m <- melt(ES, id=c("subjID", "Group"))
ES.m <- cbind(ES.m, colsplit(ES.m$variable, "\\.", c("Type", "Term")))
ES.c <- dcast(ES.m, subjID + Group + Term ~ Type)
ggplot(ES.c, aes(Cohort, Rhyme, color=Group)) +
facet_wrap(~ Term, scales="free") + geom_point() +
scale_color_manual(values=c("grey", "red", "blue")) +
theme_bw()