These analyses investigate connectivity dynamics of the posterior medial network (PMN) during a movie-watching task. The data include 136 younger adults (ages 18-40) from the Cam-CAN dataset: 68 for the initial ‘discovery’ analyses and 68 for replication.
Each PMN region is a cluster of 100 contiguous voxels (2x2x2 mm) within the Default A and C subnetworks (Schaefer et al., 2018) that activates during ‘episodic’ tasks (according to a Neurosynth meta-analysis), based on the definition in Ritchey & Cooper (2020, TiCS).
The analyses first test time-averaged functional connectivity, i.e., the Pearson correlation (and partial correlation) between ROI time series across the full movie-watching task. I use this to investigate the presence of subsystems (from seed-to-voxel analyses) and intra-network connectivity.
Next, the analyses test time-varying connectivity of the PMN subsystems in relation to the movie content (similarity across subjects), including changes in connectivity at event transitions.
library(tidyverse)
library(plotly)
library(ggnetwork)
library(cowplot)
library(ggpubr)
library(knitr)
library(psych)
library(rstatix)
library(reshape2)
library(pracma)
library(mosaic)
library(NetworkToolbox)
library(pander)
library(lessR)
library(fmri)
library(ppcor)
library(corpcor)
library(network)
library(GGally)
library(igraph)
library(tidygraph)
library(lsa)
library(prodlim)
library(stringr)
library(caret)
library(abind)
## define sample to analyze (discovery or replication)
this.group <- "Replication"
cat(this.group,'sample')
## Replication sample
R package information
sessionInfo()
## R version 3.5.1 (2018-07-02)
## Platform: x86_64-apple-darwin15.6.0 (64-bit)
## Running under: macOS High Sierra 10.13.6
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] abind_1.4-5 caret_6.0-86 prodlim_2019.11.13
## [4] lsa_0.73.1 SnowballC_0.6.0 tidygraph_1.1.2
## [7] igraph_1.2.2 GGally_1.4.0 network_1.15
## [10] corpcor_1.6.9 ppcor_1.1 MASS_7.3-51.5
## [13] fmri_1.9.3 lessR_3.8.1 pander_0.6.3
## [16] NetworkToolbox_1.2.1 mosaic_1.4.0 Matrix_1.2-14
## [19] mosaicData_0.17.0 ggformula_0.9.0 ggstance_0.3.1
## [22] lattice_0.20-35 pracma_2.1.8 reshape2_1.4.3
## [25] rstatix_0.6.0 psych_1.8.10 knitr_1.20
## [28] ggpubr_0.1.8 magrittr_1.5 cowplot_0.9.3
## [31] ggnetwork_0.5.8 plotly_4.8.0 forcats_0.4.0
## [34] stringr_1.4.0 dplyr_0.8.5 purrr_0.3.2
## [37] readr_1.1.1 tidyr_1.0.2 tibble_3.0.2
## [40] ggplot2_3.1.0 tidyverse_1.2.1
##
## loaded via a namespace (and not attached):
## [1] readxl_1.1.0 backports_1.1.2 plyr_1.8.4
## [4] lazyeval_0.2.2 splines_3.5.1 digest_0.6.18
## [7] foreach_1.4.4 htmltools_0.3.6 wesanderson_0.3.6
## [10] cluster_2.0.7-1 mosaicCore_0.6.0 openxlsx_4.1.0
## [13] recipes_0.1.13 modelr_0.1.2 gower_0.2.1
## [16] aws_2.4-1 colorspace_1.4-1 rvest_0.3.2
## [19] ggrepel_0.8.0 haven_1.1.2 crayon_1.3.4
## [22] jsonlite_1.5 survival_3.1-11 iterators_1.0.10
## [25] glue_1.3.2 gtable_0.3.0 ipred_0.9-9
## [28] V8_1.5 car_3.0-2 DEoptimR_1.0-8
## [31] scales_1.0.0 randomcoloR_1.1.0 Rcpp_1.0.1
## [34] viridisLite_0.3.0 foreign_0.8-71 awsMethods_1.1-1
## [37] stats4_3.5.1 lava_1.6.7 htmlwidgets_1.3
## [40] httr_1.3.1 RColorBrewer_1.1-2 ellipsis_0.3.0
## [43] pkgconfig_2.0.2 reshape_0.8.8 nnet_7.3-12
## [46] tidyselect_1.0.0 rlang_0.4.5 munsell_0.5.0
## [49] cellranger_1.1.0 tools_3.5.1 cli_1.1.0
## [52] generics_0.0.2 sas7bdat_0.5 broom_0.7.0
## [55] evaluate_0.12 ggdendro_0.1-20 yaml_2.2.0
## [58] ModelMetrics_1.2.2.2 zip_1.0.0 robustbase_0.93-3
## [61] nlme_3.1-137 leaps_3.0 xml2_1.2.0
## [64] compiler_3.5.1 rstudioapi_0.8 curl_4.3
## [67] png_0.1-7 stringi_1.4.3 gsl_1.9-10.3
## [70] vctrs_0.2.4 pillar_1.4.4 lifecycle_0.2.0
## [73] data.table_1.11.8 R6_2.4.0 latticeExtra_0.6-28
## [76] gridExtra_2.3 rio_0.5.10 codetools_0.2-15
## [79] assertthat_0.2.1 rprojroot_1.3-2 withr_2.1.2
## [82] metafor_2.4-0 mnormt_1.5-5 triangle_0.11
## [85] parallel_3.5.1 hms_0.4.2 grid_3.5.1
## [88] rpart_4.1-13 timeDate_3043.102 class_7.3-14
## [91] rmarkdown_1.10 carData_3.0-2 Rtsne_0.15
## [94] pROC_1.16.2 lubridate_1.7.4 ellipse_0.4.1
Define custom functions & color palettes
### define functions:
se <- function(x) sqrt(var(x)/length(x)) #function to calculate SE
ci <- function(x) (sqrt(var(x)/length(x))) * 1.96 #function to calculate the half-width of the 95% CI (1.96 * SE)
### color palettes:
# color my 8 nodes
mycolors = c('#f2a68c','#929292','#e6664d','#8ce9f7','#3c7ff5','#934be0','#fcd841','#95df3a')
module.colors <- c("#fcd841","#3c7ff5","#8ce9f7") #for "Dorsal PM", "Ventral PM", "Ventral PM-to-Dorsal PM"
# color scales
gray.colors <- colorRampPalette(c("gray90","gray10"))
heat.colors <- colorRampPalette(c("#009FFF","white","#ff5858"))
blue.colors <- colorRampPalette(c("gray70","gray90","#56CCF2","#2F80ED","#0575E6"))
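A quick illustration of the two helper functions above (hypothetical values only, not part of the analysis):
vals <- rnorm(100)   # 100 simulated observations
se(vals)             # standard error of the mean (~0.1 for n = 100, sd = 1)
ci(vals)             # half-width of the 95% confidence interval (1.96 * SE, ~0.2)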
Load in event boundaries
# create event boundary windows
my.TR = 2.47
nTime = 193 #total number of movie TRs
boundaries <- read.csv('../../../behavioral-data/CamCan_movie_event_onsets_corrected.csv') # from Ben-Yakov & Henson (2018) - updated Nov 2020
boundaries$TR <- boundaries$Boundary/my.TR
# I am now classifying time points as in (1) or outside (0) of an event transition window,
# with a window centered on each boundary
# test the effect of window size
window_TR = 2
boundary.windows <- array(0,c(nTime))
boundaries$adjustedTR <- round(boundaries$TR + 2) #accounting for HRF lag (~5s) before rounding to nearest TR
for (b in 1:nrow(boundaries)){
min.point <- boundaries$adjustedTR[b] - window_TR
max.point <- boundaries$adjustedTR[b] + window_TR
boundary.windows[min.point:max.point] <- 1
}
### option to re-define nTime if we only want to analyze up to a certain point.
### added to check the influence of the "gun shot" event toward the end of the movie (no change to results)
#nTime = 162
boundary.windows <- boundary.windows[1:nTime]
cat('Analyzing PMN connectivity over',nTime,'TRs (',round(nTime*my.TR,2),'seconds ) of movie watching\n')
## Analyzing PMN connectivity over 193 TRs ( 476.71 seconds ) of movie watching
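For reference, a quick count of how many TRs fall inside an event-transition window (illustrative check only):
cat(sum(boundary.windows), 'of', nTime, 'TRs are within', window_TR, 'TRs of an event boundary\n')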
Mean (across voxels) time series from unsmoothed data
timeseries.file <- paste('../submission-orig/',this.group,'/PM_node_timeseries_',this.group,'.csv',sep="")
cat('Loading PMN time series from:',timeseries.file,'\n')
## Loading PMN time series from: ../submission-orig/Replication/PM_node_timeseries_Replication.csv
allData = read.csv(timeseries.file, header = TRUE)
# for testing -- analyze subset of time points
allData <- subset(allData, Time <=nTime)
allData$Subject=as.factor(allData$Subject)
allData$Node=as.factor(allData$Node)
subjects <- levels(allData$Subject)
NSubs <- length(subjects)
cat('Total Number of Subjects =',NSubs,'\n')
## Total Number of Subjects = 68
rois = levels(allData$Node)
cat('ROIs =',rois,'\n')
## ROIs = aAG MPFC pAG PCC PHC pHipp Prec RSC
# number of unique connections for 8x8 rois
nConnections <- ((length(rois)*length(rois)) - length(rois))/2
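As a sanity check on the connection count (an equivalent formulation):
stopifnot(nConnections == choose(length(rois), 2))   # 8 ROIs -> 28 unique pairs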
To facilitate data inspection, here is an interactive plot showing the denoised, mean time series of each ROI per subject.
Each time series has been standardized within subject and ROI.
scaled_data <- allData %>% group_by(Subject, Node) %>%
mutate(Z = scale(Value))
lim <- ceil(max(abs(scaled_data$Z)))
plot_ly(scaled_data, x = ~Time, y = ~Z, frame = ~Subject, color = ~Node,
colors = mycolors, type = "scatter", mode = "lines",
width = 900, height = 400) %>%
layout(title = "Z scored PM time series",
xaxis = list(title = "TR"),
yaxis = list(range = c(-lim,lim))) %>%
animation_opts(frame = 500, transition = 100, redraw = FALSE)
Mean standardized time series across subjects, by ROI:
summary_timeseries <- scaled_data %>% group_by(Time, Node) %>%
summarise(MeanZ = mean(Z),
SEZ = se(Z))
ggplot(summary_timeseries) +
scale_fill_manual(values = mycolors) +
scale_color_manual(values = mycolors) +
geom_hline(yintercept=0, size=1) +
geom_ribbon(alpha=0.3, aes(x=Time*my.TR, fill=Node,
ymin=MeanZ-SEZ, ymax=MeanZ+SEZ), color=NA) +
geom_line(aes(x=Time*my.TR, y=MeanZ, color=Node), size=1.5) +
scale_y_continuous(expand=c(0,0.01,0,0), limits=c(-2,2.5)) +
scale_x_continuous(expand=c(0,0,0,0)) +
labs(x="Time (seconds)", y='Mean Z') +
ggtitle('Group-averaged ROI time-series') +
theme(plot.title = element_text(size = 30, face='plain', hjust=0.5),
axis.line.x = element_line(color = "black", size = 1),
axis.line.y = element_line(color = "black", size = 1),
axis.text.x = element_text(size = 20), axis.text.y = element_text(size = 18),
axis.title.y = element_text(size = 22), axis.title.x = element_text(size = 22),
panel.background = element_blank(),
legend.title = element_blank(),
legend.key.size = unit(2,"line"),
legend.margin = margin(0,0,0,0, "cm"),
plot.margin = margin(0.25, 0.25, 0.25, 0.25, "cm"))
Even though framewise displacement (FD) and other confounds have been removed from the above ROI time series, here is the TR-by-TR FD for reference:
fdData = read.csv('../../../dataset-info/FD_byTR.csv', header = TRUE)
# filter by subjects in this sample:
fdData <- fdData[fdData$SubID %in% subjects,] %>%
droplevels()
fdData$SubID=as.factor(fdData$SubID)
colnames(fdData)[2] <- 'Time'
fdData <- subset(fdData, Time <= nTime)
summary_FD <- fdData %>% group_by(Time) %>%
summarise(Mean = mean(FD),
SE = se(FD))
ggplot(summary_FD) +
geom_ribbon(alpha=0.3, aes(x=Time*my.TR, ymin=Mean-SE, ymax=Mean+SE),
color=NA, fill="gray40") +
geom_line(aes(x=Time*my.TR, y=Mean), size=1.5, color="gray20") +
scale_y_continuous(expand=c(0,0,0,0), limits=c(0,.24)) +
scale_x_continuous(expand=c(0,0,0,0)) +
labs(x="Time (seconds)", y='Mean FD') +
ggtitle('Group-averaged FD') +
theme(plot.title = element_text(size = 30, face='plain', hjust=0.5),
axis.line.x = element_line(color = "black", size = 1),
axis.line.y = element_line(color = "black", size = 1),
axis.text.x = element_text(size = 20), axis.text.y = element_text(size = 18),
axis.title.y = element_text(size = 22), axis.title.x = element_text(size = 22),
panel.background = element_blank(),
legend.title = element_blank(),
legend.key.size = unit(2,"line"),
legend.margin = margin(0,0,0,0, "cm"),
plot.margin = margin(0.25, 0.25, 0.25, 0.25, "cm"))
Whole-brain data were smoothed (6 mm FWHM) and masked with the MNI brain mask from normalization. The code below calculates the top 20% of voxel connections for each ROI (densities from 30% to 10% are also tested).
Note: whole-brain connectivity was calculated in MATLAB and is loaded here as a CSV file, along with pre-generated BrainNet Viewer images.
th_WB = 0.80 #top 20 % of connections -- for visualization
# also iterate over other roi-specific network densities to check the stability of communities
wb_iter = seq(from=.7, to=.9, by=.05) # 30% to 10% network densities
# read in csv with group-averaged seed-to-voxel connectivity patterns (r values)
seedtovoxel.file <- paste('../submission-orig/',this.group,'/wholebrain/group-average/group_PM_wholebrain_connectivity.csv',sep="")
cat('Loading seed-to-voxel connectivity values from:',seedtovoxel.file,'\n')
## Loading seed-to-voxel connectivity values from: ../submission-orig/Replication/wholebrain/group-average/group_PM_wholebrain_connectivity.csv
roi_wholebrain <- read.csv(seedtovoxel.file)
# order columns by my roi order (currently alphabetical):
ord = match(rois,colnames(roi_wholebrain))
roi_wholebrain <- roi_wholebrain[,ord]
# store binarized whole brain connections (from group-level averaged connectivity) per seed, per density threshold
wholebrain_th <- array(NA,c(nrow(roi_wholebrain),ncol(roi_wholebrain),length(wb_iter)))
colnames(wholebrain_th) <- rois
for (d in 1:length(wb_iter)) {
for (r in 1:length(rois)) {
th <- quantile(roi_wholebrain[,r],wb_iter[d])
wholebrain_th[roi_wholebrain[,r] >= th,r,d] <- 1
wholebrain_th[roi_wholebrain[,r] < th,r,d] <- 0
}
}
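# illustrative check (not part of the original pipeline): at the first density level
# (wb_iter[1] = .7), each seed's binarized map should retain roughly the top 30% of voxels
round(colMeans(wholebrain_th[,,1]), 2)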
# plot pre-generated images of each roi's top 20% of connections:
plots.images = list()
for (r in 1:length(rois)) {
plots.images[[r]] <- ggdraw() +
draw_image(paste('../submission-orig/',this.group,'/wholebrain/group-average/task-movie_',rois[r],
'_R_seedtovoxel_binarized_quantile_',th_WB,'.png',sep="")) +
ggtitle(rois[r]) +
theme(plot.title = element_text(hjust = 0.5, size=20, margin=margin(0,0,5,0)),
plot.margin = margin(0, 0, 0, 0, "cm"))
}
Now, I am using the Louvain method to detect ROI clusters based on the similarity of whole-brain connectivity patterns.
gammas = seq(0.75,1.25,0.01) #default Louvain gamma = 1
group_modules <- data.frame(array(0,c(length(gammas)*length(wb_iter),length(rois))))
colnames(group_modules) <- rois
n = 0
# Calculate Louvain modules over densities...
for (d in 1:length(wb_iter)) {
# correlate wholebrain seed-to-voxel connectivity patterns between rois
net <- cor(wholebrain_th[,,d])
# ... and gammas
for (g in gammas) {
n = n+1
modules <- louvain(net,g)$community
group_modules[n,] <- modules
}
} # end of loop over densities
# calculate % of the time each ROI is grouped with every other (across all density + gamma levels):
roi_modules <- array(0,c(length(rois),length(rois)))
colnames(roi_modules) <- rois
rownames(roi_modules) <- rois
for (x in 1:length(rois)) {
x_vec <- group_modules[,colnames(group_modules) == rois[x]]
for (y in 1:length(rois)) {
y_vec <- group_modules[,colnames(group_modules) == rois[y]]
xy <- x_vec == y_vec
roi_modules[x,y] <- (sum(xy)/length(x_vec))*100
}
}
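# sanity check (illustrative, not part of the original pipeline): each ROI is always
# grouped with itself, so the diagonal of the agreement matrix should be 100%
stopifnot(all(diag(roi_modules) == 100))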
### plot ------
my.modules <- as.vector(group_modules[30,]) # manually selecting the best parcellation after visual inspection of clustering
n.mod <- max(unique(my.modules))
########## difference between discovery and replication markdowns #########
if (this.group == "Discovery") {
# sort by communities
ord = c()
for (n in 1:n.mod) {
idx <- which(my.modules == n)
#hclust within module in case any more fine-grained module groupings
h <- hclust(as.dist(100-roi_modules[idx,idx]))
ord <- c(ord, idx[h$order])
}
# save this roi order for use with replication sample
save(ord, file='roi_order.RData')
} else if (this.group == "Replication") {
# load in ROI order from discovery sample to keep visualization consistent
load('roi_order.RData')
}
############################################################################
# ***** re-order by community ******* #
roi_modules <- roi_modules[ord,ord]
my.modules <- my.modules[ord]
allData$Node <- factor(allData$Node,levels(allData$Node)[ord])
mycolors <- mycolors[ord]
rois <- rois[ord]
# *********************************** #
# plot module matrix
data <- melt(roi_modules)
# remove diagonal
data <- data[data$Var1 != data$Var2,]
p2 <- ggplot(data, aes(Var1, Var2, fill=value)) +
geom_tile(color="white", size=.5) +
geom_text(aes(label = round(value)), size = 4, color="white") +
scale_fill_gradientn(limits=c(40,100), colors = gray.colors(12), na.value = "white",
guide = guide_colorbar(frame.colour = "black")) +
geom_rect(xmin=.5, xmax=length(rois)+.5, ymin=.5, ymax=length(rois)+.5,
fill=NA, color="black",size=1) +
geom_segment(aes(x = 0.5, y = 0.5, xend = 5.5, yend = 0.5), color = module.colors[1], size=4) +
geom_segment(aes(x = 5.5, y = 0.5, xend = 8.5, yend = 0.5), color = module.colors[2], size=4) +
geom_segment(aes(x = 0.5, y = 0.5, xend = 0.5, yend = 5.5), color = module.colors[1], size=4) +
geom_segment(aes(x = 0.5, y = 5.5, xend = 0.5, yend = 8.5), color = module.colors[2], size=4) +
labs(fill="% Same\nModule", tag="b") +
ggtitle('Community probability')+
theme(plot.title = element_text(hjust = 0.5, size=24, margin=margin(0,0,20,0), face="plain"),
axis.text.x = element_text(size = 16, color = mycolors, hjust=1, vjust=1, angle=45),
axis.text.y = element_text(size = 16, color = mycolors),
axis.ticks.x = element_blank(),axis.ticks.y = element_blank(),
axis.line.x = element_blank(),axis.line.y = element_blank(),
axis.title.x = element_blank(), axis.title.y = element_blank(),
legend.title = element_text(size=14), legend.text = element_text(size=12),
legend.key.width = unit(0.4,"cm"), legend.key.height = unit(0.8,"cm"),
plot.margin = margin(0, 0, 0, 0, "cm"),
plot.tag = element_text(size = 34, face = "bold"))
# add images showing overlap of connections by submodule
p3 <- ggdraw() +
draw_image(paste('../submission-orig/',this.group,'/wholebrain/group-average/task-movie_quantile_',th_WB,'_sum_DorsalPM.png',sep="")) +
ggtitle('"Dorsal PM": 20% density') + labs(tag="c") +
geom_label(aes(x=0.5, y=0.065, label="Connected Regions"),
size=4.5, color="black", fill="white", label.size=0) +
geom_label(aes(x=0.5, y=0.115, label="0 5"),
size=4, color="black", fill=NA, label.size=0) +
theme(plot.title = element_text(hjust = 0.5, size=24, margin=margin(0,0,10,0)),
plot.tag = element_text(size = 34, face="bold"),
plot.margin = margin(0, 0, 0, 0, "cm"))
p4 <- ggdraw() +
draw_image(paste('../submission-orig/',this.group,'/wholebrain/group-average/task-movie_quantile_',th_WB,'_sum_VentralPM.png',sep="")) +
ggtitle('"Ventral PM": 20% density') + labs(tag="d") +
geom_label(aes(x=0.5, y=0.065, label="Connected Regions"),
size=4.5, color="black", fill="white", label.size=0) +
geom_label(aes(x=0.5, y=0.115, label="0 3"),
size=4, color="black", fill=NA, label.size=0) +
theme(plot.title = element_text(hjust = 0.5, size=24, margin=margin(0,0,10,0)),
plot.tag = element_text(size = 34, face="bold"),
plot.margin = margin(0, 0, 0, 0, "cm"))
# re-order panel A by module assignments and group:
p1 <- ggarrange(plotlist=plots.images[ord], ncol = 8, nrow = 1) +
labs(tag="a") + theme(plot.tag = element_text(size = 34, face="bold"),
plot.margin = margin(0, 0, 0, 0, "cm"))
###### combine whole brain plots
p <- ggarrange(p2,p3,p4, ncol = 3, nrow = 1, widths=c(1.05,1,1))
myPlot <- ggarrange(p1,p, ncol = 1, nrow = 2, heights=c(0.75,1))
plot(myPlot)
## save
ggsave(paste('./',this.group,'/figures/seed-to-voxel_',this.group,'.pdf',sep=""), plot = myPlot, device = "pdf", width=15, height=8, unit="in")
From now on, ROIs are sorted into the above modules – ‘Ventral PM’ and ‘Dorsal PM’.
Pearson’s correlation between the time series of PMN regions, per subject:
## create full pearson's correlation connectivity matrix --> pearson's r
connMatrix = array(data = 0, c(length(rois),length(rois),NSubs))
colnames(connMatrix) <- rois
rownames(connMatrix) <- rois
for (s in 1:NSubs) {
subData <- subset(allData, Subject == subjects[s])
# from long to wide format
subMatrix <- spread(subData, Node, Value)
# correlations between ROI time series
connMatrix[,,s] <- cor(subMatrix[,match(rois,colnames(subMatrix))])
diag(connMatrix[,,s]) <- 0
} #end of loop through subjects
# store
node.bivariate <- connMatrix
### calculate mean for each connection across subjects, applying fisher z transformation before averaging,
### with group-averaged values converted back to r
node.bivariate.mean <- fisherz2r(apply(fisherz(node.bivariate), c(1,2), mean))
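As an aside on the Fisher z step (illustrative numbers only): for positive correlations that vary, averaging in z space gives a slightly higher value than averaging the raw r values directly.
rs <- c(0.2, 0.8)                # two hypothetical correlation values
mean(rs)                         # raw average = 0.50
fisherz2r(mean(fisherz(rs)))     # Fisher-z average ~ 0.57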
Functional connections as a graph - r > .2 (dark = top 50%):
scale.factor <- 5 #just to scale edge weights for visualization, no impact on stats
c.th = .2
###########
net <- node.bivariate.mean
net[net < c.th] = 0 #remove low connections
c.upper = median(net[abs(net)>0]) #dark for top 50% of edges
#plot network, highlighting strong connections
net <- net*scale.factor
net <- network(net, directed = FALSE, ignore.eval = FALSE, names.eval = "weights")
network::set.edge.attribute(net, "color",
ifelse(net %e% "weights" > (c.upper*scale.factor), "gray30", "gray70"))
network.vertex.names(net) = rois
p <- suppressMessages(ggnet2(net, mode = "circle",
node.size = 11, node.color = mycolors,
label=TRUE, label.size=4.5,
edge.size = "weights", edge.color = "color",
layout.exp = 0.05) +
scale_y_continuous(expand=c(.05,.05,.05,.05)) +
scale_x_continuous(expand=c(.05,.05,.05,.05)) +
theme(plot.title = element_text(hjust = 0.5, size=28, margin=margin(0,0,20,0), face="plain"),
plot.margin = margin(0.2, 0.8, 0.2, 0.2, "cm"),
axis.line.x = element_blank(), axis.line.y = element_blank(),
axis.text.y = element_blank(), axis.ticks.y = element_blank(),
axis.text.x = element_blank(), axis.ticks.x = element_blank()))
p <- ggarrange(p) +
labs(tag="a") +
theme(plot.tag = element_text(size = 34, face="bold"),
plot.margin = margin(0.2, 0.2, 0, 0.2, "cm")) +
geom_text(aes(x=0.86, y=0.15), size=6, color = "gray70",
label=paste('r > ',c.th,sep=""), fontface = "bold.italic") +
geom_text(aes(x=0.88, y=0.08), size=6, color = "gray30",
label='top 50%', fontface = "bold.italic")
Average functional connectivity within and between PMN subsystems:
# labels connections by module:
data.mod <- melt(node.bivariate) #Var3 = Subject index
data.mod <- data.mod[data.mod$value != 0,] # remove diagonal
data.mod$Module = ''
data.mod$Module[data.mod$Var1 %in% rois[my.modules==1] & data.mod$Var2 %in% rois[my.modules==1]] <- 'Dorsal'
data.mod$Module[data.mod$Var1 %in% rois[my.modules==2] & data.mod$Var2 %in% rois[my.modules==2]] <- 'Ventral'
data.mod$Module[(data.mod$Var1 %in% rois[my.modules==1] & data.mod$Var2 %in% rois[my.modules==2]) |
(data.mod$Var1 %in% rois[my.modules==2] & data.mod$Var2 %in% rois[my.modules==1])] <- 'Ventral-Dorsal'
# within-subject mean module connectivity (fisher z transformed)
data.summary.subject <- data.frame(
data.mod %>%
group_by(Var3, Module) %>%
summarise(Strength = mean(fisherz(value)))
)
# plot subject-level averages
p1 <- ggplot(data.summary.subject, aes(x=Module, y=fisherz2r(Strength), fill=Module)) +
scale_fill_manual(values = module.colors) +
scale_color_manual(values = module.colors) +
geom_point(aes(color=Module), size = 1.5, position=position_jitter(width = .15), alpha=.6) +
geom_boxplot(size=1, width=.5, color="black", outlier.color=NA, alpha=.4) +
geom_hline(yintercept = 0, size = 1) +
scale_y_continuous(expand = c(0,0.01,0,0.01), limits=c(-.25,.77)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 7)) +
labs(y='Pearson R', tag="b") +
theme(axis.line.x = element_line(color = "black", size = 1), axis.line.y = element_line(color = "black", size = 1),
axis.text.x = element_text(size = 18),
axis.text.y = element_text(size = 18),
axis.title.y = element_text(size = 20), axis.title.x = element_blank(),
panel.background = element_blank(),
legend.position="none",
plot.tag = element_text(size = 34, face="bold"),
plot.margin = margin(0.2, 0.2, 0, 0.2, "cm"))
# annotate with significance (manual)
p1 <- p1 + geom_segment(aes(x = 1.05, y = 0.65, xend = 1.85, yend = 0.65),size=0.5) +
geom_text(aes(x=1.45, y=0.66), size=10, label='*') +
geom_segment(aes(x = 1.05, y = 0.72, xend = 2.95, yend = 0.72),size=0.5) +
geom_text(aes(x=2, y=0.73), size=10, label='*') +
geom_segment(aes(x = 2.15, y = 0.65, xend = 2.95, yend = 0.65),size=0.5) +
geom_text(aes(x=2.55, y=0.66), size=10, label='*')
#### means and CIs per module
data.summary.group <- data.summary.subject %>% group_by(Module) %>%
summarise(Mean = mean(Strength),
CI = ci(Strength))
kable(data.summary.group, format = "pandoc",
caption = "Mean functional connectivity (z) within and between PM modules")
Module | Mean | CI |
---|---|---|
Dorsal | 0.3166850 | 0.0241432 |
Ventral | 0.3740555 | 0.0295989 |
Ventral-Dorsal | 0.2658060 | 0.0210117 |
#### t-tests to validate whole-brain defined modules in terms of bivariate connectivity
# runs on each pair of modules
ts <- data.summary.subject %>%
pairwise_t_test(
Strength ~ Module, paired = TRUE,
p.adjust.method = "none",
detailed=T
)
kable(ts, format = "pandoc",
caption = "Functional connectivity differences within and between PM modules")
estimate | .y. | group1 | group2 | n1 | n2 | statistic | p | df | conf.low | conf.high | method | alternative | p.adj | p.adj.signif |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
-0.0573705 | Strength | Dorsal | Ventral | 68 | 68 | -2.823081 | 0.006000 | 67 | -0.0979332 | -0.0168077 | T-test | two.sided | 0.006000 | ** |
0.0508790 | Strength | Dorsal | Ventral-Dorsal | 68 | 68 | 4.069440 | 0.000127 | 67 | 0.0259235 | 0.0758345 | T-test | two.sided | 0.000127 | *** |
0.1082495 | Strength | Ventral | Ventral-Dorsal | 68 | 68 | 6.823739 | 0.000000 | 67 | 0.0765855 | 0.1399135 | T-test | two.sided | 0.000000 | **** |
### save out time-averaged module connectivity for episodic memory individual differences analyses
data.summary.subject$Var3 <- subjects[data.summary.subject$Var3] # add in actual subject IDs from indexes
timeaveraged.file <- paste('./',this.group,'/module-connectivity_',this.group,'.RData',sep="")
cat('\nSaving time-averaged module connectivity to:',timeaveraged.file,'\n')
##
## Saving time-averaged module connectivity to: ./Replication/module-connectivity_Replication.RData
save(data.summary.subject, file=timeaveraged.file)
Functional connectivity of each ROI to each subsystem – the ROIs that differentiate the subsystems most strongly are the precuneus and PHC:
# labels connections by module:
data.mod <- melt(node.bivariate) #Var3 = Subject index
data.mod <- data.mod[data.mod$value != 0,] # remove diagonal
data.mod$Network = ''
# group according to first ROI column:
data.mod$Network[data.mod$Var1 %in% rois[my.modules==1]] <- 'Dorsal'
data.mod$Network[data.mod$Var1 %in% rois[my.modules==2]] <- 'Ventral'
# within-subject mean module and roi connectivity (fisher z transformed)
data.summary.subject <- data.frame(
data.mod %>%
group_by(Var3, Var2, Network) %>%
summarise(Strength = mean(fisherz(value)))
)
#### means and CIs per module/ROI
data.summary.group <- data.summary.subject %>% group_by(Var2, Network) %>%
summarise(Mean = mean(Strength),
CI = ci(Strength))
kable(data.summary.group, format = "pandoc",
caption = "Mean functional connectivity (z) per ROI and subsystem")
Var2 | Network | Mean | CI |
---|---|---|---|
MPFC | Dorsal | 0.2979579 | 0.0281358 |
MPFC | Ventral | 0.2217346 | 0.0315411 |
pHipp | Dorsal | 0.2023746 | 0.0273014 |
pHipp | Ventral | 0.1879981 | 0.0270038 |
Prec | Dorsal | 0.3281500 | 0.0329246 |
Prec | Ventral | 0.2076464 | 0.0359380 |
aAG | Dorsal | 0.3505826 | 0.0330113 |
aAG | Ventral | 0.3643573 | 0.0389410 |
PCC | Dorsal | 0.4043598 | 0.0333497 |
PCC | Ventral | 0.3472934 | 0.0402385 |
RSC | Dorsal | 0.3575941 | 0.0260162 |
RSC | Ventral | 0.4048205 | 0.0352368 |
pAG | Dorsal | 0.2752690 | 0.0439675 |
pAG | Ventral | 0.3613792 | 0.0348839 |
PHC | Dorsal | 0.1645549 | 0.0251211 |
PHC | Ventral | 0.3559666 | 0.0318774 |
#### t-tests to validate whole-brain defined modules in terms of bivariate connectivity
# runs on each pair of modules
ts <- data.summary.subject %>%
group_by(Var2) %>%
rstatix::t_test(
Strength ~ Network, paired = TRUE,
detailed=T
)
ts$sig <- ''
ts$sig[ts$p < .05] = '*'
kable(ts, format = "pandoc",
caption = "Functional connectivity differences by ROI")
Var2 | estimate | .y. | group1 | group2 | n1 | n2 | statistic | p | df | conf.low | conf.high | method | alternative | sig |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
MPFC | 0.0762233 | Strength | Dorsal | Ventral | 68 | 68 | 4.7934756 | 0.0000095 | 67 | 0.0444838 | 0.1079628 | T-test | two.sided | * |
pHipp | 0.0143765 | Strength | Dorsal | Ventral | 68 | 68 | 0.9833469 | 0.3290000 | 67 | -0.0148051 | 0.0435581 | T-test | two.sided | |
Prec | 0.1205036 | Strength | Dorsal | Ventral | 68 | 68 | 7.3369342 | 0.0000000 | 67 | 0.0877207 | 0.1532865 | T-test | two.sided | * |
aAG | -0.0137747 | Strength | Dorsal | Ventral | 68 | 68 | -0.5928894 | 0.5550000 | 67 | -0.0601483 | 0.0325989 | T-test | two.sided | |
PCC | 0.0570664 | Strength | Dorsal | Ventral | 68 | 68 | 2.2825140 | 0.0256000 | 67 | 0.0071631 | 0.1069697 | T-test | two.sided | * |
RSC | -0.0472264 | Strength | Dorsal | Ventral | 68 | 68 | -2.2163390 | 0.0301000 | 67 | -0.0897579 | -0.0046949 | T-test | two.sided | * |
pAG | -0.0861102 | Strength | Dorsal | Ventral | 68 | 68 | -3.5397762 | 0.0007330 | 67 | -0.1346661 | -0.0375544 | T-test | two.sided | * |
PHC | -0.1914118 | Strength | Dorsal | Ventral | 68 | 68 | -10.7322079 | 0.0000000 | 67 | -0.2270111 | -0.1558124 | T-test | two.sided | * |
# plot subject-level averages
p2 <- ggplot(data.summary.subject, aes(x=Var2, y=fisherz2r(Strength))) +
scale_fill_manual(values = module.colors) +
scale_color_manual(values = module.colors) +
geom_point(aes(color=Network, fill=Network), size = 1,
position=position_jitterdodge(jitter.width = .15, dodge.width=.75),
alpha=.6) +
geom_boxplot(aes(fill=Network), size=1, width=.5, alpha=.4,
outlier.color=NA, color="black",
position=position_dodge(.75)) +
geom_text(data=ts, aes(y=0.7, label=sig), size=12) +
geom_hline(yintercept = 0, size = 1) +
scale_y_continuous(expand = c(0,0.01,0,0.01), limits=c(-.3,.77)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 7)) +
labs(y='Pearson R', tag="c") +
theme(axis.line.x = element_line(color = "black", size = 1), axis.line.y = element_line(color = "black", size = 1),
axis.text.y = element_text(size = 18),
axis.text.x = element_text(size = 20, color = mycolors),
axis.title.y = element_text(size = 20), axis.title.x = element_blank(),
panel.background = element_blank(),
legend.position="top", legend.title = element_blank(),
legend.text = element_text(size=18),
legend.key.width = unit(1,"cm"), legend.key.height = unit(1,"cm"),
plot.tag = element_text(size = 34, face="bold"),
plot.margin = margin(0, 0.5, 0, 0.2, "cm"))
Combine plots:
myPlotA <- ggarrange(p,p1, ncol = 2, widths=c(1,1))
myPlot <- ggarrange(myPlotA, p2, nrow=2, heights=c(1,1))
# add annotation for network groupings
lineA = ggplot() + geom_segment(aes(x = 0, y = 0, xend = 1, yend = 0),
color = module.colors[1], size=3) + theme_blank() +
theme(plot.margin = margin(0, 0, 0, 3.5, "cm"),
panel.background = element_rect(fill = "transparent"),
plot.background = element_rect(fill = "transparent", color = NA))
lineB = ggplot() + geom_segment(aes(x = 0, y = 0, xend = 1, yend = 0),
color = module.colors[2], size=3) + theme_blank() +
theme(plot.margin = margin(0, 1, 0, 0, "cm"),
panel.background = element_rect(fill = "transparent"),
plot.background = element_rect(fill = "transparent", color = NA))
lines = ggarrange(lineA, lineB, ncol=2, widths= c(2.05,1))
myPlot <- ggarrange(myPlot, lines, nrow=2, heights=c(18,1))
plot(myPlot)
## save as Rdata for merging with other group
save(myPlot, file=paste('./',this.group,'/figures/intranetwork_',this.group,'.RData',sep=""))
Here, I’m testing the mediating influence of each node on the other connections within the network: if we partial out (remove) each node in turn, rather than all at once (partial network, above), how do the other connections in the network change?
For each node, I’m generating a circular graph without that node and its connections. The width of each edge represents the strength of the original connection (c in a mediation model). The color of the edge (from gray to blue) represents how much that connection decreases after ‘lesioning’ the node: Pm = 1 - (c’/c), where c’ is the partial correlation in a mediation model and Pm is the proportion mediated.
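Before the full loop, here is a minimal sketch of the Pm calculation on simulated time series (hypothetical data and variable names; not part of the pipeline):
set.seed(1)
p.ts <- rnorm(193)               # hypothetical "mediator" node time series
x.ts <- 0.7*p.ts + rnorm(193)    # two nodes whose shared variance runs through the mediator
y.ts <- 0.7*p.ts + rnorm(193)
c.xy  <- cor(x.ts, y.ts)                                        # original correlation (c)
cp.xy <- pcor(cbind(x.ts, y.ts, p.ts))$estimate["x.ts","y.ts"]  # partial correlation (c')
1 - (cp.xy/c.xy)                                                # proportion mediated (Pm)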
### group-level bivariate mask --
### only plot connections originally > threshold for visualization
### (to make sure there is something meaningful to mediate)
b.mask <- node.bivariate.mean
b.mask[b.mask < c.th] <- 0
b.mask[b.mask > 0] <- 1
diag(b.mask) <- 0
# color gradient of edge colors to show % connection strength mediated
edge.bins <- seq(.1,1,by=.1) # increments of Pm
edge.colors <- blue.colors(length(edge.bins))
# ---------------------------------------------------------------------#
# to store summary network Pm stats:
roi_effect = data.frame(array(0,c(length(rois)*NSubs,3)))
colnames(roi_effect) = c('SubID','Node','Influence')
row = 0
# to store partial networks after removing RSC and PCC, to illustrate strongest effects
lesion.examples <- array(data = NA, c(length(rois),length(rois),2))
ex = 0
plots.graphs = list()
for (p in 1:length(rois)) {
#for full bivariate (ignoring this node's connections)
connMatrix.c = array(data = NA, c(length(rois),length(rois),NSubs))
colnames(connMatrix.c) <- rois
rownames(connMatrix.c) <- rois
#for partial (removing mediating influence of this roi)
connMatrix.p <- connMatrix.c
for (s in 1:length(subjects)) {
subData <- subset(allData, Subject == subjects[s])
#time-series for this node (mediator)
pData <- subData$Value[subData$Node == rois[p]]
for (y in 1:length(rois)) {
#time-series for ROIy
yData = subData$Value[subData$Node == rois[y]]
for (x in 1:length(rois)) {
# time-series for ROIx
xData = subData$Value[subData$Node == rois[x]]
if ((y != p) & (x != p) & (x != y)) {
# partial correlation between x and y when controlling for p
connMatrix.p[y,x,s] <- pcor(cbind(xData,yData,pData))$estimate["xData","yData"]
# original, bivariate correlation between x and y
connMatrix.c[y,x,s] <- cor(xData,yData)
}
}
}
# per subject, what is the % of variance in all other connections accounted for?
# (% change from mean original PMN correlation to mean partial)
row = row + 1
roi_effect$SubID[row] <- s #subject index
roi_effect$Node[row] <- rois[p]
p.conn <- fisherz2r(mean(fisherz(connMatrix.p[,,s]), na.rm = TRUE)) # average partial correlation (c')
c.conn <- fisherz2r(mean(fisherz(connMatrix.c[,,s]), na.rm = TRUE)) # average original correlation (c)
Pm <- 1 - (p.conn/c.conn) # proportion mediated
if (Pm < 0) { #mask any small "suppressing" effects where average p.conn is actually larger than average c.conn
Pm <- 0
} else if (Pm > 1) { #if the average p.conn is now negative
Pm <- 1
}
roi_effect$Influence[row] <- Pm
} #end of loop through subjects
### calculate group-level Pm for each PMN connection ----------------------- #
connmean.p <- fisherz2r(apply(fisherz(connMatrix.p), c(1,2), mean))
## store partial network for example plot if PCC or RSC
if (rois[p] == "PCC" || rois[p] == "RSC") {
ex = ex + 1
lesion.examples[,,ex] <- connmean.p
}
connmean.p[b.mask == 0] <- NA #ignore connections that weren't present above our threshold (needs to be something to reduce)
connmean.c <- fisherz2r(apply(fisherz(connMatrix.c), c(1,2), mean))
connmean.c[b.mask == 0] <- NA
# pm for each connection
connmean.r <- 1 - (connmean.p/connmean.c)
connmean.r[connmean.r < 0] <- 0.001 #mask for edges where there is a small increase for partial cor ("suppressing" effects)
# ^^ setting non-zero just so it gets recognized as an edge below
connmean.r[connmean.r > 1] <- 1 #where p.conn is now negative, all variance in c.conn is explained.
##### PLOT ---------------------------------------------------------------- #
#specify initial network of variance explained (Pm) to get edge colors
net <- connmean.r
net[is.na(net)] <- 0
net <- network(net, directed = FALSE, ignore.eval = FALSE, names.eval = "weights")
# set gradient color for % reduction per edge
my.edges <- network::get.edge.value(net, "weights")
edge.colors.sort <- array('',c(length(my.edges)))
for (e in 1:length(my.edges)) {
diff <- edge.bins - my.edges[e]
diff[diff < 0] = 2 #set above max to get next lowest bin
edge.colors.sort[e] <- edge.colors[diff == min(diff)]
}
# if it's the first mediator ROI, get a color scale to later add to the plot:
if (p == 1) {
net.df <- ggnetwork(net, layout="circle")
edge.p <- ggplot(net.df, aes(x = x, y = y, xend = xend, yend = yend,
color=weights)) +
geom_edges() +
scale_color_gradientn(limits = c(min(edge.bins),max(edge.bins)),
colors = edge.colors, na.value = "white",
guide = guide_colorbar(frame.colour = "black")) +
labs(color="Proportion\nmediated") +
theme(legend.text = element_text(size=16),
legend.title = element_text(size=18, margin = margin(0,0,5,0)),
legend.key.width = unit(0.4,"cm"), legend.key.height = unit(1,"cm"),
plot.margin = margin(0, 0.25, 0, 0, "cm"))
edge.legend <- as_ggplot(get_legend(edge.p))
}
#now replace network with the original bivariate connections (for edge width)
net <- connmean.c*6 #scale just for visualization purposes
net[is.na(net)] <- 0
net <- network(net, directed = FALSE, ignore.eval = FALSE, names.eval = "weights")
# draw graph
# make current node white so not visible (removed statistically)
lesion.colors <- mycolors
lesion.colors[p] <- "white" #remove current mediator node
network.vertex.names(net) = rois
plots.graphs[[p]] <- suppressMessages(ggnet2(net, mode = "circle",
node.size = 10, node.color = lesion.colors,
label=FALSE,
edge.size = "weights", edge.color = edge.colors.sort,
layout.exp = 0.05) +
scale_y_continuous(expand=c(.05,.05,.05,.05)) +
scale_x_continuous(expand=c(.05,.05,.05,.05)) +
theme(plot.margin = margin(0.25, 0.5, 1, 0.5, "cm"),
plot.title = element_text(size = 28, margin=margin(5,0,10,0), face="plain", hjust=0),
axis.line.x = element_blank(), axis.line.y = element_blank(),
axis.text.y = element_blank(), axis.ticks.y = element_blank(),
axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
ggtitle(rois[p]) + border(size = 1.5, linetype = 1))
} # end of loop through mediator rois ---------------------------------------------- #
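As a quick consistency check on the summary data frame built above (illustrative, not part of the original pipeline):
stopifnot(row == length(rois) * NSubs)   # one Pm entry per ROI-by-subject combination
summary(roi_effect$Influence)            # Pm values are clamped to the [0, 1] range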
Proportion of network connectivity mediated, by ROI:
## Add summary node influence bar graph ---------------------------------------
roi_effect$Node <- as.factor(roi_effect$Node)
roi_effect$Node <- factor(roi_effect$Node,levels(roi_effect$Node)[ord])
roi_effect.summary <- roi_effect %>%
group_by(Node) %>%
summarise(Strength = mean(Influence), CI = ci(Influence))
# now reorder rois (and their colors) by mean mediating effect for these plots
med.ord <- order(roi_effect.summary$Strength)
med.rois <- rois[med.ord]
med.colors <- mycolors[med.ord]
roi_effect$Node <- factor(roi_effect$Node,levels(roi_effect$Node)[med.ord])
### plot average influence per roi:
p1 <- ggplot(roi_effect, aes(x=Node, y=Influence, color=Node, fill=Node)) +
scale_fill_manual(values = med.colors) +
scale_color_manual(values = med.colors) +
geom_point(size = 2, position=position_jitter(width = .1), alpha=.6) +
geom_boxplot(size=1, width=.5, color="black", outlier.color=NA, alpha=.4) +
scale_y_continuous(expand = c(0,0,0,0), limits=c(-.01,1.01)) +
labs(y='Network Pm', tag="c") +
theme(axis.line.x = element_line(color = "black", size = 1),
axis.line.y = element_line(color = "black", size = 1),
axis.text.x = element_text(size = 22, hjust=1, vjust=1, angle=45),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 22), axis.title.x = element_blank(),
panel.background = element_blank(),
legend.position="none",
plot.tag = element_text(size = 40, face="bold"),
plot.margin = margin(0.25, 0.5, 0.25, 0, "cm"))
Illustrate the resulting network (thresholded at r > .2) when RSC or PCC (the nodes with the largest Pm) is removed:
lesion.examples[is.na(lesion.examples)] <- 0
lesion.examples[lesion.examples < c.th] <- 0 #remove low correlations
### RSC
net <- lesion.examples[rois!="RSC",rois!="RSC",2]
net <- network(net, directed = FALSE, ignore.eval = FALSE, names.eval = "weights")
# draw graph
network.vertex.names(net) = rois[rois!="RSC"]
p.lesionA <- suppressMessages(ggnet2(net,
node.size = 8, node.color = mycolors[rois!="RSC"],
label=FALSE,
edge.size = 2, edge.color = "gray70",
layout.exp = 0.1) +
scale_y_continuous(expand=c(.05,.05,.05,.05)) +
theme(plot.margin = margin(0, 0.5, 0, 3, "cm"),
axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
border(size = 2, linetype = 1, color=mycolors[rois=="RSC"]))
### PCC
net <- lesion.examples[rois!="PCC",rois!="PCC",1]
net <- network(net, directed = FALSE, ignore.eval = FALSE, names.eval = "weights")
# draw graph
network.vertex.names(net) = rois[rois!="PCC"]
p.lesionB <- suppressMessages(ggnet2(net,
node.size = 8, node.color = mycolors[rois!="PCC"],
label=FALSE,
edge.size = 2, edge.color = "gray70",
layout.exp = 0.1) +
scale_y_continuous(expand=c(.05,.05,.05,.05)) +
theme(plot.margin = margin(0, 1, 0, 0.5, "cm"),
axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
border(size = 2, linetype = 1, color=mycolors[rois=="PCC"]))
# join
p.lesion <- ggarrange(p.lesionA,p.lesionB, ncol=2, widths=c(1.32,1)) +
labs(tag="b") + theme(plot.tag = element_text(size = 40, face="bold"))
# order and plot graph panels:
myPlot <- ggarrange(plotlist=plots.graphs[med.ord], ncol = 4, nrow = 2) +
labs(tag="a") + theme(plot.tag = element_text(size = 40, face="bold"))
# add lesion examples over summary plot
myPlotB <- ggarrange(p.lesion, p1, nrow=2, heights=c(2,5))
# join! (with edge color legend)
joined.Plot <- ggarrange(myPlot, edge.legend, myPlotB, ncol=3, nrow=1, widths=c(2,0.25,1))
plot(joined.Plot)
## save as Rdata for merging with other group
save(joined.Plot, file=paste('./',this.group,'/figures/virtual-lesion_',this.group,'.RData',sep=""))