Overview

This example uses Human Connectome Project data.

Preprocessing steps involved (at this instructional level) for each subject:

  1. preprocess fMRI
    • distortion correction
    • average BOLD
    • motion correction
    • rigid map to T1
  2. preprocess T1
    • crude brain extraction
    • segmentation
    • map to template
  3. extract relevant networks
    • default mode
    • motor
  4. perform a joint structural / functional registration between these subjects.

IO and modalities

rdir = "/Users/stnava/code/structuralFunctionalJointRegistration/"
# default subject ID if not already set in the environment
# (set id before running to process a different subject, e.g. '2001' or '3026')
if ( ! exists("id") ) id = '3026'
# collect image data
# data/LS2001/unprocessed/3T/T1w_MPR1/LS2001_3T_T1w_MPR1_gdc.nii.gz
t1fn = paste0( rdir, 'data/LS',id, "/unprocessed/3T/T1w_MPR1/LS",id,"_3T_T1w_MPR1_gdc.nii.gz")
# now the BOLD data (left-right and right-left phase-encoding runs)
boldfnsL = Sys.glob( paste0( rdir, 'data/LS',id, "/LS", id, "fmri/unprocessed/3T/rfMRI_REST1_*/*REST1_LR_gdc.nii.gz" ) )
boldfnsR = Sys.glob( paste0( rdir, 'data/LS',id, "/LS", id, "fmri/unprocessed/3T/rfMRI_REST1_*/*REST1_RL_gdc.nii.gz" ) )
# get the single-band reference (SBRef) images
reffns = Sys.glob( paste0( rdir, 'data/LS',id, "/LS", id, "fmri/unprocessed/3T/rfMRI_REST1_*/*SBRef_gdc.nii.gz" ) )

Distortion correction (without a field map)

Let us first “undistort” by building an average template from the two opposite-phase-encoding (LR and RL) reference images.

if ( ! exists( "und" ) ) {
  i1 = antsImageRead( reffns[1] )  # LR reference
  i2 = antsImageRead( reffns[2] )  # RL reference
  # build a small SyN template from the two references; the result approximates
  # the undistorted midpoint between the opposite phase-encoding acquisitions
  und = buildTemplate( i1, list( i1, i2 ), "SyN" )
  }
# bias-correct the T1
t1 = antsImageRead( t1fn ) %>% n3BiasFieldCorrection( 8 ) %>%
  n3BiasFieldCorrection( 4 )
# quick intensity-based masks for the BOLD reference and the T1
bmask = getMask( und, lowThresh = mean( und ) * 0.75, Inf, 3 ) %>% iMath("FillHoles")
# this is a fragile approach - not really recommended - but it is quick
t1mask = getMask( t1, lowThresh = mean( t1 ) * 1.1, Inf, 5 ) %>% iMath("FillHoles")
# rigid then deformable (SyN) registration of the masked T1 to the undistorted BOLD
t1rig = antsRegistration( und * bmask, t1 * t1mask, "BOLDRigid" )
t1reg = antsRegistration( und * bmask, t1 * t1mask, "SyNOnly",
          initialTransform = t1rig$fwdtransforms,
          synMetric = 'CC', synSampling = 2, regIterations = c(5) )
###########################

These are the original LR and RL (opposite phase-encoding) reference images.

print(id)
## [1] "2001"
plot( i1,  axis = 3 )

plot( i2,  axis = 3 )


This is the result of mapping them together via the template-building (averaging) transform.

plot( und,  axis = 3 )


The distortion relative to the T1 is greatly reduced.
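
One way to check this quantitatively is to compare the rigidly mapped T1 against each single acquisition and against the averaged template. A minimal sketch, assuming ANTsR's antsImageMutualInformation (a Mattes-style metric where more negative values indicate better agreement); one would expect the template to agree best:

# a sketch: mutual information between the rigidly mapped (masked) T1 and each
# reference image; more negative should indicate better agreement
t1inBold = t1rig$warpedmovout   # masked T1 resampled into the BOLD reference space
print( data.frame(
  LR = antsImageMutualInformation( i1, t1inBold ),
  RL = antsImageMutualInformation( i2, t1inBold ),
  undistorted = antsImageMutualInformation( und, t1inBold ) ) )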

Brain masking

Use the BOLD mask to extract the brain from the T1 (for expedience; usually we would have run antsCorticalThickness already).

# map the BOLD-derived brain mask into T1 space (used in the exercise sketch below)
t1maskFromBold = antsApplyTransforms( t1, bmask, t1reg$invtransforms, 
                                      interpolator = 'nearestNeighbor' )
# refine the T1 inhomogeneity correction with N4, restricted to the brain mask
t1 = n4BiasFieldCorrection( t1, t1mask, 8 ) %>%
  n4BiasFieldCorrection( t1mask, 4 )
# replace the crude BOLD mask with the T1-derived mask mapped into BOLD space
bmask =  antsApplyTransforms( und, t1mask, t1reg$fwdtransforms, 
  interpolator = 'nearestNeighbor' ) %>% morphology("close",3)
ofn = paste0( rdir, "features/LS", id, '_mask.nii.gz' )
antsImageWrite( bmask, ofn )
t1toBOLD = antsApplyTransforms( und, t1, t1reg$fwdtransforms )
ofn = paste0( rdir, "features/LS", id, '_t1ToBold.nii.gz' )
antsImageWrite( t1toBOLD, ofn )
plot( t1, t1mask, alpha = 0.5, axis = 3 )

############################

Here we might repeat the registration process, if we trust the masks.

Exercise: Try it yourself.
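
One possible sketch for this exercise, reusing the BOLD-derived T1 mask (t1maskFromBold) and the refined BOLD mask (bmask) computed above; new variable names are used so the rest of the document is unaffected:

# a sketch: redo the rigid + deformable registration with the refined masks
t1rig2 = antsRegistration( und * bmask, t1 * t1maskFromBold, "BOLDRigid" )
t1reg2 = antsRegistration( und * bmask, t1 * t1maskFromBold, "SyNOnly",
  initialTransform = t1rig2$fwdtransforms,
  synMetric = 'CC', synSampling = 2, regIterations = c(5) )
# visually compare the refined warp against the first pass
plot( und, axis = 3 )
plot( t1reg2$warpedmovout, axis = 3 )

If the refined mapping looks better, one could assign t1reg = t1reg2 before continuing.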

Tissue segmentation

Now we can segment the T1.

# a simple method
################################################
if ( ! exists( "t1seg" ) ) {
  qt1 = iMath( t1, "TruncateIntensity", 0, 0.95 )
  t1seg = kmeansSegmentation( qt1, 3, t1mask, 0.2 ) 
  volumes = labelStats( t1seg$segmentation, t1seg$segmentation )
  rownames( volumes ) = c("background",'csf', 'gm', 'wm' )
  volumes$NormVolume = volumes$Volume / sum( volumes$Volume[-1])
  pander::pander( volumes[ , c("LabelValue","Volume","NormVolume")] )

  # if inspection shows this is not good (e.g. WM volume underestimated because of
  # local hyperintensity), fix the mask and redo the segmentation
  if ( volumes["wm","NormVolume"] < 0.2 ) {
    t1mask2 = t1mask * thresholdImage( t1seg$segmentation, 1, 2 )
    t1seg = kmeansSegmentation( t1, 3, t1mask2, 0.1 ) 
    }
}
plot( t1, t1seg$segmentation, axis = 3, window.overlay=c(0,3) )

boldseg = antsApplyTransforms( und, t1seg$segmentation, t1reg$fwdtransforms, 
                               interpolator = 'nearestNeighbor' )
plot(und,boldseg,axis=3,alpha=0.5)

########################################

Template mapping

  • include prior information, e.g. from meta-analysis or anatomy
if ( ! exists( "treg" ) ) {
  # Powers et al. functional node coordinates and labels (shipped with ANTsR)
  data( "powers_areal_mni_itk" )
  myvoxes = 1:nrow( powers_areal_mni_itk )
  anat = powers_areal_mni_itk$Anatomy[myvoxes]
  syst = powers_areal_mni_itk$SystemName[myvoxes]
  Brod = powers_areal_mni_itk$Brodmann[myvoxes]
  xAAL  = powers_areal_mni_itk$AAL[myvoxes]
  if ( ! exists( "ch2" ) )
    ch2 = antsImageRead( getANTsRData( "ch2" ) )
  # deformable registration of the ch2 template to this subject's masked T1
  treg = antsRegistration( t1 * t1mask, ch2, 'SyN' )
}
# map the template-space points into BOLD space: points transform in the opposite
# direction to images, so we use the inverse transform chain; whichtoinvert inverts
# the affine components on the fly and uses the precomputed inverse warps as-is
concatx2 = c( treg$invtransforms, t1reg$invtransforms )
pts2bold = antsApplyTransformsToPoints( 3, powers_areal_mni_itk, concatx2, 
                                        whichtoinvert = c( T, F, T, F ) )
## Warning in data.matrix(points): NAs introduced by coercion (repeated five times)
ptImg = makePointsImage( pts2bold, bmask, radius = 3 )
plot( und, ptImg, axis=3, window.overlay = range( ptImg ) )

bold2ch2 = antsApplyTransforms( ch2, und,  concatx2, 
      whichtoinvert = c( T, F, T, F ) )
# check the composite output
plot( bold2ch2 , axis=3 )

###########################################################

Extracting canonical functional network maps

Preprocessing

Back to the fMRI data …

* undistort
* motion correction

Then we will be ready for first-level analysis …

if ( ! exists( "motcorr" ) ) {
  bold = antsImageRead( boldfnsR[ 1 ] )   # the RL resting-state run
  avgBold = getAverageOfTimeSeries( bold )
  # map the average BOLD to the undistorted reference, then apply to the full series
  boldUndTX = antsRegistration( und, avgBold, "SyN" )
  boldUndTS = antsApplyTransforms( und, bold, boldUndTX$fwdtransforms, imagetype = 3 )
  avgBold = getAverageOfTimeSeries( boldUndTS )
  motcorr = antsrMotionCalculation( boldUndTS, verbose = F, typeofTransform = 'Rigid' )
  }
plot( ts( motcorr$fd$MeanDisplacement ) )

print( names( motcorr ) )
## [1] "moco_img"     "moco_params"  "moco_avg_img" "moco_mask"   
## [5] "fd"           "dvars"

Trim the BOLD time series to keep the “good” time points:

  • trim the first \(k\) time points

  • trim the high motion time points and their \(k\) neighbors

goodtimes = rep( TRUE, dim( motcorr$moco_img )[4] )
goodtimes[ 1:10 ] = FALSE  # signal stabilization
highMotionTimes = which( motcorr$fd$MeanDisplacement >= 0.5 )
# censor the neighbors of each high-motion frame; because the loop grows the set
# cumulatively, the censored window is slightly wider than +/- 2 frames
for ( highs in -2:2 ) 
  highMotionTimes = c( highMotionTimes, highMotionTimes + highs )
highMotionTimes = sort( unique( highMotionTimes ))
# guard against indices that fall outside the time series
highMotionTimes = highMotionTimes[ highMotionTimes >= 1 &
  highMotionTimes <= length( goodtimes ) ]
goodtimes[ highMotionTimes ] = FALSE
print( table( goodtimes ) )
## goodtimes
## FALSE  TRUE 
##    17   403

We can then perform regression using only the “good” time points.

Or we can impute / interpolate (see the RestingBold vignette in the ANTsR documentation); a simple interpolation sketch follows.
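
A minimal interpolation sketch: simple column-wise linear interpolation across the censored frames (this is not the RestingBold approach; imputeBadTimes is a hypothetical helper, and gmmat is only defined in the next subsection):

# fill censored rows of a time x voxel matrix by interpolating from the good frames
imputeBadTimes <- function( mat, goodtimes ) {
  tt = seq_len( nrow( mat ) )
  apply( mat, 2, function( v )
    approx( x = tt[ goodtimes ], y = v[ goodtimes ], xout = tt, rule = 2 )$y )
}
# e.g.  gmmatImputed = imputeBadTimes( gmmat, goodtimes )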

Network modeling

Now we can do some additional level-one analysis to extract the relevant networks:

* default mode
* motor 
# use tissue segmentation to guide compcor 
# FIXME - provide reference for this approach
csfAndWM = thresholdImage( boldseg, 1, 1 ) + ( thresholdImage( boldseg, 3, 3 ) %>% iMath("ME",1))
ccmat = timeseries2matrix( motcorr$moco_img, csfAndWM )
noiseu = compcor( ccmat, ncompcor = 10 )
# convert GM to a matrix, after light smoothing of the motion-corrected series
smth = c( 5, 5, 5, 2.0 ) # this would be for sigmaInPhysicalCoordinates = TRUE
smth = rep( 1, 4 )       # overrides the above: voxel-unit smoothing; may need tuning
simg = smoothImage( motcorr$moco_img, smth, sigmaInPhysicalCoordinates = F )
gmmat = timeseries2matrix( simg, thresholdImage( boldseg, 2, 2 ) )
tr = antsGetSpacing( bold )[4]
gmmatf = frequencyFilterfMRI( gmmat, tr = tr, freqLo = 0.01, freqHi = 0.1,  opt = "trig" )
# smoothing or filtering?
# build the mean signal for a named network (from the Powers node labels), regress
# all GM voxel time series on it plus nuisance terms, and return the t-statistic map
getNetworkBeta <-function( networkName = 'Default Mode' ) {
  dfnpts = which( powers_areal_mni_itk$SystemName == networkName ) 
  print( paste(networkName, length(dfnpts)))
  dfnmask = maskImage( ptImg, ptImg, level = dfnpts, binarize = T )
  dfnmat = timeseries2matrix( simg, dfnmask )
  dfnmatf = frequencyFilterfMRI( dfnmat, tr = tr, freqLo = 0.01, freqHi = 0.1,  opt = "trig" )
  dfnsignal = rowMeans( data.matrix( dfnmatf ) )
  dfndf = data.frame( signal = dfnsignal, noiseu = noiseu, fd = motcorr$fd )
  myform = paste( " mat ~ ", paste( names( dfndf ), collapse = "+") )
  dfnmdl = ilr( dfndf[goodtimes,], 
    list( mat = data.matrix( data.frame( gmmatf ))[goodtimes,] ),  myform )
  dfnBetaImg = makeImage(  thresholdImage( boldseg, 2, 2 ),  dfnmdl$tValue["signal",] )
  return( dfnBetaImg )
  }
# salBetaImg = getNetworkBeta( "Salience" )
vizBetaImg = getNetworkBeta( "Visual" )
## [1] "Visual 31"
dfnBetaImg = getNetworkBeta( 'Default Mode' )
## [1] "Default Mode 60"
####################################
##############

Now we can look at the full continuous beta maps, first for the visual network and then for the default mode network.

plot( und, vizBetaImg, alpha = 1.0, axis = 3, window.overlay = c(3, max(dfnBetaImg )),
      nslices = 24, ncolumns = 6 )

plot( und, dfnBetaImg, alpha = 1.0, axis = 3, window.overlay = c(3, max(dfnBetaImg )),
      nslices = 24, ncolumns = 6 )

ofn = paste0( rdir, "features/LS", id, '_vizBetaImg.nii.gz' )
antsImageWrite( vizBetaImg, ofn )
ofn = paste0( rdir, "features/LS", id, '_dfnBetaImg.nii.gz' )
antsImageWrite( dfnBetaImg, ofn )
ofn = paste0( rdir, "features/LS", id, '_undistort.nii.gz' )
antsImageWrite( und, ofn )
ofn = paste0( rdir, "features/LS", id, '_t1ToBold.nii.gz' )
antsImageWrite( t1toBOLD, ofn )
ofn = paste0( rdir, "features/LS", id, '_mask.nii.gz' )
antsImageWrite( bmask, ofn )

Now repeat all of this for the next subject by changing the ID.
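
A minimal driver sketch for doing so, assuming this analysis is saved as an R Markdown file (the file name below is a hypothetical placeholder); run it from a fresh R session so objects cached for one subject are not reused for the next:

# render the document once per subject; each render gets a fresh evaluation
# environment, so the exists() guards above re-run, while id is picked up globally
for ( id in c( '2001', '3026' ) ) {
  rmarkdown::render( "structuralFunctionalJointRegistration.Rmd",   # placeholder name
    output_file = paste0( "LS", id, "_report.html" ),
    envir = new.env() )
}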

Structural / functional joint registration

Finally, use all of this data to do a joint registration using both structural and functional features.

# antsRegistration with multivariateExtras
id1 = '2001'
s1f1 = antsImageRead( paste0( rdir, "features/LS", id1, '_dfnBetaImg.nii.gz' ) )
s1f2 = antsImageRead( paste0( rdir, "features/LS", id1, '_undistort.nii.gz' ) )
s1f3 = antsImageRead( paste0( rdir, "features/LS", id1, '_t1ToBold.nii.gz' ) )
s1fv = antsImageRead( paste0( rdir, "features/LS", id1, '_vizBetaImg.nii.gz' ) )
s1mask = antsImageRead( paste0( rdir, "features/LS", id1, '_mask.nii.gz' ) )
id2 = '3026'
s2f1 = antsImageRead( paste0( rdir, "features/LS", id2, '_dfnBetaImg.nii.gz' ) )
s2f2 = antsImageRead( paste0( rdir, "features/LS", id2, '_undistort.nii.gz' ) )
s2f3 = antsImageRead( paste0( rdir, "features/LS", id2, '_t1ToBold.nii.gz' ) )
s2fv = antsImageRead( paste0( rdir, "features/LS", id2, '_vizBetaImg.nii.gz' ) )
s2mask = antsImageRead( paste0( rdir, "features/LS", id2, '_mask.nii.gz' ) )
#############
# affine initialization on the structural (T1-in-BOLD) feature
jrig = antsRegistration( s1f3 * s1mask, s2f3  * s2mask, "Affine"  )
# structural-only deformable registration, for comparison
ureg = antsRegistration( s1f3, s2f3, "SyNOnly", initialTransform = jrig$fwdtransforms,
  mask = s1mask )
# joint registration: add the functional beta images as extra metrics; each extra is
# list( metric, fixedFeature, movingFeature, weight, metricParameter (bins for mattes) )
jreg = antsRegistration( s1f3, s2f3, "SyNOnly", initialTransform = jrig$fwdtransforms,
  mask = s1mask,
  multivariateExtras = list(
   list( "mattes", s1fv, s2fv, 1, 32 ),
   list( "mattes", s1f1, s2f1, 1, 32 ) ), verbose = FALSE )
#############
# evaluate the structural-only registration: correlation between the fixed and warped
# visual-network beta images and between the structural features
# (note the printed "dfn" label, although the functional metric here uses the
# visual-network feature)
vizWarped = antsApplyTransforms( s1f1, s2fv, ureg$fwdtransforms )
metric = antsrMetricCreate( s1fv, vizWarped, type="Correlation" )
strWarped = antsApplyTransforms( s1f1, s2f3, ureg$fwdtransforms )
smetric = antsrMetricCreate( s1f3, strWarped, type="Correlation" )
print( paste("univar-dfn", antsrMetricGetValue( metric ), 'str', antsrMetricGetValue( smetric ) ) )
## [1] "univar-dfn -0.210871061336536 str -0.665838991397789"
#############
dfnWarped = antsApplyTransforms( s1f1, s2f1, jreg$fwdtransforms )
vizWarped = antsApplyTransforms( s1f1, s2fv, jreg$fwdtransforms )
metric = antsrMetricCreate( s1fv, vizWarped, type="Correlation" )
strWarped = antsApplyTransforms( s1f1, s2f3, jreg$fwdtransforms )
smetric = antsrMetricCreate( s1f3, strWarped, type="Correlation" )
print( paste("mulvar-dfn", antsrMetricGetValue( metric ), 'str', antsrMetricGetValue( smetric ) ) )
## [1] "mulvar-dfn -0.242817647081149 str -0.658078306134552"
#############

Result: with this correlation metric, where lower (more negative) values indicate better agreement, the joint registration improves the functional similarity while leaving the structural similarity essentially unchanged. Correlation of these features is not an ideal evaluation, but it provides a quick check.

Visualize the “fixed” subject with its DFN beta map first.

Then show the warped (“deformed”) subject with its DFN beta map.

plot( s1f3, s1f1, alpha = 1.0, axis = 3, window.overlay = c(3, max(dfnBetaImg )),
      nslices = 24, ncolumns = 6, doCropping=FALSE )

plot( strWarped, dfnWarped, alpha = 1.0, axis = 3, window.overlay = c(3, max(dfnBetaImg )),
      nslices = 24, ncolumns = 6, doCropping=FALSE  )
